author     Olaf Hering <ohering@suse.de>  2019-05-14 23:36:07 +0200
committer  Olaf Hering <ohering@suse.de>  2019-05-14 23:36:07 +0200
commit     f47af4f34458ece8615eaa2778a919e39d5c1d57 (patch)
tree       31924f0a14058288d6bb78924e96cbf56836c82e
parent     b9858b6884c96e98e1dfeeb353e201d8c541a68b (diff)
parent     74b134e91328546d34355a40edabab64c8fd2ae1 (diff)
Merge remote-tracking branch 'kerncvs/SLE12-SP3' into SLE12-SP3-AZURE
-rw-r--r--  blacklist.conf | 1
-rw-r--r--  patches.arch/cpu-speculation-add-mitigations-cmdline-option.patch | 3
-rw-r--r--  patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch | 57
-rw-r--r--  patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch | 90
-rw-r--r--  patches.arch/locking-static_keys-improve-uninitialized-key-warning.patch | 207
-rw-r--r--  patches.arch/locking-static_keys-provide-declare-and-well-as-define-macros.patch | 43
-rw-r--r--  patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch | 3
-rw-r--r--  patches.arch/powerpc-numa-improve-control-of-topology-updates.patch | 3
-rw-r--r--  patches.arch/powerpc-speculation-support-mitigations-cmdline-option.patch | 3
-rw-r--r--  patches.arch/s390-speculation-support-mitigations-cmdline-option.patch | 3
-rw-r--r--  patches.arch/sched-core-fix-cpu-max-vs-cpuhotplug-deadlock.patch | 121
-rw-r--r--  patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch | 66
-rw-r--r--  patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch | 215
-rw-r--r--  patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch | 53
-rw-r--r--  patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch | 54
-rw-r--r--  patches.arch/x86-msr-index-cleanup-bit-defines.patch | 102
-rw-r--r--  patches.arch/x86-speculation-consolidate-cpu-whitelists.patch | 164
-rw-r--r--  patches.arch/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch | 78
-rw-r--r--  patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch | 160
-rw-r--r--  patches.arch/x86-speculation-mds-add-bug_msbds_only.patch | 87
-rw-r--r--  patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch | 72
-rw-r--r--  patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch | 79
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch | 191
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch | 83
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch | 62
-rw-r--r--  patches.arch/x86-speculation-mds-add-smt-warning-message.patch | 50
-rw-r--r--  patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch | 134
-rw-r--r--  patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch | 143
-rw-r--r--  patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch | 165
-rw-r--r--  patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch | 49
-rw-r--r--  patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch | 42
-rw-r--r--  patches.arch/x86-speculation-remove-redundant-arch_smt_update-invocation.patch | 54
-rw-r--r--  patches.arch/x86-speculation-rework-smt-state-change.patch | 109
-rw-r--r--  patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch | 84
-rw-r--r--  patches.arch/x86-speculation-support-mitigations-cmdline-option.patch | 3
-rw-r--r--  patches.arch/x86-stop-exporting-msr-index-h-to-userland.patch | 35
-rw-r--r--  patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch | 3
-rw-r--r--  patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock | 101
-rw-r--r--  patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu | 42
-rw-r--r--  patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly | 49
-rw-r--r--  patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch | 160
-rw-r--r--  patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch | 66
-rw-r--r--  patches.fixes/0001-PCI-xilinx-nwl-Add-missing-of_node_put.patch | 33
-rw-r--r--  patches.fixes/0001-USB-Add-new-USB-LPM-helpers.patch | 152
-rw-r--r--  patches.fixes/0001-USB-Consolidate-LPM-checks-to-avoid-enabling-LPM-twi.patch | 123
-rw-r--r--  patches.fixes/0001-drm-i915-Fix-I915_EXEC_RING_MASK.patch | 36
-rw-r--r--  patches.fixes/0001-media-vb2-don-t-call-__vb2_queue_cancel-if-vb2_start.patch | 42
-rw-r--r--  patches.fixes/0002-drm-fb-helper-dpms_legacy-Only-set-on-connectors-in-.patch | 54
-rw-r--r--  patches.fixes/nvme-fc-resolve-io-failures-during-connect.patch | 192
-rw-r--r--  patches.fixes/sched-smt-expose-sched_smt_present-static-key.patch | 83
-rw-r--r--  patches.fixes/sched-smt-make-sched_smt_present-track-topology.patch | 91
-rw-r--r--  patches.kabi/kabi-deduplicate-X86_FEATURE_L1TF_PTEINV.patch | 4
-rw-r--r--  patches.kernel.org/4.4.179-078-net-rds-force-to-destroy-connection-if-t_sock.patch | 2
-rw-r--r--  patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch | 33
-rw-r--r--  patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch | 22
-rw-r--r--  patches.suse/06-x86-idle-toggle-ibrs-when-going-idle.patch | 8
-rw-r--r--  patches.suse/do-not-default-to-ibrs-on-skl.patch | 10
-rw-r--r--  patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch | 32
-rw-r--r--  patches.suse/retpolines-disable-ibrs-on-non-skl.patch | 16
-rw-r--r--  series.conf | 64
60 files changed, 4157 insertions, 129 deletions
diff --git a/blacklist.conf b/blacklist.conf
index ddce605da2..201a8afbe5 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -652,3 +652,4 @@ c15562c0dcb2c7f26e891923b784cf1926b8c833 # we don't provide libusbip from here
45bb8d802742842fa974b0d7d474d115df1d07db # not a bug
b68f3cc7d978943fcf85148165b00594c38db776 # We're not building 32bit x86 kernels from this branch
bd99f9a159b072be743c6681f81e06b9ebd370a4 # not needed for bsc#1126040 backport
+250854eed5d45a73d81e4137dfd85180af6f2ec3 # falsely attributed
diff --git a/patches.arch/cpu-speculation-add-mitigations-cmdline-option.patch b/patches.arch/cpu-speculation-add-mitigations-cmdline-option.patch
index d7ab0da829..1b4d059378 100644
--- a/patches.arch/cpu-speculation-add-mitigations-cmdline-option.patch
+++ b/patches.arch/cpu-speculation-add-mitigations-cmdline-option.patch
@@ -1,8 +1,7 @@
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Fri, 12 Apr 2019 15:39:28 -0500
Subject: cpu/speculation: Add 'mitigations=' cmdline option
-Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
-Patch-mainline: Queued for v5.2
+Patch-mainline: v5.2-rc1
Git-commit: 98af8452945c55652de68536afdde3b520fec429
References: bsc#1112178
diff --git a/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch b/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch
new file mode 100644
index 0000000000..19579911aa
--- /dev/null
+++ b/patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch
@@ -0,0 +1,57 @@
+From: Eduardo Habkost <ehabkost@redhat.com>
+Date: Wed, 5 Dec 2018 17:19:56 -0200
+Subject: kvm: x86: Report STIBP on GET_SUPPORTED_CPUID
+Git-commit: d7b09c827a6cf291f66637a36f46928dd1423184
+Patch-mainline: v5.0-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Months ago, we added code to allow the guest direct access to
+MSR_IA32_SPEC_CTRL, which makes STIBP available to guests. This was
+implemented by commits d28b387fb74d ("KVM/VMX: Allow direct access to
+MSR_IA32_SPEC_CTRL") and b2ac58f90540 ("KVM/SVM: Allow direct access to
+MSR_IA32_SPEC_CTRL").
+
+However, we never updated GET_SUPPORTED_CPUID to let userspace know that
+STIBP can be enabled in CPUID. Fix that by updating
+kvm_cpuid_8000_0008_ebx_x86_features and kvm_cpuid_7_0_edx_x86_features.
+
+Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kvm/cpuid.c | 4 ++--
+ arch/x86/kvm/cpuid.h | 1 +
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -353,7 +353,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD) | F(AMD_SSB_NO);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD) | F(AMD_SSB_NO) | F(AMD_STIBP);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -380,7 +380,7 @@ static inline int __do_cpuid_ent(struct
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS) | KF(SPEC_CTRL) |
+- KF(SPEC_CTRL_SSBD) | KF(ARCH_CAPABILITIES);
++ KF(SPEC_CTRL_SSBD) | KF(ARCH_CAPABILITIES) | KF(INTEL_STIBP);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -6,6 +6,7 @@
+ /* These are scattered features in cpufeatures.h. */
+ #define KVM_CPUID_BIT_AVX512_4VNNIW 2
+ #define KVM_CPUID_BIT_AVX512_4FMAPS 3
++#define KVM_CPUID_BIT_INTEL_STIBP 21
+ #define KVM_CPUID_BIT_SPEC_CTRL 26
+ #define KVM_CPUID_BIT_ARCH_CAPABILITIES 29
+ #define KVM_CPUID_BIT_SPEC_CTRL_SSBD 31
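The two hunks above extend KVM's reported feature masks. The underlying
hardware enumeration is Intel STIBP in CPUID.(EAX=7,ECX=0):EDX[27] (the KF()
helpers remap that into the kernel's scattered-feature word) and AMD STIBP in
CPUID.(EAX=0x80000008):EBX[15]. As a minimal standalone sketch, the raw host
bits can be queried like this; host_has_stibp() is our own illustrative
helper, not a kernel or KVM function:

/*
 * Hedged sketch: check the raw STIBP CPUID bits that the patch lets KVM
 * report to guests. Bit positions follow the vendor documentation; needs
 * GCC/clang <cpuid.h>.
 */
#include <cpuid.h>
#include <stdio.h>

static int host_has_stibp(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Intel: CPUID.(EAX=7,ECX=0):EDX[27] */
	if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) &&
	    (edx & (1u << 27)))
		return 1;

	/* AMD: CPUID.(EAX=0x80000008):EBX[15] */
	if (__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx) &&
	    (ebx & (1u << 15)))
		return 1;

	return 0;
}

int main(void)
{
	printf("STIBP %s\n", host_has_stibp() ? "enumerated" : "not enumerated");
	return 0;
}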
diff --git a/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch b/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch
new file mode 100644
index 0000000000..e431bdf60d
--- /dev/null
+++ b/patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch
@@ -0,0 +1,90 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 19 Jun 2018 13:53:08 +0100
+Subject: locking/atomics, asm-generic: Move some macros from <linux/bitops.h>
+ to a new <linux/bits.h> file
+Git-commit: 8bd9cb51daac89337295b6f037b0486911e1b408
+Patch-mainline: v4.19-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+In preparation for implementing the asm-generic atomic bitops in terms
+of atomic_long_*(), we need to prevent <asm/atomic.h> implementations from
+pulling in <linux/bitops.h>. A common reason for this include is for the
+BITS_PER_BYTE definition, so move this and some other BIT() and masking
+macros into a new header file, <linux/bits.h>.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: yamada.masahiro@socionext.com
+Link: https://lore.kernel.org/lkml/1529412794-17720-4-git-send-email-will.deacon@arm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ include/linux/bitops.h | 21 +--------------------
+ include/linux/bits.h | 26 ++++++++++++++++++++++++++
+ 2 files changed, 27 insertions(+), 20 deletions(-)
+
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -1,28 +1,9 @@
+ #ifndef _LINUX_BITOPS_H
+ #define _LINUX_BITOPS_H
+ #include <asm/types.h>
++#include <linux/bits.h>
+
+-#ifdef __KERNEL__
+-#define BIT(nr) (1UL << (nr))
+-#define BIT_ULL(nr) (1ULL << (nr))
+-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+-#define BITS_PER_BYTE 8
+ #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+-#endif
+-
+-/*
+- * Create a contiguous bitmask starting at bit position @l and ending at
+- * position @h. For example
+- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+- */
+-#define GENMASK(h, l) \
+- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+-
+-#define GENMASK_ULL(h, l) \
+- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+ extern unsigned int __sw_hweight8(unsigned int w);
+ extern unsigned int __sw_hweight16(unsigned int w);
+--- /dev/null
++++ b/include/linux/bits.h
+@@ -0,0 +1,26 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef __LINUX_BITS_H
++#define __LINUX_BITS_H
++#include <asm/bitsperlong.h>
++
++#define BIT(nr) (1UL << (nr))
++#define BIT_ULL(nr) (1ULL << (nr))
++#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
++#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
++#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
++#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
++#define BITS_PER_BYTE 8
++
++/*
++ * Create a contiguous bitmask starting at bit position @l and ending at
++ * position @h. For example
++ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
++ */
++#define GENMASK(h, l) \
++ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
++
++#define GENMASK_ULL(h, l) \
++ (((~0ULL) - (1ULL << (l)) + 1) & \
++ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
++
++#endif /* __LINUX_BITS_H */
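The comment above documents GENMASK_ULL(39, 21) == 0x000000ffffe00000; the
arithmetic form ((~0ULL) - (1ULL << (l)) + 1) is simply an overflow-safe way
of producing "all bits from l upward". A standalone sanity check of that
example, ours and not part of the patch:

/* Verify the documented GENMASK_ULL example with the macro copied as-is. */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG_LONG 64
#define GENMASK_ULL(h, l) \
	(((~0ULL) - (1ULL << (l)) + 1) & \
	 (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

int main(void)
{
	assert(GENMASK_ULL(39, 21) == 0x000000ffffe00000ULL); /* bits 21..39 */
	assert(GENMASK_ULL(7, 0) == 0xffULL);                 /* low byte */
	printf("GENMASK_ULL(39, 21) = %#llx\n", GENMASK_ULL(39, 21));
	return 0;
}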
diff --git a/patches.arch/locking-static_keys-improve-uninitialized-key-warning.patch b/patches.arch/locking-static_keys-improve-uninitialized-key-warning.patch
new file mode 100644
index 0000000000..b71ed265cd
--- /dev/null
+++ b/patches.arch/locking-static_keys-improve-uninitialized-key-warning.patch
@@ -0,0 +1,207 @@
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 18 Oct 2017 17:24:28 +0200
+Subject: locking/static_keys: Improve uninitialized key warning
+Git-commit: 5cdda5117e125e0dbb020425cc55a4c143c6febc
+Patch-mainline: v4.15-rc1
+References: bsc#1106913
+
+Right now it says:
+
+ static_key_disable_cpuslocked used before call to jump_label_init
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 0 at kernel/jump_label.c:161 static_key_disable_cpuslocked+0x68/0x70
+ Modules linked in:
+ CPU: 0 PID: 0 Comm: swapper Not tainted 4.14.0-rc5+ #1
+ Hardware name: SGI.COM C2112-4GP3/X10DRT-P-Series, BIOS 2.0a 05/09/2016
+ task: ffffffff81c0e480 task.stack: ffffffff81c00000
+ RIP: 0010:static_key_disable_cpuslocked+0x68/0x70
+ RSP: 0000:ffffffff81c03ef0 EFLAGS: 00010096 ORIG_RAX: 0000000000000000
+ RAX: 0000000000000041 RBX: ffffffff81c32680 RCX: ffffffff81c5cbf8
+ RDX: 0000000000000001 RSI: 0000000000000092 RDI: 0000000000000002
+ RBP: ffff88807fffd240 R08: 726f666562206465 R09: 0000000000000136
+ R10: 0000000000000000 R11: 696e695f6c656261 R12: ffffffff82158900
+ R13: ffffffff8215f760 R14: 0000000000000001 R15: 0000000000000008
+ FS: 0000000000000000(0000) GS:ffff883f7f400000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: ffff88807ffff000 CR3: 0000000001c09000 CR4: 00000000000606b0
+ Call Trace:
+ static_key_disable+0x16/0x20
+ start_kernel+0x15a/0x45d
+ ? load_ucode_intel_bsp+0x11/0x2d
+ secondary_startup_64+0xa5/0xb0
+ Code: 48 c7 c7 a0 15 cf 81 e9 47 53 4b 00 48 89 df e8 5f fc ff ff eb e8 48 c7 c6 \
+ c0 97 83 81 48 c7 c7 d0 ff a2 81 31 c0 e8 c5 9d f5 ff <0f> ff eb a7 0f ff eb \
+ b0 e8 eb a2 4b 00 53 48 89 fb e8 42 0e f0
+
+but it doesn't tell me which key it is. So dump the key's name too:
+
+ static_key_disable_cpuslocked(): static key 'virt_spin_lock_key' used before call to jump_label_init()
+
+And that makes pinpointing which key is causing that a lot easier.
+
+ include/linux/jump_label.h | 14 +++++++-------
+ include/linux/jump_label_ratelimit.h | 6 +++---
+ kernel/jump_label.c | 14 +++++++-------
+ 3 files changed, 17 insertions(+), 17 deletions(-)
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20171018152428.ffjgak4o25f7ept6@pd.tnic
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ include/linux/jump_label.h | 14 +++++++-------
+ include/linux/jump_label_ratelimit.h | 6 +++---
+ kernel/jump_label.c | 14 +++++++-------
+ 3 files changed, 17 insertions(+), 17 deletions(-)
+
+diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
+index cd5861651b17..979a2f2d529b 100644
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -81,9 +81,9 @@
+
+ extern bool static_key_initialized;
+
+-#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
+- "%s used before call to jump_label_init", \
+- __func__)
++#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \
++ "%s(): static key '%pS' used before call to jump_label_init()", \
++ __func__, (key))
+
+ #ifdef HAVE_JUMP_LABEL
+
+@@ -211,13 +211,13 @@ static __always_inline bool static_key_true(struct static_key *key)
+
+ static inline void static_key_slow_inc(struct static_key *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ atomic_inc(&key->enabled);
+ }
+
+ static inline void static_key_slow_dec(struct static_key *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ atomic_dec(&key->enabled);
+ }
+
+@@ -236,7 +236,7 @@ static inline int jump_label_apply_nops(struct module *mod)
+
+ static inline void static_key_enable(struct static_key *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+
+ if (atomic_read(&key->enabled) != 0) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+@@ -247,7 +247,7 @@ static inline void static_key_enable(struct static_key *key)
+
+ static inline void static_key_disable(struct static_key *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+
+ if (atomic_read(&key->enabled) != 1) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
+index 23da3af459fe..93086df0a847 100644
+--- a/include/linux/jump_label_ratelimit.h
++++ b/include/linux/jump_label_ratelimit.h
+@@ -24,18 +24,18 @@ struct static_key_deferred {
+ };
+ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ static_key_slow_dec(&key->key);
+ }
+ static inline void static_key_deferred_flush(struct static_key_deferred *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ }
+ static inline void
+ jump_label_rate_limit(struct static_key_deferred *key,
+ unsigned long rl)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ }
+ #endif /* HAVE_JUMP_LABEL */
+ #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 0bf2e8f5244a..8ff4ca4665ff 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -83,7 +83,7 @@ static void static_key_slow_inc_cpuslocked(struct static_key *key)
+ {
+ int v, v1;
+
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+
+ /*
+ * Careful if we get concurrent static_key_slow_inc() calls;
+@@ -128,7 +128,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+
+ void static_key_enable_cpuslocked(struct static_key *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+
+ if (atomic_read(&key->enabled) > 0) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(static_key_enable);
+
+ void static_key_disable_cpuslocked(struct static_key *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+
+ if (atomic_read(&key->enabled) != 1) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+@@ -224,21 +224,21 @@ static void jump_label_update_timeout(struct work_struct *work)
+
+ void static_key_slow_dec(struct static_key *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ __static_key_slow_dec(key, 0, NULL);
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec);
+
+ void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ __static_key_slow_dec(&key->key, key->timeout, &key->work);
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+
+ void static_key_deferred_flush(struct static_key_deferred *key)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ flush_delayed_work(&key->work);
+ }
+ EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+ void jump_label_rate_limit(struct static_key_deferred *key,
+ unsigned long rl)
+ {
+- STATIC_KEY_CHECK_USE();
++ STATIC_KEY_CHECK_USE(key);
+ key->timeout = rl;
+ INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
+ }
+
diff --git a/patches.arch/locking-static_keys-provide-declare-and-well-as-define-macros.patch b/patches.arch/locking-static_keys-provide-declare-and-well-as-define-macros.patch
new file mode 100644
index 0000000000..2c9148245a
--- /dev/null
+++ b/patches.arch/locking-static_keys-provide-declare-and-well-as-define-macros.patch
@@ -0,0 +1,43 @@
+From: Tony Luck <tony.luck@intel.com>
+Date: Thu, 1 Sep 2016 11:39:33 -0700
+Subject: locking/static_keys: Provide DECLARE and well as DEFINE macros
+Git-commit: b8fb03785d4de097507d0cf45873525e0ac4d2b2
+Patch-mainline: v4.9-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+We will need to provide declarations of static keys in header
+files. Provide DECLARE_STATIC_KEY_{TRUE,FALSE} macros.
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: http://lkml.kernel.org/r/816881cf85bd3cf13385d212882618f38a3b5d33.1472754711.git.tony.luck@intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/jump_label.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -269,6 +269,9 @@ struct static_key_false {
+ #define DEFINE_STATIC_KEY_TRUE(name) \
+ struct static_key_true name = STATIC_KEY_TRUE_INIT
+
++#define DECLARE_STATIC_KEY_TRUE(name) \
++ extern struct static_key_true name
++
+ #define DEFINE_STATIC_KEY_FALSE(name) \
+ struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+@@ -282,6 +285,9 @@ struct static_key_false {
+ [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \
+ }
+
++#define DECLARE_STATIC_KEY_FALSE(name) \
++ extern struct static_key_false name
++
+ extern bool ____wrong_branch_error(void);
+
+ #define static_key_enabled(x) \
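The new DECLARE_* macros give headers the usual extern-declaration
counterpart to the DEFINE_* definition macros, so one translation unit
defines a key and any number of others can reference it. An illustrative
userspace reduction of that split, ours (the real struct static_key_true
wraps a jump-label key; it is collapsed to a plain int here, and both the
"header" and ".c file" halves live in one file for brevity):

#include <stdio.h>

struct static_key_true { int enabled; };

#define STATIC_KEY_TRUE_INIT		{ .enabled = 1 }
#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT
#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

DECLARE_STATIC_KEY_TRUE(demo_key);	/* would live in a shared header */
DEFINE_STATIC_KEY_TRUE(demo_key);	/* would live in exactly one .c file */

int main(void)
{
	printf("demo_key.enabled = %d\n", demo_key.enabled);
	return 0;
}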
diff --git a/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch b/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
index ce83ac8841..9f5a29a14b 100644
--- a/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
+++ b/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
@@ -5,8 +5,7 @@ Subject: [PATCH] powerpc/numa: document topology_updates_enabled, disable by
default
References: bsc#1133584
-Patch-mainline: queued
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
+Patch-mainline: v5.2-rc1
Git-commit: 558f86493df09f68f79fe056d9028d317a3ce8ab
Changing the NUMA associations for CPUs and memory at runtime is
diff --git a/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch b/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
index f62c6c2bae..3a04a469dd 100644
--- a/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
+++ b/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
@@ -4,8 +4,7 @@ Date: Thu, 18 Apr 2019 13:56:57 -0500
Subject: [PATCH] powerpc/numa: improve control of topology updates
References: bsc#1133584
-Patch-mainline: queued
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
+Patch-mainline: v5.2-rc1
Git-commit: 2d4d9b308f8f8dec68f6dbbff18c68ec7c6bd26f
When booted with "topology_updates=no", or when "off" is written to
diff --git a/patches.arch/powerpc-speculation-support-mitigations-cmdline-option.patch b/patches.arch/powerpc-speculation-support-mitigations-cmdline-option.patch
index d23c836b51..3ae21dcd8f 100644
--- a/patches.arch/powerpc-speculation-support-mitigations-cmdline-option.patch
+++ b/patches.arch/powerpc-speculation-support-mitigations-cmdline-option.patch
@@ -1,9 +1,8 @@
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Fri, 12 Apr 2019 15:39:30 -0500
Subject: powerpc/speculation: Support 'mitigations=' cmdline option
-Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
Git-commit: 782e69efb3dfed6e8360bc612e8c7827a901a8f9
-Patch-mainline: Queued for v5.2
+Patch-mainline: v5.2-rc1
References: bsc#1112178
Configure powerpc CPU runtime speculation bug mitigations in accordance
diff --git a/patches.arch/s390-speculation-support-mitigations-cmdline-option.patch b/patches.arch/s390-speculation-support-mitigations-cmdline-option.patch
index 80a0f1f66f..e88dd4010f 100644
--- a/patches.arch/s390-speculation-support-mitigations-cmdline-option.patch
+++ b/patches.arch/s390-speculation-support-mitigations-cmdline-option.patch
@@ -1,9 +1,8 @@
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Fri, 12 Apr 2019 15:39:31 -0500
Subject: s390/speculation: Support 'mitigations=' cmdline option
-Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
Git-commit: 0336e04a6520bdaefdb0769d2a70084fa52e81ed
-Patch-mainline: Queued for v5.2
+Patch-mainline: v5.2-rc1
References: bsc#1112178
Configure s390 runtime CPU speculation bug mitigations in accordance
diff --git a/patches.arch/sched-core-fix-cpu-max-vs-cpuhotplug-deadlock.patch b/patches.arch/sched-core-fix-cpu-max-vs-cpuhotplug-deadlock.patch
new file mode 100644
index 0000000000..cc7fc9843c
--- /dev/null
+++ b/patches.arch/sched-core-fix-cpu-max-vs-cpuhotplug-deadlock.patch
@@ -0,0 +1,121 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 22 Jan 2018 22:53:28 +0100
+Subject: sched/core: Fix cpu.max vs. cpuhotplug deadlock
+Git-commit: ce48c146495a1a50e48cdbfbfaba3e708be7c07c
+Patch-mainline: v4.15
+References: bsc#1106913
+
+Tejun reported the following cpu-hotplug lock (percpu-rwsem) read recursion:
+
+ tg_set_cfs_bandwidth()
+ get_online_cpus()
+ cpus_read_lock()
+
+ cfs_bandwidth_usage_inc()
+ static_key_slow_inc()
+ cpus_read_lock()
+
+Reported-by: Tejun Heo <tj@kernel.org>
+Tested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180122215328.GP3397@worktop
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ include/linux/jump_label.h | 7 +++++++
+ kernel/jump_label.c | 12 +++++++++---
+ kernel/sched/fair.c | 4 ++--
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -146,6 +146,8 @@ extern void arch_jump_label_transform_st
+ extern int jump_label_text_reserved(void *start, void *end);
+ extern void static_key_slow_inc(struct static_key *key);
+ extern void static_key_slow_dec(struct static_key *key);
++extern void static_key_slow_inc_cpuslocked(struct static_key *key);
++extern void static_key_slow_dec_cpuslocked(struct static_key *key);
+ extern void jump_label_apply_nops(struct module *mod);
+ extern int static_key_count(struct static_key *key);
+ extern void static_key_enable(struct static_key *key);
+@@ -208,6 +210,9 @@ static inline void static_key_slow_dec(s
+ atomic_dec(&key->enabled);
+ }
+
++#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
++#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
++
+ static inline int jump_label_text_reserved(void *start, void *end)
+ {
+ return 0;
+@@ -402,6 +407,8 @@ extern bool ____wrong_branch_error(void)
+
+ #define static_branch_inc(x) static_key_slow_inc(&(x)->key)
+ #define static_branch_dec(x) static_key_slow_dec(&(x)->key)
++#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
++#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
+
+ /*
+ * Normal usage; boolean enable/disable.
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -79,7 +79,7 @@ int static_key_count(struct static_key *
+ }
+ EXPORT_SYMBOL_GPL(static_key_count);
+
+-static void static_key_slow_inc_cpuslocked(struct static_key *key)
++void static_key_slow_inc_cpuslocked(struct static_key *key)
+ {
+ int v, v1;
+
+@@ -180,7 +180,7 @@ void static_key_disable(struct static_ke
+ }
+ EXPORT_SYMBOL_GPL(static_key_disable);
+
+-static void static_key_slow_dec_cpuslocked(struct static_key *key,
++static void __static_key_slow_dec_cpuslocked(struct static_key *key,
+ unsigned long rate_limit,
+ struct delayed_work *work)
+ {
+@@ -211,7 +211,7 @@ static void __static_key_slow_dec(struct
+ struct delayed_work *work)
+ {
+ get_online_cpus();
+- static_key_slow_dec_cpuslocked(key, rate_limit, work);
++ __static_key_slow_dec_cpuslocked(key, rate_limit, work);
+ put_online_cpus();
+ }
+
+@@ -229,6 +229,12 @@ void static_key_slow_dec(struct static_k
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec);
+
++void static_key_slow_dec_cpuslocked(struct static_key *key)
++{
++ STATIC_KEY_CHECK_USE(key);
++ __static_key_slow_dec_cpuslocked(key, 0, NULL);
++}
++
+ void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ {
+ STATIC_KEY_CHECK_USE(key);
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3681,12 +3681,12 @@ static inline bool cfs_bandwidth_used(vo
+
+ void cfs_bandwidth_usage_inc(void)
+ {
+- static_key_slow_inc(&__cfs_bandwidth_used);
++ static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
+ }
+
+ void cfs_bandwidth_usage_dec(void)
+ {
+- static_key_slow_dec(&__cfs_bandwidth_used);
++ static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
+ }
+ #else /* HAVE_JUMP_LABEL */
+ static bool cfs_bandwidth_used(void)
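The shape of the fix is the classic locked-variant split: the plain entry
points keep taking the cpu-hotplug lock, while the exported *_cpuslocked()
variants assume the caller already holds it, so tg_set_cfs_bandwidth(),
which has already done cpus_read_lock(), no longer recurses into the lock. A
small userspace analogue of the convention, ours (build with -pthread):

/*
 * Sketch of the *_cpuslocked() convention: key_inc() acquires the lock,
 * key_inc_locked() documents that the caller must already hold it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;

static void key_inc_locked(void)	/* caller holds hotplug_lock */
{
	refcount++;
}

static void key_inc(void)
{
	pthread_mutex_lock(&hotplug_lock);
	key_inc_locked();
	pthread_mutex_unlock(&hotplug_lock);
}

static void set_bandwidth(void)
{
	pthread_mutex_lock(&hotplug_lock);
	/* ... other work that needs the lock ... */
	key_inc_locked();	/* calling key_inc() here would self-deadlock */
	pthread_mutex_unlock(&hotplug_lock);
}

int main(void)
{
	key_inc();
	set_bandwidth();
	printf("refcount = %d\n", refcount);
	return 0;
}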
diff --git a/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch b/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch
new file mode 100644
index 0000000000..347287039e
--- /dev/null
+++ b/patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch
@@ -0,0 +1,66 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 1 Jun 2018 10:59:19 -0400
+Subject: x86/bugs: Add AMD's variant of SSB_NO
+Git-commit: 24809860012e0130fbafe536709e08a22b3e959e
+Patch-mainline: v4.18-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+The AMD document outlining the SSBD handling
+124441_AMD64_SpeculativeStoreBypassDisable_Whitepaper_final.pdf
+mentions that the CPUID 8000_0008.EBX[26] will mean that the
+speculative store bypass disable is no longer needed.
+
+A copy of this document is available at:
+ https://bugzilla.kernel.org/show_bug.cgi?id=199889
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: andrew.cooper3@citrix.com
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180601145921.9500-2-konrad.wilk@oracle.com
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/common.c | 3 ++-
+ arch/x86/kvm/cpuid.c | 2 +-
+ 3 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -308,6 +308,7 @@
+ #define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
++#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+ #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -946,7 +946,8 @@ static void __init cpu_set_bug_bits(stru
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_SSB_NO))
++ !(ia32_cap & ARCH_CAP_SSB_NO) &&
++ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_meltdown))
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -353,7 +353,7 @@ static inline int __do_cpuid_ent(struct
+
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+- F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
++ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD) | F(AMD_SSB_NO);
+
+ /* cpuid 0xC0000001.edx */
+ const u32 kvm_cpuid_C000_0001_edx_x86_features =
diff --git a/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch b/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch
new file mode 100644
index 0000000000..65434c22c3
--- /dev/null
+++ b/patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch
@@ -0,0 +1,215 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 7 Aug 2018 10:17:27 -0700
+Subject: x86/cpu: Sanitize FAM6_ATOM naming
+Git-commit: f2c4db1bd80720cd8cb2a5aa220d9bc9f374f04e
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+commit f2c4db1bd80720cd8cb2a5aa220d9bc9f374f04e upstream
+
+Going primarily by:
+
+ https://en.wikipedia.org/wiki/List_of_Intel_Atom_microprocessors
+
+with additional information gleaned from other related pages; notably:
+
+ - Bonnell shrink was called Saltwell
+ - Moorefield is the Merriefield refresh which makes it Airmont
+
+The general naming scheme is: FAM6_ATOM_UARCH_SOCTYPE
+
+ for i in `git grep -l FAM6_ATOM` ; do
+ sed -i -e 's/ATOM_PINEVIEW/ATOM_BONNELL/g' \
+ -e 's/ATOM_LINCROFT/ATOM_BONNELL_MID/' \
+ -e 's/ATOM_PENWELL/ATOM_SALTWELL_MID/g' \
+ -e 's/ATOM_CLOVERVIEW/ATOM_SALTWELL_TABLET/g' \
+ -e 's/ATOM_CEDARVIEW/ATOM_SALTWELL/g' \
+ -e 's/ATOM_SILVERMONT1/ATOM_SILVERMONT/g' \
+ -e 's/ATOM_SILVERMONT2/ATOM_SILVERMONT_X/g' \
+ -e 's/ATOM_MERRIFIELD/ATOM_SILVERMONT_MID/g' \
+ -e 's/ATOM_MOOREFIELD/ATOM_AIRMONT_MID/g' \
+ -e 's/ATOM_DENVERTON/ATOM_GOLDMONT_X/g' \
+ -e 's/ATOM_GEMINI_LAKE/ATOM_GOLDMONT_PLUS/g' ${i}
+ done
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: dave.hansen@linux.intel.com
+Cc: len.brown@intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[ 4.14.y speck backport, commit id there:
+ f0fae1c931dd3a49cd42855836fc3f075960d4be. diff for this backport was created
+ using the script above. ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/intel-family.h | 22 +++++++++++-----------
+ arch/x86/kernel/cpu/common.c | 28 ++++++++++++++--------------
+ drivers/cpufreq/intel_pstate.c | 2 +-
+ drivers/idle/intel_idle.c | 14 +++++++-------
+ drivers/powercap/intel_rapl.c | 8 ++++----
+ 5 files changed, 37 insertions(+), 37 deletions(-)
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -50,19 +50,19 @@
+
+ /* "Small Core" Processors (Atom) */
+
+-#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
+-#define INTEL_FAM6_ATOM_LINCROFT 0x26
+-#define INTEL_FAM6_ATOM_PENWELL 0x27
+-#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
+-#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
+-#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
+-#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
++#define INTEL_FAM6_ATOM_BONNELL 0x1C
++#define INTEL_FAM6_ATOM_BONNELL_MID 0x26
++#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27
++#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35
++#define INTEL_FAM6_ATOM_SALTWELL 0x36
++#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* BayTrail/BYT / Valleyview */
++#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton/Rangely */
+ #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
+-#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
+-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */
++#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Tangier */
++#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Annidale */
+ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C
+-#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
+-#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
++#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Goldmont Microserver */
++#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A
+
+ /* Xeon Phi */
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -878,11 +878,11 @@ static void identify_cpu_without_cpuid(s
+ }
+
+ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
+ { X86_VENDOR_CENTAUR, 5 },
+ { X86_VENDOR_INTEL, 5 },
+ { X86_VENDOR_NSC, 5 },
+@@ -897,10 +897,10 @@ static const __initconst struct x86_cpu_
+
+ /* Only list CPUs which speculate but are non susceptible to SSB */
+ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+@@ -913,14 +913,14 @@ static const __initconst struct x86_cpu_
+
+ static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+ /* in addition to cpu_no_speculation */
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ {}
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1060,7 +1060,7 @@ static void intel_pstate_timer_func(unsi
+ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(INTEL_FAM6_SANDYBRIDGE, core_params),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_params),
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_params),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_params),
+ ICPU(INTEL_FAM6_IVYBRIDGE, core_params),
+ ICPU(INTEL_FAM6_HASWELL_CORE, core_params),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, core_params),
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -1106,14 +1106,14 @@ static const struct x86_cpu_id intel_idl
+ ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
+- ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
+- ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
++ ICPU(INTEL_FAM6_ATOM_BONNELL, idle_cpu_atom),
++ ICPU(INTEL_FAM6_ATOM_BONNELL_MID, idle_cpu_lincroft),
+ ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
+ ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
+ ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
+- ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
+- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
++ ICPU(INTEL_FAM6_ATOM_SALTWELL, idle_cpu_atom),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT, idle_cpu_byt),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, idle_cpu_tangier),
+ ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
+ ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
+ ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
+@@ -1121,7 +1121,7 @@ static const struct x86_cpu_id intel_idl
+ ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
+ ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
+- ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
++ ICPU(INTEL_FAM6_ATOM_SILVERMONT_X, idle_cpu_avn),
+ ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
+ ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
+@@ -1134,7 +1134,7 @@ static const struct x86_cpu_id intel_idl
+ ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
+ ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
+- ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
++ ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv),
+ {}
+ };
+ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
+--- a/drivers/powercap/intel_rapl.c
++++ b/drivers/powercap/intel_rapl.c
+@@ -1166,12 +1166,12 @@ static const struct x86_cpu_id rapl_ids[
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
+
+- RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
++ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT, rapl_defaults_byt),
+ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
+- RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
+- RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
++ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID, rapl_defaults_tng),
++ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID, rapl_defaults_ann),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
+- RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
++ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X, rapl_defaults_core),
+
+ RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
+ RAPL_CPU(INTEL_FAM6_XEON_PHI_KNM, rapl_defaults_hsw_server),
diff --git a/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
new file mode 100644
index 0000000000..af9b0a1314
--- /dev/null
+++ b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
@@ -0,0 +1,53 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 18 Jan 2019 16:50:23 -0800
+Subject: x86/kvm: Expose X86_FEATURE_MD_CLEAR to guests
+Git-commit: 6c4dbbd14730c43f4ed808a9c42ca41625925c22
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+X86_FEATURE_MD_CLEAR is a new CPUID bit which is set when microcode
+provides the mechanism to invoke a flush of various exploitable CPU buffers
+by invoking the VERW instruction.
+
+Hand it through to guests so they can adjust their mitigations.
+
+This also requires corresponding qemu changes, which are available
+separately.
+
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kvm/cpuid.c | 3 ++-
+ arch/x86/kvm/cpuid.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -380,7 +380,8 @@ static inline int __do_cpuid_ent(struct
+ /* cpuid 7.0.edx*/
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ KF(AVX512_4VNNIW) | KF(AVX512_4FMAPS) | KF(SPEC_CTRL) |
+- KF(SPEC_CTRL_SSBD) | KF(ARCH_CAPABILITIES) | KF(INTEL_STIBP);
++ KF(SPEC_CTRL_SSBD) | KF(ARCH_CAPABILITIES) | KF(INTEL_STIBP) |
++ KF(MD_CLEAR);
+
+ /* all calls to cpuid_count() should be made on the same cpu */
+ get_cpu();
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -6,6 +6,7 @@
+ /* These are scattered features in cpufeatures.h. */
+ #define KVM_CPUID_BIT_AVX512_4VNNIW 2
+ #define KVM_CPUID_BIT_AVX512_4FMAPS 3
++#define KVM_CPUID_BIT_MD_CLEAR 10
+ #define KVM_CPUID_BIT_INTEL_STIBP 21
+ #define KVM_CPUID_BIT_SPEC_CTRL 26
+ #define KVM_CPUID_BIT_ARCH_CAPABILITIES 29
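Whether MD_CLEAR actually reaches guests can be verified from userspace via
the KVM_GET_SUPPORTED_CPUID ioctl that these masks feed. A hedged sketch,
ours; it assumes /dev/kvm access and a kernel carrying this change:

/* Query KVM's supported-CPUID list and report the MD_CLEAR bit. */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	struct kvm_cpuid2 *cpuid;

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	cpuid = calloc(1, sizeof(*cpuid) + 128 * sizeof(cpuid->entries[0]));
	if (!cpuid)
		return 1;
	cpuid->nent = 128;
	if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
		perror("KVM_GET_SUPPORTED_CPUID");
		return 1;
	}
	for (unsigned int i = 0; i < cpuid->nent; i++) {
		struct kvm_cpuid_entry2 *e = &cpuid->entries[i];

		/* MD_CLEAR is CPUID.(EAX=7,ECX=0):EDX[10] */
		if (e->function == 7 && e->index == 0)
			printf("MD_CLEAR for guests: %s\n",
			       (e->edx & (1u << 10)) ? "yes" : "no");
	}
	free(cpuid);
	close(kvm);
	return 0;
}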
diff --git a/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
new file mode 100644
index 0000000000..828cb4119f
--- /dev/null
+++ b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
@@ -0,0 +1,54 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 27 Feb 2019 12:48:14 +0100
+Subject: x86/kvm/vmx: Add MDS protection when L1D Flush is not active
+Git-commit: 650b68a0622f933444a6d66936abb3103029413b
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+CPUs which are affected by L1TF and MDS mitigate MDS with the L1D Flush on
+VMENTER when updated microcode is installed.
+
+If a CPU is not affected by L1TF or if the L1D Flush is not in use, then
+MDS mitigation needs to be invoked explicitly.
+
+For these cases, follow the host mitigation state and invoke the MDS
+mitigation before VMENTER.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 1 +
+ arch/x86/kvm/vmx.c | 3 +++
+ 2 files changed, 4 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -55,6 +55,7 @@ u64 x86_amd_ls_cfg_ssbd_mask;
+
+ /* Control MDS CPU buffer clear before returning to user space */
+ DEFINE_STATIC_KEY_FALSE(mds_user_clear);
++EXPORT_SYMBOL_GPL(mds_user_clear);
+
+ void __init check_bugs(void)
+ {
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8899,10 +8899,13 @@ static void __noclone vmx_vcpu_run(struc
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+
++ /* L1D Flush includes CPU buffer clear to mitigate MDS */
+ if (static_branch_unlikely(&vmx_l1d_should_flush)) {
+ if (vcpu->arch.l1tf_flush_l1d)
+ vmx_l1d_flush(vcpu);
+ }
++ else if (static_branch_unlikely(&mds_user_clear))
++ mds_clear_cpu_buffers();
+
+ asm(
+ /* Store host registers */
diff --git a/patches.arch/x86-msr-index-cleanup-bit-defines.patch b/patches.arch/x86-msr-index-cleanup-bit-defines.patch
new file mode 100644
index 0000000000..665de69c1a
--- /dev/null
+++ b/patches.arch/x86-msr-index-cleanup-bit-defines.patch
@@ -0,0 +1,102 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Feb 2019 12:36:50 +0100
+Subject: x86/msr-index: Cleanup bit defines
+Git-commit: d8eabc37310a92df40d07c5a8afc53cebf996716
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Greg pointed out that speculation related bit defines are using (1 << N)
+format instead of BIT(N). Aside of that (1 << N) is wrong as it should use
+1UL at least.
+
+Clean it up.
+
+[ Josh Poimboeuf: Fix tools build ]
+
+Reported-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/msr-index.h | 35 +++++++++++++++++++----------------
+ tools/power/x86/turbostat/Makefile | 2 +-
+ 2 files changed, 20 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -1,6 +1,8 @@
+ #ifndef _ASM_X86_MSR_INDEX_H
+ #define _ASM_X86_MSR_INDEX_H
+
++#include <linux/bits.h>
++
+ /* CPU model specific register (MSR) numbers */
+
+ /* x86-64 specific MSRs */
+@@ -33,13 +35,14 @@
+
+ /* Intel MSRs. Some also available on other CPUs */
+ #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+-#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+-#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
++#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
++#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
++#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
+ #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
++#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+-#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
++#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
+
+ #define MSR_IA32_PERFCTR0 0x000000c1
+ #define MSR_IA32_PERFCTR1 0x000000c2
+@@ -56,20 +59,20 @@
+ #define MSR_MTRRcap 0x000000fe
+
+ #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+-#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+-#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
+-#define ARCH_CAP_SSB_NO (1 << 4) /*
+- * Not susceptible to Speculative Store Bypass
+- * attack, so no Speculative Store Bypass
+- * control required.
+- */
++#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
++#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
++#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
++#define ARCH_CAP_SSB_NO BIT(4) /*
++ * Not susceptible to Speculative Store Bypass
++ * attack, so no Speculative Store Bypass
++ * control required.
++ */
+
+ #define MSR_IA32_FLUSH_CMD 0x0000010b
+-#define L1D_FLUSH (1 << 0) /*
+- * Writeback and invalidate the
+- * L1 data cache.
+- */
++#define L1D_FLUSH BIT(0) /*
++ * Writeback and invalidate the
++ * L1 data cache.
++ */
+
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+--- a/tools/power/x86/turbostat/Makefile
++++ b/tools/power/x86/turbostat/Makefile
+@@ -8,7 +8,7 @@ ifeq ("$(origin O)", "command line")
+ endif
+
+ turbostat : turbostat.c
+-CFLAGS += -Wall
++override CFLAGS += -Wall -I../../../include
+ CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
+
+ %: %.c
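The changelog's point that "(1 << N) is wrong as it should use 1UL at least"
is about plain int arithmetic: at bit 31 the literal overflows int (formally
undefined, INT_MIN in practice) and then sign-extends when widened to a
64-bit type. A short demonstration, ours, of the failure mode the BIT()
conversion avoids:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

int main(void)
{
	uint64_t bad  = (1 << 31);	/* int overflow, then sign-extension */
	uint64_t good = BIT(31);	/* unsigned long, widens cleanly */

	printf("(1 << 31) widened: %#018llx\n", (unsigned long long)bad);
	printf("BIT(31):           %#018llx\n", (unsigned long long)good);
	return 0;
}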
diff --git a/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
new file mode 100644
index 0000000000..72d54623c9
--- /dev/null
+++ b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
@@ -0,0 +1,164 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 27 Feb 2019 10:10:23 +0100
+Subject: x86/speculation: Consolidate CPU whitelists
+Git-commit: 36ad35131adacc29b328b9c8b6277a8bf0d6fd5d
+Patch-mainline: v5.1-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+The CPU vulnerability whitelists have some overlap and there are more
+whitelists coming along.
+
+Use the driver_data field in the x86_cpu_id struct to denote the
+whitelisted vulnerabilities and combine all whitelists into one.
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 103 ++++++++++++++++++++++---------------------
+ 1 file changed, 55 insertions(+), 48 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -877,60 +877,68 @@ static void identify_cpu_without_cpuid(s
+ #endif
+ }
+
+-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
+- { X86_VENDOR_CENTAUR, 5 },
+- { X86_VENDOR_INTEL, 5 },
+- { X86_VENDOR_NSC, 5 },
+- { X86_VENDOR_ANY, 4 },
+- {}
+-};
++#define NO_SPECULATION BIT(0)
++#define NO_MELTDOWN BIT(1)
++#define NO_SSB BIT(2)
++#define NO_L1TF BIT(3)
++
++#define VULNWL(_vendor, _family, _model, _whitelist) \
++ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
++
++#define VULNWL_INTEL(model, whitelist) \
++ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
++
++#define VULNWL_AMD(family, whitelist) \
++ VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
++
++static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
++ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
++ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
++ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
++ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
++
++ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
++ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
++
++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF),
++
++ VULNWL_INTEL(CORE_YONAH, NO_SSB),
++
++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF),
++
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF),
+
+-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+- { X86_VENDOR_AMD },
++ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF),
+ {}
+ };
+
+-/* Only list CPUs which speculate but are non susceptible to SSB */
+-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+- { X86_VENDOR_AMD, 0x12, },
+- { X86_VENDOR_AMD, 0x11, },
+- { X86_VENDOR_AMD, 0x10, },
+- { X86_VENDOR_AMD, 0xf, },
+- {}
+-};
++static bool __init cpu_matches(unsigned long which)
++{
++ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+
+-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+- /* in addition to cpu_no_speculation */
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+- {}
+-};
++ return m && !!(m->driver_data & which);
++}
+
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
+- if (x86_match_cpu(cpu_no_speculation))
++ if (cpu_matches(NO_SPECULATION))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+@@ -939,12 +947,11 @@ static void __init cpu_set_bug_bits(stru
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+- if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+- !(ia32_cap & ARCH_CAP_SSB_NO) &&
++ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+- if (x86_match_cpu(cpu_no_meltdown))
++ if (cpu_matches(NO_MELTDOWN))
+ return;
+
+ /* Rogue Data Cache Load? No! */
+@@ -953,7 +960,7 @@ static void __init cpu_set_bug_bits(stru
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+- if (x86_match_cpu(cpu_no_l1tf))
++ if (cpu_matches(NO_L1TF))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_L1TF);
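The flag-based whitelist pattern introduced above can be modelled outside
the kernel. The standalone C sketch below uses made-up names (entry,
match_flags) and placeholder vendor/model values; it illustrates the
pattern only and is not the kernel's x86_cpu_id machinery:

  #include <stdio.h>

  #define NO_SPECULATION (1UL << 0)
  #define NO_MELTDOWN    (1UL << 1)
  #define NO_SSB         (1UL << 2)
  #define NO_L1TF        (1UL << 3)

  struct entry { int vendor, family, model; unsigned long flags; };

  /* One table; per-entry flags replace one table per vulnerability. */
  static const struct entry table[] = {
      { 0 /* INTEL */, 6,    0x37 /* SILVERMONT */, NO_SSB | NO_L1TF },
      { 2 /* AMD */,   0x0f, -1   /* any model */,  NO_MELTDOWN | NO_SSB | NO_L1TF },
      { -1, -1, -1, 0 } /* terminator */
  };

  /* Models cpu_matches(): one lookup answers every "immune to X?" query. */
  static int match_flags(int vendor, int family, int model, unsigned long which)
  {
      const struct entry *e;

      for (e = table; e->vendor != -1; e++) {
          if (e->vendor == vendor && e->family == family &&
              (e->model == -1 || e->model == model))
              return !!(e->flags & which);
      }
      return 0;
  }

  int main(void)
  {
      printf("%d\n", match_flags(0, 6, 0x37, NO_L1TF)); /* prints 1 */
      return 0;
  }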
diff --git a/patches.arch/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch b/patches.arch/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch
index 0ffd451b75..c2cc78cf8a 100644
--- a/patches.arch/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch
+++ b/patches.arch/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch
@@ -3,7 +3,7 @@ Date: Tue, 25 Sep 2018 14:38:55 +0200
Subject: x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation
Git-commit: 53c613fe6349994f023245519265999eed75957f
Patch-mainline: v4.20-rc1
-References: bsc#1106913
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
STIBP is a feature provided by certain Intel ucodes / CPUs. This feature
(once enabled) prevents cross-hyperthread control of decisions made by
@@ -36,16 +36,17 @@ Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: "SchauflerCasey" <casey.schaufler@intel.com>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251438240.15880@cbobk.fhfr.pm
-
+[ Backport only the machinery around arch_smt_update() so that the MDS pile
+ can be applied. The STIBP improvements come later. ]
Acked-by: Borislav Petkov <bp@suse.de>
---
- arch/x86/kernel/cpu/bugs.c | 55 ++++++++++++++++++++++++++++++++++++++++-----
- kernel/cpu.c | 11 ++++++++-
- 2 files changed, 60 insertions(+), 6 deletions(-)
+ arch/x86/kernel/cpu/bugs.c | 15 +++++++++++----
+ kernel/cpu.c | 11 ++++++++++-
+ 2 files changed, 21 insertions(+), 5 deletions(-)
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -34,12 +34,10 @@ static void __init spectre_v2_select_mit
+@@ -33,12 +33,10 @@ static void __init spectre_v2_select_mit
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
@@ -60,54 +61,20 @@ Acked-by: Borislav Petkov <bp@suse.de>
/*
* The vendor and possibly platform specific bits which can be modified in
-@@ -345,6 +343,46 @@ static enum spectre_v2_mitigation_cmd __
+@@ -329,6 +327,12 @@ static enum spectre_v2_mitigation_cmd __
return cmd;
}
-+static bool stibp_needed(void)
-+{
-+ if (spectre_v2_enabled == SPECTRE_V2_NONE)
-+ return false;
-+
-+ if (!boot_cpu_has(X86_FEATURE_STIBP))
-+ return false;
-+
-+ return true;
-+}
-+
-+static void update_stibp_msr(void *info)
-+{
-+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-+}
-+
+void arch_smt_update(void)
+{
-+ u64 mask;
-+
-+ if (!stibp_needed())
-+ return;
-+
+ mutex_lock(&spec_ctrl_mutex);
-+ mask = x86_spec_ctrl_base;
-+ if (cpu_smt_control == CPU_SMT_ENABLED)
-+ mask |= SPEC_CTRL_STIBP;
-+ else
-+ mask &= ~SPEC_CTRL_STIBP;
-+
-+ if (mask != x86_spec_ctrl_base) {
-+ pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
-+ cpu_smt_control == CPU_SMT_ENABLED ?
-+ "Enabling" : "Disabling");
-+ x86_spec_ctrl_base = mask;
-+ on_each_cpu(update_stibp_msr, NULL, 1);
-+ }
+ mutex_unlock(&spec_ctrl_mutex);
+}
+
- /* Check for Skylake-like CPUs (for IBRS handling) */
- static bool __init is_skylake_era(void)
+ static void __init spectre_v2_select_mitigation(void)
{
-@@ -451,6 +489,9 @@ retpoline_auto:
+ enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -413,6 +417,9 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
@@ -117,29 +84,6 @@ Acked-by: Borislav Petkov <bp@suse.de>
}
#undef pr_fmt
-@@ -792,6 +833,8 @@ static ssize_t l1tf_show_state(char *buf
- static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
- char *buf, unsigned int bug)
- {
-+ int ret;
-+
- if (!boot_cpu_has_bug(bug))
- return sprintf(buf, "Not affected\n");
-
-@@ -812,10 +855,12 @@ static ssize_t cpu_show_common(struct de
- case X86_BUG_SPECTRE_V2:
- if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) && x86_ibrs_enabled())
- return sprintf(buf, "Mitigation: IBRS+IBPB\n");
-- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-+ ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
- boot_cpu_has(X86_FEATURE_USE_IBPB) && x86_ibpb_enabled() ? ", IBPB" : "",
- boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-+ (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
- spectre_v2_module_string());
-+ return ret;
-
- case X86_BUG_SPEC_STORE_BYPASS:
- return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -950,6 +950,12 @@ static void cpuhp_online_cpu_device(unsi
diff --git a/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
new file mode 100644
index 0000000000..3244875799
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
@@ -0,0 +1,160 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 18 Jan 2019 16:50:16 -0800
+Subject: x86/speculation/mds: Add basic bug infrastructure for MDS
+Git-commit: ed5194c2732c8084af9fd159c146ea92bf137128
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Microarchitectural Data Sampling (MDS) is a class of side channel attacks
+on internal buffers in Intel CPUs. The variants are:
+
+ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
+ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
+ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
+
+MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
+dependent load (store-to-load forwarding) as an optimization. The forward
+can also happen to a faulting or assisting load operation for a different
+memory address, which can be exploited under certain conditions. Store
+buffers are partitioned between Hyper-Threads so cross thread forwarding is
+not possible. But if a thread enters or exits a sleep state the store
+buffer is repartitioned which can expose data from one thread to the other.
+
+MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
+L1 miss situations and to hold data which is returned or sent in response
+to a memory or I/O operation. Fill buffers can forward data to a load
+operation and also write data to the cache. When the fill buffer is
+deallocated it can retain the stale data of the preceding operations which
+can then be forwarded to a faulting or assisting load operation, which can
+be exploited under certain conditions. Fill buffers are shared between
+Hyper-Threads so cross thread leakage is possible.
+
+MLPDS leaks Load Port Data. Load ports are used to perform load operations
+from memory or I/O. The received data is then forwarded to the register
+file or a subsequent operation. In some implementations the Load Port can
+contain stale data from a previous operation which can be forwarded to
+faulting or assisting loads under certain conditions, which again can be
+exploited eventually. Load ports are shared between Hyper-Threads so cross
+thread leakage is possible.
+
+All variants have the same mitigation for single CPU thread case (SMT off),
+so the kernel can treat them as one MDS issue.
+
+Add the basic infrastructure to detect if the current CPU is affected by
+MDS.
+
+[ tglx: Rewrote changelog ]
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 ++
+ arch/x86/include/asm/msr-index.h | 5 +++++
+ arch/x86/kernel/cpu/common.c | 25 ++++++++++++++++---------
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ 4 files changed, 24 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -84,6 +84,7 @@
+ #define X86_FEATURE_IBRS ( 2*32+ 7) /* Indirect Branch Restricted Speculation */
+ #define X86_FEATURE_FLUSH_L1D ( 2*32+ 8) /* Flush L1D cache */
+ #define X86_FEATURE_TSX_FORCE_ABORT ( 2*32+10) /* "" TSX_FORCE_ABORT */
++#define X86_FEATURE_MD_CLEAR ( 2*32+11) /* VERW clears CPU buffers */
+
+ /* Other features, Linux-defined mapping, word 3 */
+ /* This range is used for feature bits which conflict or are synthesized */
+@@ -340,5 +341,6 @@
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
++#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -67,6 +67,11 @@
+ * attack, so no Speculative Store Bypass
+ * control required.
+ */
++#define ARCH_CAP_MDS_NO BIT(5) /*
++ * Not susceptible to
++ * Microarchitectural Data
++ * Sampling (MDS) vulnerabilities.
++ */
+
+ #define MSR_IA32_FLUSH_CMD 0x0000010b
+ #define L1D_FLUSH BIT(0) /*
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -881,6 +881,7 @@ static void identify_cpu_without_cpuid(s
+ #define NO_MELTDOWN BIT(1)
+ #define NO_SSB BIT(2)
+ #define NO_L1TF BIT(3)
++#define NO_MDS BIT(4)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -897,6 +898,7 @@ static const __initconst struct x86_cpu_
+ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
++ /* Intel Family 6 */
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
+@@ -913,17 +915,19 @@ static const __initconst struct x86_cpu_
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF),
+-
+- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF),
+- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF),
+- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF),
+- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF),
++
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
++
++ /* AMD Family 0xf - 0x12 */
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF),
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
+ {}
+ };
+
+@@ -951,6 +955,9 @@ static void __init cpu_set_bug_bits(stru
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO))
++ setup_force_cpu_bug(X86_BUG_MDS);
++
+ if (cpu_matches(NO_MELTDOWN))
+ return;
+
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -34,6 +34,7 @@ static const struct cpuid_bit cpuid_bits
+ { X86_FEATURE_INTEL_PT, CPUID_EBX,25, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
++ { X86_FEATURE_MD_CLEAR, CPUID_EDX,10, 0x00000007, 0 },
+ { X86_FEATURE_TSX_FORCE_ABORT, CPUID_EDX,13, 0x00000007, 0 },
+ { X86_FEATURE_SPEC_CTRL, CPUID_EDX,26, 0x00000007, 0 },
+ { X86_FEATURE_INTEL_STIBP, CPUID_EDX,27, 0x00000007, 0 },
diff --git a/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
new file mode 100644
index 0000000000..6133a203f9
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
@@ -0,0 +1,87 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 1 Mar 2019 20:21:08 +0100
+Subject: x86/speculation/mds: Add BUG_MSBDS_ONLY
+Git-commit: e261f209c3666e842fd645a1e31f001c3a26def9
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+This bug bit is set on CPUs which are only affected by Microarchitectural
+Store Buffer Data Sampling (MSBDS) and not by any other MDS variant.
+
+This is important because the Store Buffers are partitioned between
+Hyper-Threads so cross thread forwarding is not possible. But if a thread
+enters or exits a sleep state the store buffer is repartitioned which can
+expose data from one thread to the other. This transition can be mitigated.
+
+That means that for CPUs which are only affected by MSBDS, SMT can be
+enabled if the CPU is not affected by other SMT-sensitive vulnerabilities,
+e.g. L1TF. The XEON PHI variants fall into that category. So do the
+Silvermont/Airmont ATOMs, though for them it is not really relevant as they
+do not support SMT; mark them anyway for completeness' sake.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/common.c | 20 ++++++++++++--------
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -342,5 +342,6 @@
+ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+ #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
++#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -882,6 +882,7 @@ static void identify_cpu_without_cpuid(s
+ #define NO_SSB BIT(2)
+ #define NO_L1TF BIT(3)
+ #define NO_MDS BIT(4)
++#define MSBDS_ONLY BIT(5)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -905,16 +906,16 @@ static const __initconst struct x86_cpu_
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
+
+- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF),
+- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF),
++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
+
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
+
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
+@@ -955,8 +956,11 @@ static void __init cpu_set_bug_bits(stru
+ !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO))
++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
++ if (cpu_matches(MSBDS_ONLY))
++ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
++ }
+
+ if (cpu_matches(NO_MELTDOWN))
+ return;
diff --git a/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
new file mode 100644
index 0000000000..3aa897920c
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
@@ -0,0 +1,72 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 2 Apr 2019 09:59:33 -0500
+Subject: x86/speculation/mds: Add mds=full,nosmt cmdline option
+Git-repo: tip/tip
+Git-commit: d71eb0ce109a124b0fa714832823b9452f2762cf
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add the mds=full,nosmt cmdline option. This is like mds=full, but with
+SMT disabled if the CPU is vulnerable.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Jiri Kosina <jkosina@suse.cz>
+[ rip out docs ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/kernel-parameters.txt | 6 ++++--
+ arch/x86/kernel/cpu/bugs.c | 10 ++++++++++
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -156,6 +156,7 @@ static const char *spectre_v2_strings[]
+
+ /* Default mitigation for MDS-affected CPUs */
+ static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
++static bool mds_nosmt __ro_after_init = false;
+
+ static const char * const mds_strings[] = {
+ [MDS_MITIGATION_OFF] = "Vulnerable",
+@@ -173,8 +174,13 @@ static void __init mds_select_mitigation
+ if (mds_mitigation == MDS_MITIGATION_FULL) {
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ mds_mitigation = MDS_MITIGATION_VMWERV;
++
+ static_branch_enable(&mds_user_clear);
++
++ if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++ cpu_smt_disable(false);
+ }
++
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+ }
+
+@@ -190,6 +196,10 @@ static int __init mds_cmdline(char *str)
+ mds_mitigation = MDS_MITIGATION_OFF;
+ else if (!strcmp(str, "full"))
+ mds_mitigation = MDS_MITIGATION_FULL;
++ else if (!strcmp(str, "full,nosmt")) {
++ mds_mitigation = MDS_MITIGATION_FULL;
++ mds_nosmt = true;
++ }
+
+ return 0;
+ }
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2216,8 +2216,10 @@ bytes respectively. Such letter suffixes
+ This parameter controls the MDS mitigation. The
+ options are:
+
+- full - Enable MDS mitigation on vulnerable CPUs
+- off - Unconditionally disable MDS mitigation
++ full - Enable MDS mitigation on vulnerable CPUs
++ full,nosmt - Enable MDS mitigation and disable
++ SMT on vulnerable CPUs
++ off - Unconditionally disable MDS mitigation
+
+ Not specifying this option is equivalent to
+ mds=full.
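The strcmp cascade in mds_cmdline() above silently ignores unknown values,
which keeps the compiled-in default. A hedged standalone model of that
behaviour (mds_parse and the enum names are illustrative, not kernel
symbols):

  #include <stdio.h>
  #include <string.h>

  enum mds_mitigations { MDS_OFF, MDS_FULL };

  static enum mds_mitigations mitigation = MDS_FULL; /* built-in default */
  static int nosmt;

  /* Models the kernel's mds= early_param handler. */
  static void mds_parse(const char *str)
  {
      if (!strcmp(str, "off"))
          mitigation = MDS_OFF;
      else if (!strcmp(str, "full"))
          mitigation = MDS_FULL;
      else if (!strcmp(str, "full,nosmt")) {
          mitigation = MDS_FULL;
          nosmt = 1;
      }
      /* Anything else falls through and keeps the default. */
  }

  int main(void)
  {
      mds_parse("full,nosmt");
      printf("mitigation=%d nosmt=%d\n", mitigation, nosmt); /* 1 1 */
      return 0;
  }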
diff --git a/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
new file mode 100644
index 0000000000..7b63508b80
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
@@ -0,0 +1,79 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 23:13:06 +0100
+Subject: x86/speculation/mds: Add mds_clear_cpu_buffers()
+Git-commit: c6ce193021e73b236bd4354770c0125daada1a19
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+commit 6a9e529272517755904b7afa639f6db59ddb793e upstream
+
+The Microarchitectural Data Sampling (MDS) vulnerabilities are mitigated by
+clearing the affected CPU buffers. The mechanism for clearing the buffers
+uses the unused and obsolete VERW instruction in combination with a
+microcode update which triggers a CPU buffer clear when VERW is executed.
+
+Provide an inline function with the assembly magic. The argument of the VERW
+instruction must be a memory operand as documented:
+
+ "MD_CLEAR enumerates that the memory-operand variant of VERW (for
+ example, VERW m16) has been extended to also overwrite buffers affected
+ by MDS. This buffer overwriting functionality is not guaranteed for the
+ register operand variant of VERW."
+
+Documentation also recommends using a writable data segment selector:
+
+ "The buffer overwriting occurs regardless of the result of the VERW
+ permission check, as well as when the selector is null or causes a
+ descriptor load segment violation. However, for lowest latency we
+ recommend using a selector that indicates a valid writable data
+ segment."
+
+Add x86 specific documentation about MDS and the internal workings of the
+mitigation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+[ rip out docs ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/nospec-branch.h | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -248,6 +248,31 @@ do { \
+ preempt_enable(); \
+ } while (0)
+
++#include <asm/segment.h>
++
++/**
++ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
++ *
++ * This uses the otherwise unused and obsolete VERW instruction in
++ * combination with microcode which triggers a CPU buffer flush when the
++ * instruction is executed.
++ */
++static inline void mds_clear_cpu_buffers(void)
++{
++ static const u16 ds = __KERNEL_DS;
++
++ /*
++ * Has to be the memory-operand variant because only that
++ * guarantees the CPU buffer flush functionality according to
++ * documentation. The register-operand variant does not.
++ * Works with any segment selector, but a valid writable
++ * data segment is the fastest variant.
++ *
++ * "cc" clobber is required because VERW modifies ZF.
++ */
++ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
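VERW is not a privileged instruction, so the memory-operand form and its
constraints can be exercised from plain user space. The sketch below is an
assumption-laden illustration: 0x28 merely stands in for __KERNEL_DS (which
has no meaning in user mode), and without MD_CLEAR microcode the
instruction only performs the segment check and updates ZF:

  #include <stdint.h>

  static inline void clear_cpu_buffers(void)
  {
      static const uint16_t ds = 0x28; /* illustrative selector value */

      /*
       * The memory-operand variant is mandatory for the documented
       * buffer-clearing behaviour; "cc" because VERW writes ZF.
       */
      asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
  }

  int main(void)
  {
      clear_cpu_buffers(); /* harmless without updated microcode */
      return 0;
  }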
diff --git a/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
new file mode 100644
index 0000000000..53983ec497
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
@@ -0,0 +1,191 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 22:04:08 +0100
+Subject: x86/speculation/mds: Add mitigation control for MDS
+Git-commit: bc1241700acd82ec69fde98c5763ce51086269f8
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Now that the mitigations are in place, add a command line parameter to
+control the mitigation, a mitigation selector function and a SMT update
+mechanism.
+
+This is the minimal, straightforward initial implementation which just
+provides an always on/off mode. The command line parameter is:
+
+ mds=[full|off]
+
+This is consistent with the existing mitigations for other speculative
+hardware vulnerabilities.
+
+The idle invocation is dynamically updated according to the SMT state of
+the system similar to the dynamic update of the STIBP mitigation. The idle
+mitigation is limited to CPUs which are only affected by MSBDS and not any
+other variant, because the other variants cannot be mitigated on SMT
+enabled systems.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/kernel-parameters.txt | 22 +++++++++++
+ arch/x86/include/asm/processor.h | 5 ++
+ arch/x86/kernel/cpu/bugs.c | 72 ++++++++++++++++++++++++++++++++++++
+ 3 files changed, 99 insertions(+)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -871,4 +871,9 @@ enum l1tf_mitigations {
+
+ extern enum l1tf_mitigations l1tf_mitigation;
+
++enum mds_mitigations {
++ MDS_MITIGATION_OFF,
++ MDS_MITIGATION_FULL,
++};
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -13,6 +13,7 @@
+ #include <linux/module.h>
+ #include <linux/nospec.h>
+ #include <linux/prctl.h>
++#include <linux/sched/smt.h>
+
+ #include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+@@ -32,6 +33,7 @@
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+ static void __init l1tf_select_mitigation(void);
++static void __init mds_select_mitigation(void);
+
+ /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+ u64 x86_spec_ctrl_base;
+@@ -96,6 +98,8 @@ void __init check_bugs(void)
+
+ l1tf_select_mitigation();
+
++ mds_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -147,6 +151,50 @@ static const char *spectre_v2_strings[]
+ };
+
+ #undef pr_fmt
++#define pr_fmt(fmt) "MDS: " fmt
++
++/* Default mitigation for MDS-affected CPUs */
++static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
++
++static const char * const mds_strings[] = {
++ [MDS_MITIGATION_OFF] = "Vulnerable",
++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers"
++};
++
++static void __init mds_select_mitigation(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_MDS)) {
++ mds_mitigation = MDS_MITIGATION_OFF;
++ return;
++ }
++
++ if (mds_mitigation == MDS_MITIGATION_FULL) {
++ if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
++ static_branch_enable(&mds_user_clear);
++ else
++ mds_mitigation = MDS_MITIGATION_OFF;
++ }
++ pr_info("%s\n", mds_strings[mds_mitigation]);
++}
++
++static int __init mds_cmdline(char *str)
++{
++ if (!boot_cpu_has_bug(X86_BUG_MDS))
++ return 0;
++
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "off"))
++ mds_mitigation = MDS_MITIGATION_OFF;
++ else if (!strcmp(str, "full"))
++ mds_mitigation = MDS_MITIGATION_FULL;
++
++ return 0;
++}
++early_param("mds", mds_cmdline);
++
++#undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
+@@ -334,9 +382,33 @@ static enum spectre_v2_mitigation_cmd __
+ return cmd;
+ }
+
++/* Update the static key controlling the MDS CPU buffer clear in idle */
++static void update_mds_branch_idle(void)
++{
++ /*
++ * Enable the idle clearing if SMT is active on CPUs which are
++ * affected only by MSBDS and not any other MDS variant.
++ *
++ * The other variants cannot be mitigated when SMT is enabled, so
++ * clearing the buffers on idle just to prevent the Store Buffer
++ * repartitioning leak would be a window dressing exercise.
++ */
++ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
++ return;
++
++ if (sched_smt_active())
++ static_branch_enable(&mds_idle_clear);
++ else
++ static_branch_disable(&mds_idle_clear);
++}
++
+ void arch_smt_update(void)
+ {
+ mutex_lock(&spec_ctrl_mutex);
++
++ if (mds_mitigation == MDS_MITIGATION_FULL)
++ update_mds_branch_idle();
++
+ mutex_unlock(&spec_ctrl_mutex);
+ }
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2200,6 +2200,28 @@ bytes respectively. Such letter suffixes
+ Format: <first>,<last>
+ Specifies range of consoles to be captured by the MDA.
+
++ mds= [X86,INTEL]
++ Control mitigation for the Micro-architectural Data
++ Sampling (MDS) vulnerability.
++
++ Certain CPUs are vulnerable to an exploit against CPU
++ internal buffers which can forward information to a
++ disclosure gadget under certain conditions.
++
++ In vulnerable processors, the speculatively
++ forwarded data can be used in a cache side channel
++ attack, to access data to which the attacker does
++ not have direct access.
++
++ This parameter controls the MDS mitigation. The
++ options are:
++
++ full - Enable MDS mitigation on vulnerable CPUs
++ off - Unconditionally disable MDS mitigation
++
++ Not specifying this option is equivalent to
++ mds=full.
++
+ mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
+ Amount of memory to be used when the kernel is not able
+ to see the whole system memory or for test.
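The coupling between arch_smt_update() and the mds_idle_clear key can be
modelled without the jump-label machinery: a boolean toggled under a mutex,
driven by the SMT state. All names in this sketch are illustrative
stand-ins for the kernel primitives, not real interfaces:

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  static bool idle_clear;                 /* models mds_idle_clear */
  static pthread_mutex_t ctrl_mutex = PTHREAD_MUTEX_INITIALIZER;

  static bool msbds_only = true;          /* models X86_BUG_MSBDS_ONLY */
  static bool smt_active = true;          /* models sched_smt_active() */

  /* Models update_mds_branch_idle(): only MSBDS-only CPUs benefit. */
  static void update_idle_clear(void)
  {
      if (!msbds_only)
          return;
      idle_clear = smt_active;
  }

  /* Models arch_smt_update(): serialize mitigation state changes. */
  static void smt_update(void)
  {
      pthread_mutex_lock(&ctrl_mutex);
      update_idle_clear();
      pthread_mutex_unlock(&ctrl_mutex);
  }

  int main(void)
  {
      smt_update();
      printf("idle clear %s\n", idle_clear ? "enabled" : "disabled");
      return 0;
  }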
diff --git a/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
new file mode 100644
index 0000000000..d4eea418bc
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
@@ -0,0 +1,83 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 20 Feb 2019 09:40:40 +0100
+Subject: x86/speculation/mds: Add mitigation mode VMWERV
+Git-commit: 22dd8365088b6403630b82423cf906491859b65e
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+In virtualized environments it can happen that the host has the microcode
+update which utilizes the VERW instruction to clear CPU buffers, but the
+hypervisor is not yet updated to expose the X86_FEATURE_MD_CLEAR CPUID bit
+to guests.
+
+Introduce an internal mitigation mode VMWERV which enables the invocation
+of the CPU buffer clearing even if X86_FEATURE_MD_CLEAR is not set. If the
+system has no updated microcode this results in a pointless execution of
+the VERW instruction wasting a few CPU cycles. If the microcode is updated,
+but not exposed to a guest then the CPU buffers will be cleared.
+
+That said: Virtual Machines Will Eventually Receive Vaccine
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+[ rip out docs ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/processor.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++------
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -874,6 +874,7 @@ extern enum l1tf_mitigations l1tf_mitiga
+ enum mds_mitigations {
+ MDS_MITIGATION_OFF,
+ MDS_MITIGATION_FULL,
++ MDS_MITIGATION_VMWERV,
+ };
+
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -159,7 +159,8 @@ static enum mds_mitigations mds_mitigati
+
+ static const char * const mds_strings[] = {
+ [MDS_MITIGATION_OFF] = "Vulnerable",
+- [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers"
++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
++ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+ };
+
+ static void __init mds_select_mitigation(void)
+@@ -170,10 +171,9 @@ static void __init mds_select_mitigation
+ }
+
+ if (mds_mitigation == MDS_MITIGATION_FULL) {
+- if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+- static_branch_enable(&mds_user_clear);
+- else
+- mds_mitigation = MDS_MITIGATION_OFF;
++ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
++ mds_mitigation = MDS_MITIGATION_VMWERV;
++ static_branch_enable(&mds_user_clear);
+ }
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+ }
+@@ -407,8 +407,14 @@ void arch_smt_update(void)
+ {
+ mutex_lock(&spec_ctrl_mutex);
+
+- if (mds_mitigation == MDS_MITIGATION_FULL)
++ switch (mds_mitigation) {
++ case MDS_MITIGATION_FULL:
++ case MDS_MITIGATION_VMWERV:
+ update_mds_branch_idle();
++ break;
++ case MDS_MITIGATION_OFF:
++ break;
++ }
+
+ mutex_unlock(&spec_ctrl_mutex);
+ }
diff --git a/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
new file mode 100644
index 0000000000..4fe3c259e2
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
@@ -0,0 +1,62 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Wed, 17 Apr 2019 16:39:02 -0500
+Subject: x86/speculation/mds: Add 'mitigations=' support for MDS
+Git-repo: tip/tip
+Git-commit: 5c14068f87d04adc73ba3f41c2a303d3c3d1fa12
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add MDS to the new 'mitigations=' cmdline option.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/kernel-parameters.txt | 2 ++
+ arch/x86/kernel/cpu/bugs.c | 5 +++--
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 9aa3543a8723..18cad2b0392a 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2556,6 +2556,7 @@
+ spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
+ l1tf=off [X86]
++ mds=off [X86]
+
+ auto (default)
+ Mitigate all CPU vulnerabilities, but leave SMT
+@@ -2570,6 +2571,7 @@
+ if needed. This is for users who always want to
+ be fully mitigated, even if it means losing SMT.
+ Equivalent to: l1tf=flush,nosmt [X86]
++ mds=full,nosmt [X86]
+
+ mminit_loglevel=
+ [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 3c5c3c3ba734..667c273a66d7 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -233,7 +233,7 @@ static const char * const mds_strings[] = {
+
+ static void __init mds_select_mitigation(void)
+ {
+- if (!boot_cpu_has_bug(X86_BUG_MDS)) {
++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+ mds_mitigation = MDS_MITIGATION_OFF;
+ return;
+ }
+@@ -244,7 +244,8 @@ static void __init mds_select_mitigation(void)
+
+ static_branch_enable(&mds_user_clear);
+
+- if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
++ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+ }
+
+
diff --git a/patches.arch/x86-speculation-mds-add-smt-warning-message.patch b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch
new file mode 100644
index 0000000000..7c1fceb2d8
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch
@@ -0,0 +1,50 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 2 Apr 2019 10:00:51 -0500
+Subject: x86/speculation/mds: Add SMT warning message
+Git-commit: 39226ef02bfb43248b7db12a4fdccb39d95318e3
+Git-repo: tip/tip
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+MDS is exploitable when SMT is enabled. Make that clear with a one-time printk
+whenever SMT first gets enabled.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Jiri Kosina <jkosina@suse.cz>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -396,6 +396,9 @@ static enum spectre_v2_mitigation_cmd __
+ return cmd;
+ }
+
++#undef pr_fmt
++#define pr_fmt(fmt) fmt
++
+ /* Update the static key controlling the MDS CPU buffer clear in idle */
+ static void update_mds_branch_idle(void)
+ {
+@@ -416,6 +419,8 @@ static void update_mds_branch_idle(void)
+ static_branch_disable(&mds_idle_clear);
+ }
+
++#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
++
+ void arch_smt_update(void)
+ {
+ mutex_lock(&spec_ctrl_mutex);
+@@ -423,6 +428,8 @@ void arch_smt_update(void)
+ switch (mds_mitigation) {
+ case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_VMWERV:
++ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++ pr_warn_once(MDS_MSG_SMT);
+ update_mds_branch_idle();
+ break;
+ case MDS_MITIGATION_OFF:
diff --git a/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
new file mode 100644
index 0000000000..9f655b279e
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
@@ -0,0 +1,134 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 22:51:43 +0100
+Subject: x86/speculation/mds: Add sysfs reporting for MDS
+Git-commit: 8a4b06d391b0a42a373808979b5028f5c84d9c6a
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add the sysfs reporting file for MDS. It exposes the vulnerability and
+mitigation state similar to the existing files for the other speculative
+hardware vulnerabilities.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+
+ [ change the check in mds_show_state():
+ s/!hypervisor_is_type(X86_HYPER_NATIVE)/x86_hyper/
+ because backporting the pile converting to hypervisor_is_type() removes
+ exported symbols and that is a no-no for kABI. ]
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1
+ arch/x86/kernel/cpu/bugs.c | 26 +++++++++++++++++++++
+ drivers/base/cpu.c | 8 ++++++
+ include/linux/cpu.h | 2 +
+ 4 files changed, 37 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -29,6 +29,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/intel-family.h>
+ #include <asm/e820.h>
++#include <asm/hypervisor.h>
+
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+@@ -696,6 +697,22 @@ void arch_seccomp_spec_mitigate(struct t
+ }
+ #endif
+
++static ssize_t mds_show_state(char *buf)
++{
++ if (x86_hyper) {
++ return sprintf(buf, "%s; SMT Host state unknown\n",
++ mds_strings[mds_mitigation]);
++ }
++
++ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++ sched_smt_active() ? "mitigated" : "disabled");
++ }
++
++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
++ sched_smt_active() ? "vulnerable" : "disabled");
++}
++
+ static int ssb_prctl_get(struct task_struct *task)
+ {
+ switch (ssb_mode) {
+@@ -909,6 +926,10 @@ static ssize_t cpu_show_common(struct de
+ return l1tf_show_state(buf);
+ break;
+
++
++ case X86_BUG_MDS:
++ return mds_show_state(buf);
++
+ default:
+ break;
+ }
+@@ -940,4 +961,9 @@ ssize_t cpu_show_l1tf(struct device *dev
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
+ }
++
++ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
++}
+ #endif
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -278,6 +278,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ /sys/devices/system/cpu/vulnerabilities/l1tf
++ /sys/devices/system/cpu/vulnerabilities/mds
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -530,11 +530,18 @@ ssize_t __weak cpu_show_l1tf(struct devi
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_mds(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+ static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+ static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
++static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+@@ -542,6 +549,7 @@ static struct attribute *cpu_root_vulner
+ &dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
+ &dev_attr_l1tf.attr,
++ &dev_attr_mds.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -50,6 +50,8 @@ extern ssize_t cpu_show_spec_store_bypas
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_mds(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
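With the sysfs file in place, the mitigation state becomes scriptable from
user space. A minimal reader, assuming only the path this patch documents:

  #include <stdio.h>

  int main(void)
  {
      char line[128];
      FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mds", "r");

      if (!f) {
          /* Kernel without this patch set, or a non-x86 build. */
          perror("mds");
          return 1;
      }
      if (fgets(line, sizeof(line), f))
          /* e.g. "Mitigation: Clear CPU buffers; SMT vulnerable" */
          fputs(line, stdout);
      fclose(f);
      return 0;
  }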
diff --git a/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
new file mode 100644
index 0000000000..767685ae3f
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
@@ -0,0 +1,143 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 23:42:51 +0100
+Subject: x86/speculation/mds: Clear CPU buffers on exit to user
+Git-commit: 04dcbdb8057827b043b3c71aa397c4c63e67d086
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add a static key which controls the invocation of the CPU buffer clear
+mechanism on exit to user space and add the call into
+prepare_exit_to_usermode() and do_nmi() right before actually returning.
+
+Add documentation describing which kernel to user space transitions this
+covers and explain why some corner cases are not mitigated.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+[ rip out docs ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/entry/common.c | 3 +++
+ arch/x86/include/asm/nospec-branch.h | 15 +++++++++++++++
+ arch/x86/kernel/cpu/bugs.c | 3 +++
+ arch/x86/kernel/nmi.c | 4 ++++
+ arch/x86/kernel/traps.c | 8 ++++++++
+ 5 files changed, 33 insertions(+)
+
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -28,6 +28,7 @@
+ #include <asm/vdso.h>
+ #include <linux/uaccess.h>
+ #include <asm/cpufeature.h>
++#include <asm/nospec-branch.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/syscalls.h>
+@@ -295,6 +296,8 @@ __visible inline void prepare_exit_to_us
+ #endif
+
+ user_enter();
++
++ mds_user_clear_cpu_buffers();
+ }
+
+ #define SYSCALL_EXIT_WORK_FLAGS \
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -3,6 +3,8 @@
+ #ifndef _ASM_X86_NOSPEC_BRANCH_H_
+ #define _ASM_X86_NOSPEC_BRANCH_H_
+
++#include <linux/static_key.h>
++
+ #include <asm/alternative.h>
+ #include <asm/alternative-asm.h>
+ #include <asm/cpufeatures.h>
+@@ -248,6 +250,8 @@ do { \
+ preempt_enable(); \
+ } while (0)
+
++DECLARE_STATIC_KEY_FALSE(mds_user_clear);
++
+ #include <asm/segment.h>
+
+ /**
+@@ -273,6 +277,17 @@ static inline void mds_clear_cpu_buffers
+ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+ }
+
++/**
++ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
++ *
++ * Clear CPU buffers if the corresponding static key is enabled
++ */
++static inline void mds_user_clear_cpu_buffers(void)
++{
++ if (static_branch_likely(&mds_user_clear))
++ mds_clear_cpu_buffers();
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -53,6 +53,9 @@ static u64 x86_spec_ctrl_mask = SPEC_CTR
+ u64 x86_amd_ls_cfg_base;
+ u64 x86_amd_ls_cfg_ssbd_mask;
+
++/* Control MDS CPU buffer clear before returning to user space */
++DEFINE_STATIC_KEY_FALSE(mds_user_clear);
++
+ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -30,6 +30,7 @@
+ #include <asm/nmi.h>
+ #include <asm/x86_init.h>
+ #include <asm/reboot.h>
++#include <asm/nospec-branch.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/nmi.h>
+@@ -540,6 +541,9 @@ nmi_restart:
+ write_cr2(this_cpu_read(nmi_cr2));
+ if (this_cpu_dec_return(nmi_state))
+ goto nmi_restart;
++
++ if (user_mode(regs))
++ mds_user_clear_cpu_buffers();
+ }
+ NOKPROBE_SYMBOL(do_nmi);
+
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -61,6 +61,7 @@
+ #include <asm/alternative.h>
+ #include <asm/fpu/xstate.h>
+ #include <asm/trace/mpx.h>
++#include <asm/nospec-branch.h>
+ #include <asm/mpx.h>
+ #include <asm/vm86.h>
+
+@@ -337,6 +338,13 @@ dotraplinkage void do_double_fault(struc
+ regs->ip = (unsigned long)general_protection;
+ regs->sp = (unsigned long)&normal_regs->orig_ax;
+
++ /*
++ * This situation can be triggered by userspace via
++ * modify_ldt(2) and the return does not take the regular
++ * user space exit, so a CPU buffer clear is required when
++ * MDS mitigation is enabled.
++ */
++ mds_user_clear_cpu_buffers();
+ return;
+ }
+ #endif
diff --git a/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
new file mode 100644
index 0000000000..b38857f5ef
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
@@ -0,0 +1,165 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Feb 2019 23:04:01 +0100
+Subject: x86/speculation/mds: Conditionally clear CPU buffers on idle entry
+Git-commit: 07f07f55a29cb705e221eda7894dd67ab81ef343
+Patch-mainline: v5.1 or v5.1-rc3 (next release)
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Add a static key which controls the invocation of the CPU buffer clear
+mechanism on idle entry. This is independent of other MDS mitigations
+because the idle entry invocation to mitigate the potential leakage due to
+store buffer repartitioning is only necessary on SMT systems.
+
+Add the actual invocations to the different halt/mwait variants which
+covers all usage sites. mwaitx is not patched as it's not available on
+Intel CPUs.
+
+The buffer clear is only invoked before entering the C-state, to prevent
+stale data from the idling CPU from being spilled to the Hyper-Thread
+sibling after the store buffer got repartitioned and all entries became
+available to the non-idle sibling.
+
+When coming out of idle the store buffer is partitioned again so each
+sibling has half of it available. The CPU which returned from idle could
+then be speculatively exposed to contents of the sibling, but the buffers
+are flushed either on exit to user space or on VMENTER.
+
+When later on conditional buffer clearing is implemented on top of this,
+then there is no action required either because before returning to user
+space the context switch will set the condition flag which causes a flush
+on the return to user path.
+
+Note that the buffer clearing on idle is only sensible on CPUs which are
+solely affected by MSBDS and not any other variant of MDS, because the
+other MDS variants cannot be mitigated when SMT is enabled; there the
+buffer clearing on idle would be a window dressing exercise.
+
+This intentionally does not handle the case in the acpi/processor_idle
+driver which uses the legacy IO port interface for C-State transitions for
+two reasons:
+
+ - The acpi/processor_idle driver was replaced by the intel_idle driver
+ almost a decade ago. Anything Nehalem upwards supports it and defaults
+ to that new driver.
+
+ - The legacy IO port interface is likely to be used on older and therefore
+ unaffected CPUs or on systems which do not receive microcode updates
+ anymore, so there is no point in adding that.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Reviewed-by: Jon Masters <jcm@redhat.com>
+Tested-by: Jon Masters <jcm@redhat.com>
+[ rip out docs. ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/irqflags.h | 4 ++++
+ arch/x86/include/asm/mwait.h | 7 +++++++
+ arch/x86/include/asm/nospec-branch.h | 12 ++++++++++++
+ arch/x86/kernel/cpu/bugs.c | 3 +++
+ 4 files changed, 26 insertions(+)
+
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -8,6 +8,8 @@
+ * Interrupt control:
+ */
+
++#include <asm/nospec-branch.h>
++
+ /* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+ extern inline unsigned long native_save_fl(void);
+ extern inline unsigned long native_save_fl(void)
+@@ -49,11 +51,13 @@ static inline void native_irq_enable(voi
+
+ static inline void native_safe_halt(void)
+ {
++ mds_idle_clear_cpu_buffers();
+ asm volatile("sti; hlt": : :"memory");
+ }
+
+ static inline void native_halt(void)
+ {
++ mds_idle_clear_cpu_buffers();
+ asm volatile("hlt": : :"memory");
+ }
+
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -4,6 +4,7 @@
+ #include <linux/sched.h>
+
+ #include <asm/cpufeature.h>
++#include <asm/nospec-branch.h>
+
+ #define MWAIT_SUBSTATE_MASK 0xf
+ #define MWAIT_CSTATE_MASK 0xf
+@@ -38,6 +39,8 @@ static inline void __monitorx(const void
+
+ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
++ mds_idle_clear_cpu_buffers();
++
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+@@ -72,6 +75,8 @@ static inline void __mwait(unsigned long
+ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+ unsigned long ecx)
+ {
++ /* No MDS buffer clear as this is AMD/HYGON only */
++
+ /* "mwaitx %eax, %ebx, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xfb;"
+ :: "a" (eax), "b" (ebx), "c" (ecx));
+@@ -79,6 +84,8 @@ static inline void __mwaitx(unsigned lon
+
+ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
++ mds_idle_clear_cpu_buffers();
++
+ trace_hardirqs_on();
+ /* "mwait %eax, %ecx;" */
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -251,6 +251,7 @@ do { \
+ } while (0)
+
+ DECLARE_STATIC_KEY_FALSE(mds_user_clear);
++DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+ #include <asm/segment.h>
+
+@@ -288,6 +289,17 @@ static inline void mds_user_clear_cpu_bu
+ mds_clear_cpu_buffers();
+ }
+
++/**
++ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
++ *
++ * Clear CPU buffers if the corresponding static key is enabled
++ */
++static inline void mds_idle_clear_cpu_buffers(void)
++{
++ if (static_branch_likely(&mds_idle_clear))
++ mds_clear_cpu_buffers();
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ /*
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -56,6 +56,9 @@ u64 x86_amd_ls_cfg_ssbd_mask;
+ /* Control MDS CPU buffer clear before returning to user space */
+ DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+ EXPORT_SYMBOL_GPL(mds_user_clear);
++/* Control MDS CPU buffer clear before idling (halt, mwait) */
++DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
++EXPORT_SYMBOL_GPL(mds_idle_clear);
+
+ void __init check_bugs(void)
+ {
diff --git a/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
new file mode 100644
index 0000000000..c9762dddd2
--- /dev/null
+++ b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
@@ -0,0 +1,49 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Fri, 12 Apr 2019 17:50:58 -0400
+Subject: x86/speculation/mds: Print SMT vulnerable on MSBDS with mitigations off
+Git-repo: tip/tip
+Git-commit: e2c3c94788b08891dcf3dbe608f9880523ecd71b
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+This code is only for CPUs which are affected by MSBDS, but are *not*
+affected by the other two MDS issues.
+
+For such CPUs, enabling the mds_idle_clear mitigation is enough to
+mitigate SMT.
+
+However, if the user boots with 'mds=off' and still has SMT enabled, we
+should not report that SMT is mitigated:
+
+$cat /sys//devices/system/cpu/vulnerabilities/mds
+Vulnerable; SMT mitigated
+
+But rather:
+Vulnerable; SMT vulnerable
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20190412215118.294906495@localhost.localdomain
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 0642505dda69..6b8a55c7cebc 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1204,7 +1204,8 @@ static ssize_t mds_show_state(char *buf)
+
+ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+- sched_smt_active() ? "mitigated" : "disabled");
++ (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
++ sched_smt_active() ? "mitigated" : "disabled"));
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+
diff --git a/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
new file mode 100644
index 0000000000..258cae5139
--- /dev/null
+++ b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
@@ -0,0 +1,42 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Tue, 2 Apr 2019 10:00:14 -0500
+Subject: x86/speculation: Move arch_smt_update() call to after mitigation decisions
+Git-repo: tip/tip
+Git-commit: 7c3658b20194a5b3209a143f63bc9c643c6a3ae2
+Patch-mainline: Queued in a subsystem tree
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+arch_smt_update() now has a dependency on both Spectre v2 and MDS
+mitigations. Move its initial call to after all the mitigation decisions
+have been made.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Acked-by: Jiri Kosina <jkosina@suse.cz>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/bugs.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -101,6 +101,8 @@ void __init check_bugs(void)
+
+ mds_select_mitigation();
+
++ arch_smt_update();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -514,9 +516,6 @@ retpoline_auto:
+ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+ pr_info("Enabling Restricted Speculation for firmware calls\n");
+ }
+-
+- /* Enable STIBP if appropriate */
+- arch_smt_update();
+ }
+
+ #undef pr_fmt
diff --git a/patches.arch/x86-speculation-remove-redundant-arch_smt_update-invocation.patch b/patches.arch/x86-speculation-remove-redundant-arch_smt_update-invocation.patch
new file mode 100644
index 0000000000..e592a0d8dc
--- /dev/null
+++ b/patches.arch/x86-speculation-remove-redundant-arch_smt_update-invocation.patch
@@ -0,0 +1,54 @@
+From: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Date: Thu, 17 Jan 2019 02:10:59 -0800
+Subject: x86/speculation: Remove redundant arch_smt_update() invocation
+Git-commit: 34d66caf251df91ff27b24a3a786810d29989eca
+Patch-mainline: v5.0-rc5
+References: bsc#1111331
+
+With commit a74cfffb03b7 ("x86/speculation: Rework SMT state change"),
+arch_smt_update() is invoked from each individual CPU hotplug function.
+
+Therefore the extra arch_smt_update() call in the sysfs SMT control is
+redundant.
+
+Fixes: a74cfffb03b7 ("x86/speculation: Rework SMT state change")
+Signed-off-by: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: <konrad.wilk@oracle.com>
+Cc: <dwmw@amazon.co.uk>
+Cc: <bp@suse.de>
+Cc: <srinivas.eeda@oracle.com>
+Cc: <peterz@infradead.org>
+Cc: <hpa@zytor.com>
+Link: https://lkml.kernel.org/r/e2e064f2-e8ef-42ca-bf4f-76b612964752@default
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/cpu.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 91d5c38eb7e5..c0c7f64573ed 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2090,10 +2090,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ */
+ cpuhp_offline_cpu_device(cpu);
+ }
+- if (!ret) {
++ if (!ret)
+ cpu_smt_control = ctrlval;
+- arch_smt_update();
+- }
+ cpu_maps_update_done();
+ return ret;
+ }
+@@ -2104,7 +2102,6 @@ static int cpuhp_smt_enable(void)
+
+ cpu_maps_update_begin();
+ cpu_smt_control = CPU_SMT_ENABLED;
+- arch_smt_update();
+ for_each_present_cpu(cpu) {
+ /* Skip online CPUs and CPUs on offline nodes */
+ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+
diff --git a/patches.arch/x86-speculation-rework-smt-state-change.patch b/patches.arch/x86-speculation-rework-smt-state-change.patch
new file mode 100644
index 0000000000..77f965b543
--- /dev/null
+++ b/patches.arch/x86-speculation-rework-smt-state-change.patch
@@ -0,0 +1,109 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 25 Nov 2018 19:33:39 +0100
+Subject: x86/speculation: Rework SMT state change
+Git-commit: a74cfffb03b73d41e08f84c2e5c87dec0ce3db9f
+Patch-mainline: v4.20-rc5
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+arch_smt_update() is only called when the sysfs SMT control knob is
+changed. This means that when SMT is enabled in the sysfs control knob the
+system is considered to have SMT active even if all siblings are offline.
+
+To allow fine-grained control of the speculation mitigations, the actual SMT
+state is more interesting than the fact that siblings could be enabled.
+
+Rework the code, so arch_smt_update() is invoked from each individual CPU
+hotplug function, and simplify the update function while at it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.521974984@linutronix.de
+[ rip out the STIBP machinery for now, that comes later ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ include/linux/sched/smt.h | 2 ++
+ kernel/cpu.c | 16 +++++++++-------
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/include/linux/sched/smt.h
++++ b/include/linux/sched/smt.h
+@@ -15,4 +15,6 @@ static __always_inline bool sched_smt_ac
+ static inline bool sched_smt_active(void) { return false; }
+ #endif
+
++void arch_smt_update(void);
++
+ #endif
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -8,6 +8,7 @@
+ #include <linux/init.h>
+ #include <linux/notifier.h>
+ #include <linux/sched.h>
++#include <linux/sched/smt.h>
+ #include <linux/unistd.h>
+ #include <linux/cpu.h>
+ #include <linux/oom.h>
+@@ -440,6 +441,7 @@ out_release:
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
++ arch_smt_update();
+ return err;
+ }
+
+@@ -467,6 +469,12 @@ int cpu_down(unsigned int cpu)
+ EXPORT_SYMBOL(cpu_down);
+ #endif /*CONFIG_HOTPLUG_CPU*/
+
++/*
++ * Architectures that need SMT-specific errata handling during SMT hotplug
++ * should override this.
++ */
++void __weak arch_smt_update(void) { }
++
+ #ifdef CONFIG_HOTPLUG_SMT
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+ EXPORT_SYMBOL_GPL(cpu_smt_control);
+@@ -627,7 +635,7 @@ out_notify:
+ __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+ out:
+ cpu_hotplug_done();
+-
++ arch_smt_update();
+ return ret;
+ }
+
+@@ -950,12 +958,6 @@ static void cpuhp_online_cpu_device(unsi
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ }
+
+-/*
+- * Architectures that need SMT-specific errata handling during SMT hotplug
+- * should override this.
+- */
+-void __weak arch_smt_update(void) { };
+-
+ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
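The mechanism this patch relies on is the linker-level weak-symbol override:
kernel/cpu.c keeps an empty __weak default, and an architecture with
SMT-sensitive mitigations supplies a strong definition of the same name that
replaces it at link time. A minimal sketch of the pattern (the arch body is
illustrative, not the real implementation):

/* generic code: overridable no-op default */
void __weak arch_smt_update(void) { }

/* arch code: the strong symbol wins at link time and is invoked from the
 * hotplug paths patched above (illustrative body) */
void arch_smt_update(void)
{
	/* re-evaluate SMT-dependent speculation mitigations here */
}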
diff --git a/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch b/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch
new file mode 100644
index 0000000000..2ca003db81
--- /dev/null
+++ b/patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch
@@ -0,0 +1,84 @@
+From: Dominik Brodowski <linux@dominikbrodowski.net>
+Date: Tue, 22 May 2018 11:05:39 +0200
+Subject: x86/speculation: Simplify the CPU bug detection logic
+Git-commit: 8ecc4979b1bd9c94168e6fc92960033b7a951336
+Patch-mainline: v4.17-rc7
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Only CPUs which speculate can speculate. Therefore, it seems prudent
+to test for cpu_no_speculation first and only then determine whether
+a specific speculating CPU is susceptible to store bypass speculation.
+This is underlined by the fact that all CPUs currently listed in
+cpu_no_speculation were also present in cpu_no_spec_store_bypass.
+
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: bp@suse.de
+Cc: konrad.wilk@oracle.com
+Link: https://lkml.kernel.org/r/20180522090539.GA24668@light.dominikbrodowski.net
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 22 +++++++---------------
+ 1 file changed, 7 insertions(+), 15 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 78decc3e3067..38276f58d3bf 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -942,12 +942,8 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+ {}
+ };
+
++/* Only list CPUs which speculate but are non susceptible to SSB */
+ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+@@ -955,14 +951,10 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+- { X86_VENDOR_CENTAUR, 5, },
+- { X86_VENDOR_INTEL, 5, },
+- { X86_VENDOR_NSC, 5, },
+ { X86_VENDOR_AMD, 0x12, },
+ { X86_VENDOR_AMD, 0x11, },
+ { X86_VENDOR_AMD, 0x10, },
+ { X86_VENDOR_AMD, 0xf, },
+- { X86_VENDOR_ANY, 4, },
+ {}
+ };
+
+@@ -970,6 +962,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+
++ if (x86_match_cpu(cpu_no_speculation))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+@@ -977,12 +975,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ !(ia32_cap & ARCH_CAP_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+- if (x86_match_cpu(cpu_no_speculation))
+- return;
+-
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+-
+ if (x86_match_cpu(cpu_no_meltdown))
+ return;
+
+
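The whitelist idiom used above pairs a NULL-terminated x86_cpu_id table with
x86_match_cpu(), where a zero family or model field matches anything. A
reduced sketch with illustrative entries only:

/* illustrative subset, not the real table */
static const __initconst struct x86_cpu_id no_speculation_example[] = {
	{ X86_VENDOR_INTEL,	5 },	/* in-order P5-class cores */
	{ X86_VENDOR_ANY,	4 },	/* any 486-class CPU */
	{}				/* terminator */
};

static void __init example_set_bug_bits(struct cpuinfo_x86 *c)
{
	/* CPUs that do not speculate can skip every speculation bug flag */
	if (x86_match_cpu(no_speculation_example))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
}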
diff --git a/patches.arch/x86-speculation-support-mitigations-cmdline-option.patch b/patches.arch/x86-speculation-support-mitigations-cmdline-option.patch
index 1f2caf41d3..3a758ff0b7 100644
--- a/patches.arch/x86-speculation-support-mitigations-cmdline-option.patch
+++ b/patches.arch/x86-speculation-support-mitigations-cmdline-option.patch
@@ -1,9 +1,8 @@
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Fri, 12 Apr 2019 15:39:29 -0500
Subject: x86/speculation: Support 'mitigations=' cmdline option
-Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git
Git-commit: d68be4c4d31295ff6ae34a8ddfaa4c1a8ff42812
-Patch-mainline: Queued for v5.2
+Patch-mainline: v5.2-rc1
References: bsc#1112178
Configure x86 runtime CPU speculation bug mitigations in accordance with
diff --git a/patches.arch/x86-stop-exporting-msr-index-h-to-userland.patch b/patches.arch/x86-stop-exporting-msr-index-h-to-userland.patch
new file mode 100644
index 0000000000..c2e6a69e86
--- /dev/null
+++ b/patches.arch/x86-stop-exporting-msr-index-h-to-userland.patch
@@ -0,0 +1,35 @@
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Mon, 27 Mar 2017 14:20:08 +0200
+Subject: x86: stop exporting msr-index.h to userland
+Git-commit: 25dc1d6cc3082aab293e5dad47623b550f7ddd2a
+Patch-mainline: v4.12-rc1
+References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
+
+Even if this file was not in an uapi directory, it was exported because
+it was listed in the Kbuild file.
+
+Fixes: b72e7464e4cf ("x86/uapi: Do not export <asm/msr-index.h> as part of the user API headers")
+Suggested-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+ [ needed to fix make headers_check build ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/uapi/asm/Kbuild | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
+index 3dec769cadf7..1c532b3f18ea 100644
+--- a/arch/x86/include/uapi/asm/Kbuild
++++ b/arch/x86/include/uapi/asm/Kbuild
+@@ -27,7 +27,6 @@ header-y += ldt.h
+ header-y += mce.h
+ header-y += mman.h
+ header-y += msgbuf.h
+-header-y += msr-index.h
+ header-y += msr.h
+ header-y += mtrr.h
+ header-y += param.h
+
diff --git a/patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch b/patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch
index 5ddcb5196e..ddb42f7bbf 100644
--- a/patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch
+++ b/patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch
@@ -4,8 +4,7 @@ Date: Tue, 19 Mar 2019 10:28:51 -0300
Subject: [PATCH] ibmvnic: Report actual backing device speed and duplex values
References: bsc#1129923
-Patch-mainline: queued
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+Patch-mainline: v5.2-rc1
Git-commit: f8d6ae0d27ec1e81e4be454e63bc96086bbf8e6b
The ibmvnic driver currently reports a fixed value for both speed and
diff --git a/patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock b/patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock
new file mode 100644
index 0000000000..c6df381292
--- /dev/null
+++ b/patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock
@@ -0,0 +1,101 @@
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Fri, 19 Apr 2019 14:43:29 +0800
+Subject: iommu/vt-d: Don't request page request irq under dmar_global_lock
+Git-commit: a7755c3cfa5df755e39447b08c28203e011fb98c
+References: bsc#1135013
+Patch-mainline: v5.2-rc1
+
+Requesting the page request irq under dmar_global_lock can cause a
+potential lock inversion deadlock (caught by lockdep).
+
+[ 4.100055] ======================================================
+[ 4.100063] WARNING: possible circular locking dependency detected
+[ 4.100072] 5.1.0-rc4+ #2169 Not tainted
+[ 4.100078] ------------------------------------------------------
+[ 4.100086] swapper/0/1 is trying to acquire lock:
+[ 4.100094] 000000007dcbe3c3 (dmar_lock){+.+.}, at: dmar_alloc_hwirq+0x35/0x140
+[ 4.100112] but task is already holding lock:
+[ 4.100120] 0000000060bbe946 (dmar_global_lock){++++}, at: intel_iommu_init+0x191/0x1438
+[ 4.100136] which lock already depends on the new lock.
+[ 4.100146] the existing dependency chain (in reverse order) is:
+[ 4.100155]
+ -> #2 (dmar_global_lock){++++}:
+[ 4.100169] down_read+0x44/0xa0
+[ 4.100178] intel_irq_remapping_alloc+0xb2/0x7b0
+[ 4.100186] mp_irqdomain_alloc+0x9e/0x2e0
+[ 4.100195] __irq_domain_alloc_irqs+0x131/0x330
+[ 4.100203] alloc_isa_irq_from_domain.isra.4+0x9a/0xd0
+[ 4.100212] mp_map_pin_to_irq+0x244/0x310
+[ 4.100221] setup_IO_APIC+0x757/0x7ed
+[ 4.100229] x86_late_time_init+0x17/0x1c
+[ 4.100238] start_kernel+0x425/0x4e3
+[ 4.100247] secondary_startup_64+0xa4/0xb0
+[ 4.100254]
+ -> #1 (irq_domain_mutex){+.+.}:
+[ 4.100265] __mutex_lock+0x7f/0x9d0
+[ 4.100273] __irq_domain_add+0x195/0x2b0
+[ 4.100280] irq_domain_create_hierarchy+0x3d/0x40
+[ 4.100289] msi_create_irq_domain+0x32/0x110
+[ 4.100297] dmar_alloc_hwirq+0x111/0x140
+[ 4.100305] dmar_set_interrupt.part.14+0x1a/0x70
+[ 4.100314] enable_drhd_fault_handling+0x2c/0x6c
+[ 4.100323] apic_bsp_setup+0x75/0x7a
+[ 4.100330] x86_late_time_init+0x17/0x1c
+[ 4.100338] start_kernel+0x425/0x4e3
+[ 4.100346] secondary_startup_64+0xa4/0xb0
+[ 4.100352]
+ -> #0 (dmar_lock){+.+.}:
+[ 4.100364] lock_acquire+0xb4/0x1c0
+[ 4.100372] __mutex_lock+0x7f/0x9d0
+[ 4.100379] dmar_alloc_hwirq+0x35/0x140
+[ 4.100389] intel_svm_enable_prq+0x61/0x180
+[ 4.100397] intel_iommu_init+0x1128/0x1438
+[ 4.100406] pci_iommu_init+0x16/0x3f
+[ 4.100414] do_one_initcall+0x5d/0x2be
+[ 4.100422] kernel_init_freeable+0x1f0/0x27c
+[ 4.100431] kernel_init+0xa/0x110
+[ 4.100438] ret_from_fork+0x3a/0x50
+[ 4.100444]
+ other info that might help us debug this:
+
+[ 4.100454] Chain exists of:
+ dmar_lock --> irq_domain_mutex --> dmar_global_lock
+[ 4.100469] Possible unsafe locking scenario:
+
+[ 4.100476] CPU0 CPU1
+[ 4.100483] ---- ----
+[ 4.100488] lock(dmar_global_lock);
+[ 4.100495] lock(irq_domain_mutex);
+[ 4.100503] lock(dmar_global_lock);
+[ 4.100512] lock(dmar_lock);
+[ 4.100518]
+ *** DEADLOCK ***
+
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Cc: Kevin Tian <kevin.tian@intel.com>
+Reported-by: Dave Jiang <dave.jiang@intel.com>
+Fixes: a222a7f0bb6c9 ("iommu/vt-d: Implement page request handling")
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/intel-iommu.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3430,7 +3430,13 @@ domains_done:
+
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
++ /*
++ * Call dmar_alloc_hwirq() with dmar_global_lock held,
++ * could cause possible lock race condition.
++ */
++ up_write(&dmar_global_lock);
+ ret = intel_svm_enable_prq(iommu);
++ down_write(&dmar_global_lock);
+ if (ret)
+ goto free_iommu;
+ }
+
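The fix is an instance of a general lock-ordering workaround: instead of
nesting dmar_global_lock around a callee that acquires dmar_lock in a
conflicting order, the outer lock is dropped for the duration of the call.
A sketch of the shape with hypothetical names; note that any state guarded
by the outer lock must be re-validated once it is re-taken:

static int enable_feature_locked(struct example_iommu *iommu)
{
	int ret;

	/* break the outer_rwsem -> inner_mutex nesting lockdep flagged */
	up_write(&outer_rwsem);
	ret = enable_feature(iommu);	/* internally takes inner_mutex */
	down_write(&outer_rwsem);

	/* outer_rwsem was released above: re-check anything it protects */
	return ret;
}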
diff --git a/patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu b/patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu
new file mode 100644
index 0000000000..b40d16b670
--- /dev/null
+++ b/patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu
@@ -0,0 +1,42 @@
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Thu, 2 May 2019 09:34:26 +0800
+Subject: iommu/vt-d: Make kernel parameter igfx_off work with vIOMMU
+Git-commit: 5daab58043ee2bca861068e2595564828f3bc663
+References: bsc#1135014
+Patch-mainline: v5.2-rc1
+
+The kernel parameter igfx_off is used by users to disable
+DMA remapping for the Intel integrated graphic device. It
+was designed for bare metal cases where a dedicated IOMMU
+is used for graphics. This doesn't apply to the virtual
+IOMMU case, where an include-all IOMMU is used. This change
+makes the kernel parameter work with a virtual IOMMU as well.
+
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Suggested-by: Kevin Tian <kevin.tian@intel.com>
+Fixes: c0771df8d5297 ("intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.")
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Tested-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/intel-iommu.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3346,9 +3346,12 @@ static int __init init_dmars(void)
+ iommu_identity_mapping |= IDENTMAP_ALL;
+
+ #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
+- iommu_identity_mapping |= IDENTMAP_GFX;
++ dmar_map_gfx = 0;
+ #endif
+
++ if (!dmar_map_gfx)
++ iommu_identity_mapping |= IDENTMAP_GFX;
++
+ check_tylersburg_isoch();
+
+ if (iommu_identity_mapping) {
+
diff --git a/patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly b/patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly
new file mode 100644
index 0000000000..77f06087d6
--- /dev/null
+++ b/patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly
@@ -0,0 +1,49 @@
+From: Lu Baolu <baolu.lu@linux.intel.com>
+Date: Thu, 2 May 2019 09:34:25 +0800
+Subject: iommu/vt-d: Set intel_iommu_gfx_mapped correctly
+Git-commit: cf1ec4539a50bdfe688caad4615ca47646884316
+References: bsc#1135015
+Patch-mainline: v5.2-rc1
+
+The intel_iommu_gfx_mapped flag is exported by the Intel
+IOMMU driver to indicate whether an IOMMU is used for the
+graphic device. In a virtualized IOMMU environment (e.g.
+QEMU), an include-all IOMMU is used for the graphic device,
+but this flag is found to be clear even when the IOMMU is in use.
+
+Cc: Ashok Raj <ashok.raj@intel.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Cc: Kevin Tian <kevin.tian@intel.com>
+Reported-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Fixes: c0771df8d5297 ("intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.")
+Suggested-by: Kevin Tian <kevin.tian@intel.com>
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/intel-iommu.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4126,9 +4126,7 @@ static void __init init_no_remapping_dev
+
+ /* This IOMMU has *only* gfx devices. Either bypass it or
+ set the gfx_mapped flag, as appropriate */
+- if (dmar_map_gfx) {
+- intel_iommu_gfx_mapped = 1;
+- } else {
++ if (!dmar_map_gfx) {
+ drhd->ignored = 1;
+ for_each_active_dev_scope(drhd->devices,
+ drhd->devices_cnt, i, dev)
+@@ -4920,6 +4918,9 @@ int __init intel_iommu_init(void)
+ goto out_free_reserved_range;
+ }
+
++ if (dmar_map_gfx)
++ intel_iommu_gfx_mapped = 1;
++
+ init_no_remapping_devices();
+
+ ret = init_dmars();
+
diff --git a/patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch b/patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch
new file mode 100644
index 0000000000..f91883fe2c
--- /dev/null
+++ b/patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch
@@ -0,0 +1,160 @@
+From 62740e97881c78b45a117a358a866fb32975def6 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+Date: Thu, 9 May 2019 23:13:43 -0500
+Subject: [PATCH] net/ibmvnic: Update MAC address settings after adapter reset
+
+References: bsc#1134760
+Patch-mainline: v5.2-rc1
+Git-commit: 62740e97881c78b45a117a358a866fb32975def6
+
+It was discovered in testing that the underlying hardware MAC
+address will revert to initial settings following a device reset,
+but the driver fails to resend the current OS MAC settings. This
+oversight can result in dropped packets. Fix this by informing the
+hardware of the current MAC address settings following any adapter
+initialization or reset.
+
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 53 ++++++++++++++++--------------
+ drivers/net/ethernet/ibm/ibmvnic.h | 2 --
+ 2 files changed, 28 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index b398d6c94dbd..2be3bcd0192f 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -118,7 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
+ static int ibmvnic_init(struct ibmvnic_adapter *);
+ static int ibmvnic_reset_init(struct ibmvnic_adapter *);
+ static void release_crq_queue(struct ibmvnic_adapter *);
+-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
++static int __ibmvnic_set_mac(struct net_device *, u8 *);
+ static int init_crq_queue(struct ibmvnic_adapter *adapter);
+ static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+
+@@ -849,11 +849,7 @@ static int ibmvnic_login(struct net_device *netdev)
+ }
+ } while (retry);
+
+- /* handle pending MAC address changes after successful login */
+- if (adapter->mac_change_pending) {
+- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
+- adapter->mac_change_pending = false;
+- }
++ __ibmvnic_set_mac(netdev, adapter->mac_addr);
+
+ return 0;
+ }
+@@ -1686,28 +1682,40 @@ static void ibmvnic_set_multi(struct net_device *netdev)
+ }
+ }
+
+-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
++static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
+ {
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+- struct sockaddr *addr = p;
+ union ibmvnic_crq crq;
+ int rc;
+
+- if (!is_valid_ether_addr(addr->sa_data))
+- return -EADDRNOTAVAIL;
++ if (!is_valid_ether_addr(dev_addr)) {
++ rc = -EADDRNOTAVAIL;
++ goto err;
++ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
+ crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
+- ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
++ ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
+
+ init_completion(&adapter->fw_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+- if (rc)
+- return rc;
++ if (rc) {
++ rc = -EIO;
++ goto err;
++ }
++
+ wait_for_completion(&adapter->fw_done);
+ /* netdev->dev_addr is changed in handle_change_mac_rsp function */
+- return adapter->fw_done_rc ? -EIO : 0;
++ if (adapter->fw_done_rc) {
++ rc = -EIO;
++ goto err;
++ }
++
++ return 0;
++err:
++ ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
++ return rc;
+ }
+
+ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+@@ -1716,13 +1724,10 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
+ struct sockaddr *addr = p;
+ int rc;
+
+- if (adapter->state == VNIC_PROBED) {
+- memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
+- adapter->mac_change_pending = true;
+- return 0;
+- }
+-
+- rc = __ibmvnic_set_mac(netdev, addr);
++ rc = 0;
++ ether_addr_copy(adapter->mac_addr, addr->sa_data);
++ if (adapter->state != VNIC_PROBED)
++ rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+
+ return rc;
+ }
+@@ -3937,8 +3942,8 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
+ dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
+ goto out;
+ }
+- memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
+- ETH_ALEN);
++ ether_addr_copy(netdev->dev_addr,
++ &crq->change_mac_addr_rsp.mac_addr[0]);
+ out:
+ complete(&adapter->fw_done);
+ return rc;
+@@ -4852,8 +4857,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ init_completion(&adapter->init_done);
+ adapter->resetting = false;
+
+- adapter->mac_change_pending = false;
+-
+ do {
+ rc = init_crq_queue(adapter);
+ if (rc) {
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
+index cffdac372a33..dcf2eb6d9290 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.h
++++ b/drivers/net/ethernet/ibm/ibmvnic.h
+@@ -969,7 +969,6 @@ struct ibmvnic_tunables {
+ u64 rx_entries;
+ u64 tx_entries;
+ u64 mtu;
+- struct sockaddr mac;
+ };
+
+ struct ibmvnic_adapter {
+@@ -1091,7 +1090,6 @@ struct ibmvnic_adapter {
+ bool resetting;
+ bool napi_enabled, from_passive_init;
+
+- bool mac_change_pending;
+ bool failover_pending;
+ bool force_reset_recovery;
+
+--
+2.20.1
+
diff --git a/patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch b/patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch
new file mode 100644
index 0000000000..5acb3e95be
--- /dev/null
+++ b/patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch
@@ -0,0 +1,66 @@
+From 0655f9943df2f2d71f406fd77b51d05548134fc2 Mon Sep 17 00:00:00 2001
+From: Thomas Falcon <tlfalcon@linux.ibm.com>
+Date: Thu, 9 May 2019 23:13:44 -0500
+Subject: [PATCH] net/ibmvnic: Update carrier state after link state change
+
+References: bsc#1135100
+Patch-mainline: v5.2-rc1
+Git-commit: 0655f9943df2f2d71f406fd77b51d05548134fc2
+
+Only set the device carrier state to on after receiving an up link
+state indication from the underlying adapter. Likewise, if a down
+link indication is received, update the carrier state accordingly.
+This fix ensures that accurate carrier state is reported by the driver
+following a link state update by the underlying adapter.
+
+Signed-off-by: Thomas Falcon <tlfalcon@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/net/ethernet/ibm/ibmvnic.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 2be3bcd0192f..3dcd9c3d8781 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1111,7 +1111,6 @@ static int ibmvnic_open(struct net_device *netdev)
+ }
+
+ rc = __ibmvnic_open(netdev);
+- netif_carrier_on(netdev);
+
+ return rc;
+ }
+@@ -1864,8 +1863,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+
+- netif_carrier_on(netdev);
+-
+ return 0;
+ }
+
+@@ -1935,8 +1932,6 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
+ return 0;
+ }
+
+- netif_carrier_on(netdev);
+-
+ return 0;
+ }
+
+@@ -4480,6 +4475,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
+ crq->link_state_indication.phys_link_state;
+ adapter->logical_link_state =
+ crq->link_state_indication.logical_link_state;
++ if (adapter->phys_link_state && adapter->logical_link_state)
++ netif_carrier_on(netdev);
++ else
++ netif_carrier_off(netdev);
+ break;
+ case CHANGE_MAC_ADDR_RSP:
+ netdev_dbg(netdev, "Got MAC address change Response\n");
+--
+2.20.1
+
diff --git a/patches.fixes/0001-PCI-xilinx-nwl-Add-missing-of_node_put.patch b/patches.fixes/0001-PCI-xilinx-nwl-Add-missing-of_node_put.patch
new file mode 100644
index 0000000000..ca7a4bbf1f
--- /dev/null
+++ b/patches.fixes/0001-PCI-xilinx-nwl-Add-missing-of_node_put.patch
@@ -0,0 +1,33 @@
+From 342639d996f18bc0a4db2f42a84230c0a966dc94 Mon Sep 17 00:00:00 2001
+From: Nicholas Mc Guire <hofrat@osadl.org>
+Date: Fri, 29 Jun 2018 13:50:10 -0500
+Subject: [PATCH] PCI: xilinx-nwl: Add missing of_node_put()
+Git-commit: 342639d996f18bc0a4db2f42a84230c0a966dc94
+Patch-mainline: v4.18
+References: bsc#1100132
+
+The call to of_get_next_child() returns a node pointer with
+refcount incremented thus it must be explicitly decremented
+here after the last usage.
+
+Fixes: ab597d35ef11 ("PCI: xilinx-nwl: Add support for Xilinx NWL PCIe Host Controller")
+Signed-off-by: Nicholas Mc Guire <hofrat@osadl.org>
+[lorenzo.pieralisi@arm.com: updated commit log]
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/pci/host/pcie-xilinx-nwl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pci/host/pcie-xilinx-nwl.c
++++ b/drivers/pci/host/pcie-xilinx-nwl.c
+@@ -532,7 +532,7 @@ static int nwl_pcie_init_irq_domain(stru
+ INTX_NUM,
+ &legacy_domain_ops,
+ pcie);
+-
++ of_node_put(legacy_intc_node);
+ if (!pcie->legacy_irq_domain) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
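The rule being restored here: of_get_next_child() returns the child node with
its refcount already raised, so the caller owns a reference and must release
it with of_node_put() once the pointer is no longer dereferenced, on success
and error paths alike. A hedged sketch of the pairing (parent, domain_ops and
priv are assumed to be in scope):

struct device_node *child;

child = of_get_next_child(parent, NULL);	/* takes a reference */
if (!child)
	return -ENODEV;

domain = irq_domain_add_linear(child, nr_irqs, &domain_ops, priv);
of_node_put(child);				/* balance the implicit get */
if (!domain)
	return -ENOMEM;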
diff --git a/patches.fixes/0001-USB-Add-new-USB-LPM-helpers.patch b/patches.fixes/0001-USB-Add-new-USB-LPM-helpers.patch
new file mode 100644
index 0000000000..8edf54d441
--- /dev/null
+++ b/patches.fixes/0001-USB-Add-new-USB-LPM-helpers.patch
@@ -0,0 +1,152 @@
+From 7529b2574a7aaf902f1f8159fbc2a7caa74be559 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Sat, 12 Jan 2019 03:54:24 +0800
+Subject: [PATCH] USB: Add new USB LPM helpers
+Git-commit: 7529b2574a7aaf902f1f8159fbc2a7caa74be559
+Patch-mainline: v5.1-rc1
+References: bsc#1129770
+
+Use new helpers to make LPM enabling/disabling clearer.
+
+This is preparation for a subsequent patch.
+
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: stable <stable@vger.kernel.org> # after much soaking
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/usb/core/driver.c | 10 ++++++++++
+ drivers/usb/core/hub.c | 12 ++++++------
+ drivers/usb/core/message.c | 2 +-
+ drivers/usb/core/sysfs.c | 5 ++++-
+ drivers/usb/core/usb.h | 12 ++++++++++++
+ 5 files changed, 33 insertions(+), 8 deletions(-)
+
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1905,6 +1905,16 @@ int usb_set_usb2_hardware_lpm(struct usb
+ return ret;
+ }
+
++int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return usb_set_usb2_hardware_lpm(udev, 1);
++}
++
++int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return usb_set_usb2_hardware_lpm(udev, 0);
++}
++
+ #endif /* CONFIG_PM */
+
+ struct bus_type usb_bus_type = {
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3171,7 +3171,7 @@ int usb_port_suspend(struct usb_device *
+
+ /* disable USB2 hardware LPM */
+ if (udev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(udev, 0);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ if (usb_disable_ltm(udev)) {
+ dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
+@@ -3218,7 +3218,7 @@ int usb_port_suspend(struct usb_device *
+ err_ltm:
+ /* Try to enable USB2 hardware LPM again */
+ if (udev->usb2_hw_lpm_capable == 1)
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ if (udev->do_remote_wakeup)
+ (void) usb_disable_remote_wakeup(udev);
+@@ -3498,7 +3498,7 @@ int usb_port_resume(struct usb_device *u
+ } else {
+ /* Try to enable USB2 hardware LPM */
+ if (udev->usb2_hw_lpm_capable == 1)
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ /* Try to enable USB3 LTM and LPM */
+ usb_enable_ltm(udev);
+@@ -4335,7 +4335,7 @@ static void hub_set_initial_usb2_lpm_pol
+ if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
+ connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
+ udev->usb2_hw_lpm_allowed = 1;
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+ }
+ }
+
+@@ -5481,7 +5481,7 @@ static int usb_reset_and_verify_device(s
+ * It will be re-enabled by the enumeration process.
+ */
+ if (udev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(udev, 0);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ /* Disable LPM and LTM while we reset the device and reinstall the alt
+ * settings. Device-initiated LPM settings, and system exit latency
+@@ -5591,7 +5591,7 @@ static int usb_reset_and_verify_device(s
+
+ done:
+ /* Now that the alt settings are re-installed, enable LTM and LPM. */
+- usb_set_usb2_hardware_lpm(udev, 1);
++ usb_enable_usb2_hardware_lpm(udev);
+ usb_unlocked_enable_lpm(udev);
+ usb_enable_ltm(udev);
+ usb_release_bos_descriptor(udev);
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1185,7 +1185,7 @@ void usb_disable_device(struct usb_devic
+ }
+
+ if (dev->usb2_hw_lpm_enabled == 1)
+- usb_set_usb2_hardware_lpm(dev, 0);
++ usb_disable_usb2_hardware_lpm(dev);
+ usb_unlocked_disable_lpm(dev);
+ usb_disable_ltm(dev);
+
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -494,7 +494,10 @@ static ssize_t usb2_hardware_lpm_store(s
+
+ if (!ret) {
+ udev->usb2_hw_lpm_allowed = value;
+- ret = usb_set_usb2_hardware_lpm(udev, value);
++ if (value)
++ ret = usb_enable_usb2_hardware_lpm(udev);
++ else
++ ret = usb_disable_usb2_hardware_lpm(udev);
+ }
+
+ usb_unlock_device(udev);
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -85,6 +85,8 @@ extern int usb_runtime_suspend(struct de
+ extern int usb_runtime_resume(struct device *dev);
+ extern int usb_runtime_idle(struct device *dev);
+ extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
++extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
++extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
+
+ #else
+
+@@ -108,6 +110,16 @@ static inline int usb_set_usb2_hardware_
+ {
+ return 0;
+ }
++
++static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return 0;
++}
++
++static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
++{
++ return 0;
++}
+
+ #endif
+
diff --git a/patches.fixes/0001-USB-Consolidate-LPM-checks-to-avoid-enabling-LPM-twi.patch b/patches.fixes/0001-USB-Consolidate-LPM-checks-to-avoid-enabling-LPM-twi.patch
new file mode 100644
index 0000000000..af4f406abf
--- /dev/null
+++ b/patches.fixes/0001-USB-Consolidate-LPM-checks-to-avoid-enabling-LPM-twi.patch
@@ -0,0 +1,123 @@
+From d7a6c0ce8d26412903c7981503bad9e1cc7c45d2 Mon Sep 17 00:00:00 2001
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Date: Sat, 12 Jan 2019 03:54:25 +0800
+Subject: [PATCH] USB: Consolidate LPM checks to avoid enabling LPM twice
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Git-commit: d7a6c0ce8d26412903c7981503bad9e1cc7c45d2
+Patch-mainline: v5.1-rc1
+References: bsc#1129770
+
+USB Bluetooth controller QCA ROME (0cf3:e007) sometimes stops working
+after S3:
+[ 165.110742] Bluetooth: hci0: using NVM file: qca/nvm_usb_00000302.bin
+[ 168.432065] Bluetooth: hci0: Failed to send body at 4 of 1953 (-110)
+
+After some experiments, I found that disabling LPM can work around
+the issue.
+
+On some platforms, the USB power is cut during S3, so the driver uses
+reset-resume to resume the device. During port resume, LPM gets enabled
+twice, by usb_reset_and_verify_device() and usb_port_resume().
+
+Consolidate all checks into new LPM helpers to make sure LPM only gets
+enabled once.
+
+Fixes: de68bab4fa96 ("usb: Don't enable USB 2.0 Link PM by default.")
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: stable <stable@vger.kernel.org> # after much soaking
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/usb/core/driver.c | 11 ++++++++---
+ drivers/usb/core/hub.c | 12 ++++--------
+ drivers/usb/core/message.c | 3 +--
+ 3 files changed, 13 insertions(+), 13 deletions(-)
+
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -1893,9 +1893,6 @@ int usb_set_usb2_hardware_lpm(struct usb
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int ret = -EPERM;
+
+- if (enable && !udev->usb2_hw_lpm_allowed)
+- return 0;
+-
+ if (hcd->driver->set_usb2_hw_lpm) {
+ ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
+ if (!ret)
+@@ -1907,11 +1904,19 @@ int usb_set_usb2_hardware_lpm(struct usb
+
+ int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+ {
++ if (!udev->usb2_hw_lpm_capable ||
++ !udev->usb2_hw_lpm_allowed ||
++ udev->usb2_hw_lpm_enabled)
++ return 0;
++
+ return usb_set_usb2_hardware_lpm(udev, 1);
+ }
+
+ int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+ {
++ if (!udev->usb2_hw_lpm_enabled)
++ return 0;
++
+ return usb_set_usb2_hardware_lpm(udev, 0);
+ }
+
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3170,8 +3170,7 @@ int usb_port_suspend(struct usb_device *
+ }
+
+ /* disable USB2 hardware LPM */
+- if (udev->usb2_hw_lpm_enabled == 1)
+- usb_disable_usb2_hardware_lpm(udev);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ if (usb_disable_ltm(udev)) {
+ dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
+@@ -3217,8 +3216,7 @@ int usb_port_suspend(struct usb_device *
+ usb_enable_ltm(udev);
+ err_ltm:
+ /* Try to enable USB2 hardware LPM again */
+- if (udev->usb2_hw_lpm_capable == 1)
+- usb_enable_usb2_hardware_lpm(udev);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ if (udev->do_remote_wakeup)
+ (void) usb_disable_remote_wakeup(udev);
+@@ -3497,8 +3495,7 @@ int usb_port_resume(struct usb_device *u
+ hub_port_logical_disconnect(hub, port1);
+ } else {
+ /* Try to enable USB2 hardware LPM */
+- if (udev->usb2_hw_lpm_capable == 1)
+- usb_enable_usb2_hardware_lpm(udev);
++ usb_enable_usb2_hardware_lpm(udev);
+
+ /* Try to enable USB3 LTM and LPM */
+ usb_enable_ltm(udev);
+@@ -5480,8 +5477,7 @@ static int usb_reset_and_verify_device(s
+ /* Disable USB2 hardware LPM.
+ * It will be re-enabled by the enumeration process.
+ */
+- if (udev->usb2_hw_lpm_enabled == 1)
+- usb_disable_usb2_hardware_lpm(udev);
++ usb_disable_usb2_hardware_lpm(udev);
+
+ /* Disable LPM and LTM while we reset the device and reinstall the alt
+ * settings. Device-initiated LPM settings, and system exit latency
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1184,8 +1184,7 @@ void usb_disable_device(struct usb_devic
+ dev->actconfig->interface[i] = NULL;
+ }
+
+- if (dev->usb2_hw_lpm_enabled == 1)
+- usb_disable_usb2_hardware_lpm(dev);
++ usb_disable_usb2_hardware_lpm(dev);
+ usb_unlocked_disable_lpm(dev);
+ usb_disable_ltm(dev);
+
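Taken together with the previous patch, the design folds every eligibility
and current-state check into the toggle helpers themselves, so call sites can
invoke them unconditionally and a double enable (reset-resume plus port
resume) degrades to a harmless no-op. A standalone userspace sketch of the
idea, with the USB core state reduced to three booleans:

#include <stdbool.h>

struct dev_state { bool capable, allowed, enabled; };

/* stand-in for the host controller callback */
static int set_hw_lpm(struct dev_state *d, bool on)
{
	d->enabled = on;
	return 0;
}

static int enable_lpm(struct dev_state *d)
{
	if (!d->capable || !d->allowed || d->enabled)
		return 0;	/* ineligible or already on: nothing to do */
	return set_hw_lpm(d, true);
}

static int disable_lpm(struct dev_state *d)
{
	if (!d->enabled)
		return 0;	/* already off */
	return set_hw_lpm(d, false);
}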
diff --git a/patches.fixes/0001-drm-i915-Fix-I915_EXEC_RING_MASK.patch b/patches.fixes/0001-drm-i915-Fix-I915_EXEC_RING_MASK.patch
new file mode 100644
index 0000000000..e214b63e20
--- /dev/null
+++ b/patches.fixes/0001-drm-i915-Fix-I915_EXEC_RING_MASK.patch
@@ -0,0 +1,36 @@
+From d90c06d57027203f73021bb7ddb30b800d65c636 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Fri, 1 Mar 2019 14:03:47 +0000
+Subject: drm/i915: Fix I915_EXEC_RING_MASK
+Git-commit: d90c06d57027203f73021bb7ddb30b800d65c636
+Patch-mainline: v5.2-rc1
+References: bnc#1106929
+
+This was supposed to be a mask of all known rings, but it is being used
+by execbuffer to filter out invalid rings, and so is instead mapping high
+unused values onto valid rings. Instead of a mask of all known rings,
+we need it to be the mask of all possible rings.
+
+Fixes: 549f7365820a ("drm/i915: Enable SandyBridge blitter ring")
+Fixes: de1add360522 ("drm/i915: Decouple execbuf uAPI from internal implementation")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: <stable@vger.kernel.org> # v4.6+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190301140404.26690-21-chris@chris-wilson.co.uk
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+---
+ include/uapi/drm/i915_drm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/uapi/drm/i915_drm.h
++++ b/include/uapi/drm/i915_drm.h
+@@ -714,7 +714,7 @@ struct drm_i915_gem_execbuffer2 {
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+-#define I915_EXEC_RING_MASK (7<<0)
++#define I915_EXEC_RING_MASK (0x3f)
+ #define I915_EXEC_DEFAULT (0<<0)
+ #define I915_EXEC_RENDER (1<<0)
+ #define I915_EXEC_BSD (2<<0)
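The failure mode is easy to demonstrate outside the kernel: with a 3-bit
mask an out-of-range ring selector is silently truncated onto a valid ring,
whereas the widened mask preserves the value so the range check can reject
it. A small standalone demo (NUM_RINGS is illustrative):

#include <stdio.h>

#define OLD_MASK (7<<0)
#define NEW_MASK (0x3f)
#define NUM_RINGS 5			/* illustrative count of valid rings */

int main(void)
{
	unsigned int flags = 9;		/* bogus ring selector from userspace */

	printf("old mask: ring %u -> %s\n", flags & OLD_MASK,
	       (flags & OLD_MASK) < NUM_RINGS ? "accepted (!)" : "rejected");
	printf("new mask: ring %u -> %s\n", flags & NEW_MASK,
	       (flags & NEW_MASK) < NUM_RINGS ? "accepted (!)" : "rejected");
	return 0;
}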
diff --git a/patches.fixes/0001-media-vb2-don-t-call-__vb2_queue_cancel-if-vb2_start.patch b/patches.fixes/0001-media-vb2-don-t-call-__vb2_queue_cancel-if-vb2_start.patch
new file mode 100644
index 0000000000..130151a650
--- /dev/null
+++ b/patches.fixes/0001-media-vb2-don-t-call-__vb2_queue_cancel-if-vb2_start.patch
@@ -0,0 +1,42 @@
+From 04990215dec43c424daff00d1f622167b8aafd1f Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Date: Wed, 28 Nov 2018 03:37:43 -0500
+Subject: [PATCH] media: vb2: don't call __vb2_queue_cancel if
+ vb2_start_streaming failed
+Git-commit: 04990215dec43c424daff00d1f622167b8aafd1f
+Patch-mainline: v5.0
+References: bsc#1120902
+
+vb2_start_streaming() already rolls back the buffers, so there is no
+need to call __vb2_queue_cancel(). Especially since __vb2_queue_cancel()
+does too much, such as zeroing the q->queued_count value, causing vb2
+to think that no buffers have been queued.
+
+It appears that this call to __vb2_queue_cancel() is a left-over from
+before commit b3379c6201bb3.
+
+Fixes: b3379c6201bb3 ('vb2: only call start_streaming if sufficient buffers are queued')
+
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: <stable@vger.kernel.org> # for v4.16 and up
+Acked-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/media/v4l2-core/videobuf2-core.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1754,10 +1754,8 @@ int vb2_core_streamon(struct vb2_queue *
+ */
+ if (q->queued_count >= q->min_buffers_needed) {
+ ret = vb2_start_streaming(q);
+- if (ret) {
+- __vb2_queue_cancel(q);
++ if (ret)
+ return ret;
+- }
+ }
+
+ q->streaming = 1;
diff --git a/patches.fixes/0002-drm-fb-helper-dpms_legacy-Only-set-on-connectors-in-.patch b/patches.fixes/0002-drm-fb-helper-dpms_legacy-Only-set-on-connectors-in-.patch
new file mode 100644
index 0000000000..3d3aa9c0ff
--- /dev/null
+++ b/patches.fixes/0002-drm-fb-helper-dpms_legacy-Only-set-on-connectors-in-.patch
@@ -0,0 +1,54 @@
+From 65a102f68005891d7f39354cfd79099908df6d51 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Noralf=20Tr=C3=B8nnes?= <noralf@tronnes.org>
+Date: Tue, 26 Mar 2019 18:55:32 +0100
+Subject: drm/fb-helper: dpms_legacy(): Only set on connectors in use
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Git-commit: 65a102f68005891d7f39354cfd79099908df6d51
+Patch-mainline: v5.2-rc1
+References: bnc#1106929
+
+For each enabled crtc, the function sets dpms on all registered connectors.
+Limit this to only doing it once and on the connectors actually in use.
+
+Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
+Fixes: 023eb571a1d0 ("drm: correctly update connector DPMS status in drm_fb_helper")
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190326175546.18126-3-noralf@tronnes.org
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+---
+ drivers/gpu/drm/drm_fb_helper.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -560,8 +560,8 @@ static void drm_fb_helper_dpms(struct fb
+ {
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_device *dev = fb_helper->dev;
+- struct drm_crtc *crtc;
+ struct drm_connector *connector;
++ struct drm_mode_set *modeset;
+ int i, j;
+
+ /*
+@@ -574,14 +574,13 @@ static void drm_fb_helper_dpms(struct fb
+ }
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+- crtc = fb_helper->crtc_info[i].mode_set.crtc;
++ modeset = &fb_helper->crtc_info[i].mode_set;
+
+- if (!crtc->enabled)
++ if (!modeset->crtc->enabled)
+ continue;
+
+- /* Walk the connectors & encoders on this fb turning them on/off */
+- for (j = 0; j < fb_helper->connector_count; j++) {
+- connector = fb_helper->connector_info[j]->connector;
++ for (j = 0; j < modeset->num_connectors; j++) {
++ connector = modeset->connectors[j];
+ connector->funcs->dpms(connector, dpms_mode);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.dpms_property, dpms_mode);
diff --git a/patches.fixes/nvme-fc-resolve-io-failures-during-connect.patch b/patches.fixes/nvme-fc-resolve-io-failures-during-connect.patch
new file mode 100644
index 0000000000..f9da1df80e
--- /dev/null
+++ b/patches.fixes/nvme-fc-resolve-io-failures-during-connect.patch
@@ -0,0 +1,192 @@
+From: James Smart <jsmart2021@gmail.com>
+Date: Wed, 14 Nov 2018 16:35:10 -0800
+Subject: [PATCH] nvme-fc: resolve io failures during connect
+Git-commit: 4cff280a5fccf6513ed9e895bb3a4e7ad8b0cedc
+Patch-mainline: v4.20-rc4
+References: bsc#1116803
+
+If an io error occurs on an io issued while connecting, recovery
+of the io falls flat as the state checking ends up nooping the error
+handler.
+
+Create an err_work work item that is scheduled upon an io error while
+connecting. The work thread terminates all io on all queues and marks
+the queues as not connected. The terminated io returns to the caller,
+which then backs out of the connection attempt and, if possible,
+reschedules it.
+
+The changes:
+- in case there are several commands hitting the error handler, a
+ state flag is kept so that the error work is only scheduled once,
+ on the first error. The subsequent errors can be ignored.
+- The calling sequence to stop keep alive and terminate the queues
+ and their io is lifted from the reset routine. Made a small
+ service routine used by both reset and err_work.
+- During debugging, found that the teardown path can reference
+ an uninitialized pointer, resulting in a NULL pointer oops.
+ The aen_ops weren't initialized yet. Add validation on their
+ initialization before calling the teardown routine.
+
+[hare: ported to SLE12 SP3]
+
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/nvme/host/fc.c | 68 ++++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 61 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index e43a270122a4..8e4e71b2026f 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -157,6 +157,7 @@ struct nvme_fc_ctrl {
+
+ bool ioq_live;
+ bool assoc_active;
++ atomic_t err_work_active;
+ u64 association_id;
+
+ u64 cap;
+@@ -169,6 +170,7 @@ struct nvme_fc_ctrl {
+ struct work_struct delete_work;
+ struct work_struct reset_work;
+ struct delayed_work connect_work;
++ struct work_struct err_work;
+
+ struct kref ref;
+ u32 flags;
+@@ -1547,6 +1549,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
+ struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
+ int i;
+
++ /* ensure we've initialized the ops once */
++ if (!(aen_op->flags & FCOP_FLAGS_AEN))
++ return;
++
+ for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++)
+ __nvme_fc_abort_op(ctrl, aen_op);
+ }
+@@ -2070,7 +2076,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
+ static void
+ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
+ {
+- /* only proceed if in LIVE state - e.g. on first error */
++ int active;
++
++ /*
++ * if an error (io timeout, etc) while (re)connecting,
++ * it's an error on creating the new association.
++ * Start the error recovery thread if it hasn't already
++ * been started. It is expected there could be multiple
++ * ios hitting this path before things are cleaned up.
++ */
++ if (ctrl->ctrl.state == NVME_CTRL_RECONNECTING) {
++ active = atomic_xchg(&ctrl->err_work_active, 1);
++ if (!active && !schedule_work(&ctrl->err_work)) {
++ atomic_set(&ctrl->err_work_active, 0);
++ WARN_ON(1);
++ }
++ return;
++ }
++
++ /* Otherwise, only proceed if in LIVE state - e.g. on first error */
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ return;
+
+@@ -2864,6 +2888,7 @@ nvme_fc_delete_ctrl_work(struct work_struct *work)
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, delete_work);
+
++ cancel_work_sync(&ctrl->err_work);
+ cancel_work_sync(&ctrl->reset_work);
+ cancel_delayed_work_sync(&ctrl->connect_work);
+
+@@ -2970,21 +2995,29 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
+ }
+
+ static void
+-nvme_fc_reset_ctrl_work(struct work_struct *work)
++__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
+ {
+- struct nvme_fc_ctrl *ctrl =
+- container_of(work, struct nvme_fc_ctrl, reset_work);
+- int ret;
++ nvme_stop_keep_alive(&ctrl->ctrl);
+
+ /* will block will waiting for io to terminate */
+ nvme_fc_delete_association(ctrl);
+
+- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
++ if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING &&
++ !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: error_recovery: Couldn't change state "
+ "to RECONNECTING\n", ctrl->cnum);
+- return;
+ }
++}
++
++static void
++nvme_fc_reset_ctrl_work(struct work_struct *work)
++{
++ struct nvme_fc_ctrl *ctrl =
++ container_of(work, struct nvme_fc_ctrl, reset_work);
++ int ret;
++
++ __nvme_fc_terminate_io(ctrl);
+
+ if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
+ ret = nvme_fc_create_association(ctrl);
+@@ -3022,6 +3055,24 @@ nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
+ return 0;
+ }
+
++static void
++nvme_fc_connect_err_work(struct work_struct *work)
++{
++ struct nvme_fc_ctrl *ctrl =
++ container_of(work, struct nvme_fc_ctrl, err_work);
++
++ __nvme_fc_terminate_io(ctrl);
++
++ atomic_set(&ctrl->err_work_active, 0);
++
++ /*
++ * Rescheduling the connection after recovering
++ * from the io error is left to the reconnect work
++ * item, which is what should have stalled waiting on
++ * the io that had the error that scheduled this work.
++ */
++}
++
+ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
+ .name = "fc",
+ .module = THIS_MODULE,
+@@ -3135,6 +3186,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ ctrl->cnum = idx;
+ ctrl->ioq_live = false;
+ ctrl->assoc_active = false;
++ atomic_set(&ctrl->err_work_active, 0);
+ init_waitqueue_head(&ctrl->ioabort_wait);
+
+ get_device(ctrl->dev);
+@@ -3143,6 +3195,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
+ INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
+ INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
++ INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
+ spin_lock_init(&ctrl->lock);
+
+ /* io queue count */
+@@ -3231,6 +3284,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
+ fail_ctrl:
+ nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->reset_work);
++ cancel_work_sync(&ctrl->err_work);
+ cancel_delayed_work_sync(&ctrl->connect_work);
+
+ ctrl->ctrl.opts = NULL;
+--
+2.16.4
+
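The once-only scheduling described in the changelog rests on atomic_xchg()
returning the previous flag value: among many racing error paths, exactly one
observes 0 and queues the work; the work item clears the flag again when it
finishes. A sketch of the idiom with a hypothetical controller type:

static void maybe_schedule_err_work(struct example_ctrl *ctrl)
{
	/* first caller flips 0 -> 1 and wins; later callers see 1 */
	if (atomic_xchg(&ctrl->err_work_active, 1))
		return;

	if (!schedule_work(&ctrl->err_work)) {
		/* work was somehow already queued: undo our claim */
		atomic_set(&ctrl->err_work_active, 0);
		WARN_ON(1);
	}
}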
diff --git a/patches.fixes/sched-smt-expose-sched_smt_present-static-key.patch b/patches.fixes/sched-smt-expose-sched_smt_present-static-key.patch
new file mode 100644
index 0000000000..6e2805a2e6
--- /dev/null
+++ b/patches.fixes/sched-smt-expose-sched_smt_present-static-key.patch
@@ -0,0 +1,83 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 25 Nov 2018 19:33:38 +0100
+Subject: sched/smt: Expose sched_smt_present static key
+Git-commit: 321a874a7ef85655e93b3206d0f36b4a6097f948
+Patch-mainline: v4.20-rc5
+References: bsc#1106913
+
+Make the scheduler's 'sched_smt_present' static key globally available, so
+it can be used in the x86 speculation control code.
+
+Provide a query function and a stub for the CONFIG_SMP=n case.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.430168326@linutronix.de
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ include/linux/sched/smt.h | 18 ++++++++++++++++++
+ kernel/sched/sched.h | 4 +---
+ 2 files changed, 19 insertions(+), 3 deletions(-)
+
+--- /dev/null
++++ b/include/linux/sched/smt.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_SCHED_SMT_H
++#define _LINUX_SCHED_SMT_H
++
++#include <linux/static_key.h>
++
++#ifdef CONFIG_SCHED_SMT
++extern struct static_key_false sched_smt_present;
++
++static __always_inline bool sched_smt_active(void)
++{
++ return static_branch_likely(&sched_smt_present);
++}
++#else
++static inline bool sched_smt_active(void) { return false; }
++#endif
++
++#endif
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -3,6 +3,7 @@
+ #include <linux/sched/sysctl.h>
+ #include <linux/sched/rt.h>
+ #include <linux/sched/deadline.h>
++#include <linux/sched/smt.h>
+ #include <linux/mutex.h>
+ #include <linux/spinlock.h>
+ #include <linux/stop_machine.h>
+@@ -744,9 +745,6 @@ static inline int cpu_of(struct rq *rq)
+
+
+ #ifdef CONFIG_SCHED_SMT
+-
+-extern struct static_key_false sched_smt_present;
+-
+ extern void __update_idle_core(struct rq *rq);
+
+ static inline void update_idle_core(struct rq *rq)
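A consumer then only includes the header; the query compiles down to a
static-branch test, so it is effectively free on non-SMT machines. A minimal
hypothetical caller:

#include <linux/sched/smt.h>

/* hypothetical mitigation hook, not part of the patch */
static void example_smt_sensitive_update(void)
{
	if (sched_smt_active())
		pr_info("SMT active: applying SMT-aware mitigation\n");
}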
diff --git a/patches.fixes/sched-smt-make-sched_smt_present-track-topology.patch b/patches.fixes/sched-smt-make-sched_smt_present-track-topology.patch
new file mode 100644
index 0000000000..95250c96ae
--- /dev/null
+++ b/patches.fixes/sched-smt-make-sched_smt_present-track-topology.patch
@@ -0,0 +1,91 @@
+From: "Peter Zijlstra (Intel)" <peterz@infradead.org>
+Date: Sun, 25 Nov 2018 19:33:36 +0100
+Subject: sched/smt: Make sched_smt_present track topology
+Git-commit: c5511d03ec090980732e929c318a7a6374b5550e
+Patch-mainline: v4.20-rc5
+References: bsc#1106913
+
+Currently the 'sched_smt_present' static key is enabled when SMT topology
+is observed at CPU bringup, but it is never disabled. However, there is
+demand to also disable the key when the topology changes such that no SMT
+is present anymore.
+
+Implement this by making the key count the number of cores that have SMT
+enabled.
+
+In particular, the SMT topology bits are set before interrupts are enabled
+and similarly, are cleared after interrupts are disabled for the last time
+and the CPU dies.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jkosina@suse.cz>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Casey Schaufler <casey.schaufler@intel.com>
+Cc: Asit Mallick <asit.k.mallick@intel.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Waiman Long <longman9394@gmail.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Dave Stewart <david.c.stewart@intel.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20181125185004.246110444@linutronix.de
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/sched/core.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5964,15 +5964,10 @@ static int sched_cpu_active(struct notif
+
+ #ifdef CONFIG_SCHED_SMT
+ /*
+- * The sched_smt_present static key needs to be evaluated on
+- * every hotplug event because at boot time SMT might be disabled
+- * when the number of booted CPUs is limited.
+- *
+- * If then later a sibling gets hotplugged, then the key would
+- * stay off and SMT scheduling would never be functional.
++ * When going up, increment the number of cores with SMT present.
+ */
+- if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
+- static_branch_enable_cpuslocked(&sched_smt_present);
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_inc_cpuslocked(&sched_smt_present);
+ #endif
+
+ return NOTIFY_OK;
+@@ -5989,9 +5984,20 @@ static int sched_cpu_active(struct notif
+ static int sched_cpu_inactive(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+ {
++ int cpu = (long)hcpu;
++
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DOWN_PREPARE:
+- set_cpu_active((long)hcpu, false);
++
++#ifdef CONFIG_SCHED_SMT
++ /*
++ * When going down, decrement the number of cores with SMT present.
++ */
++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
++ static_branch_dec_cpuslocked(&sched_smt_present);
++#endif
++
++ set_cpu_active(cpu, false);
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
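
The switch from static_branch_enable_cpuslocked() to the inc/dec variants
above turns the static key into a reference count: the branch reads true as
long as at least one core has both siblings online, and the '== 2' weight
test fires exactly once per core, when its second sibling comes up or goes
down. A minimal sketch of these refcount semantics, with hypothetical names,
assuming only <linux/jump_label.h>:

    #include <linux/jump_label.h>

    /* Hypothetical key; behaves like a refcounted boolean. */
    static DEFINE_STATIC_KEY_FALSE(example_smt_key);

    /* Called when a core gains its second online sibling. */
    static void core_gained_smt(void)
    {
            static_branch_inc(&example_smt_key);
    }

    /* Called when a core drops back to one online sibling. */
    static void core_lost_smt(void)
    {
            static_branch_dec(&example_smt_key);
    }

    /* True while any core still runs with SMT enabled. */
    static bool example_smt_active(void)
    {
            return static_branch_likely(&example_smt_key);
    }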
diff --git a/patches.kabi/kabi-deduplicate-X86_FEATURE_L1TF_PTEINV.patch b/patches.kabi/kabi-deduplicate-X86_FEATURE_L1TF_PTEINV.patch
index e85c6c9021..31ffaa3794 100644
--- a/patches.kabi/kabi-deduplicate-X86_FEATURE_L1TF_PTEINV.patch
+++ b/patches.kabi/kabi-deduplicate-X86_FEATURE_L1TF_PTEINV.patch
@@ -24,9 +24,9 @@ Signed-off-by: Jiri Slaby <jslaby@suse.cz>
#define X86_FEATURE_FLUSH_L1D ( 2*32+ 8) /* Flush L1D cache */
+#define X86_FEATURE_L1TF_PTEINV ( 2*32+ 9) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_TSX_FORCE_ABORT ( 2*32+10) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_MD_CLEAR ( 2*32+11) /* VERW clears CPU buffers */
- /* Other features, Linux-defined mapping, word 3 */
-@@ -238,8 +239,6 @@
+@@ -240,8 +241,6 @@
/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
diff --git a/patches.kernel.org/4.4.179-078-net-rds-force-to-destroy-connection-if-t_sock.patch b/patches.kernel.org/4.4.179-078-net-rds-force-to-destroy-connection-if-t_sock.patch
index f503358293..4dde5d5969 100644
--- a/patches.kernel.org/4.4.179-078-net-rds-force-to-destroy-connection-if-t_sock.patch
+++ b/patches.kernel.org/4.4.179-078-net-rds-force-to-destroy-connection-if-t_sock.patch
@@ -2,7 +2,7 @@ From: Mao Wenan <maowenan@huawei.com>
Date: Thu, 28 Mar 2019 17:10:56 +0800
Subject: [PATCH] net: rds: force to destroy connection if t_sock is NULL in
rds_tcp_kill_sock().
-References: bnc#1012382
+References: CVE-2019-11815 bnc#1012382 bsc#1134537
Patch-mainline: 4.4.179
Git-commit: cb66ddd156203daefb8d71158036b27b0e2caf63
diff --git a/patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch b/patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
new file mode 100644
index 0000000000..e90511dc51
--- /dev/null
+++ b/patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
@@ -0,0 +1,33 @@
+From e8c365e8e82111b0f71f861c91a38af3cb1a87fc Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 14 May 2019 12:37:31 +0200
+Subject: drm/ttm: Remove warning about inconsistent mapping information
+Patch-mainline: Never, local cleanup
+References: bnc#1131488
+
+Fixing the issue behind bnc#1131488 requires changing a significant amount
+of the fbdev emulation. As the problem is rather cosmetic, we drop the
+warning for now.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+---
+ drivers/gpu/drm/ttm/ttm_bo_vm.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index bac8bb61d4be..fd9fd59dd3ff 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -276,8 +276,6 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
+ struct ttm_buffer_object *bo =
+ (struct ttm_buffer_object *)vma->vm_private_data;
+
+- WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
+-
+ (void)ttm_bo_reference(bo);
+ }
+
+--
+2.21.0
+
diff --git a/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch b/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
index 725b9d41fc..e82b21736a 100644
--- a/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
+++ b/patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
@@ -10,6 +10,10 @@ spinlocks in favor of the bare metal behavior under specific 1:1
cpu to vcpu mappings.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
+[ Rip out the last hunk, which is
+  b8fb03785d4d ("locking/static_keys: Provide DECLARE and well as DEFINE macros")
+  anyway. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
---
Documentation/kernel-parameters.txt | 4 ++++
@@ -163,21 +167,3 @@ Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
l1tf= [X86] Control mitigation of the L1TF vulnerability on
affected CPUs
---- a/include/linux/jump_label.h
-+++ b/include/linux/jump_label.h
-@@ -277,9 +277,15 @@ struct static_key_false {
- #define DEFINE_STATIC_KEY_TRUE(name) \
- struct static_key_true name = STATIC_KEY_TRUE_INIT
-
-+#define DECLARE_STATIC_KEY_TRUE(name) \
-+ extern struct static_key_true name
-+
- #define DEFINE_STATIC_KEY_FALSE(name) \
- struct static_key_false name = STATIC_KEY_FALSE_INIT
-
-+#define DECLARE_STATIC_KEY_FALSE(name) \
-+ extern struct static_key_false name
-+
- #define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \
- struct static_key_true name[count] = { \
- [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \
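
The hunk removed above duplicates the DECLARE macros that commit
b8fb03785d4d introduces, which is carried separately in this tree. For
clarity, a sketch of how the DECLARE/DEFINE pair is intended to be used
across compilation units; the key and function names are hypothetical:

    /* In a shared header: an extern declaration, no storage. */
    DECLARE_STATIC_KEY_FALSE(example_feature);

    /* In exactly one .c file: the actual definition. */
    DEFINE_STATIC_KEY_FALSE(example_feature);

    /* Any user of the header can then branch on the key. */
    static void example_user(void)
    {
            if (static_branch_unlikely(&example_feature))
                    pr_info("example_feature is enabled\n");
    }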
diff --git a/patches.suse/06-x86-idle-toggle-ibrs-when-going-idle.patch b/patches.suse/06-x86-idle-toggle-ibrs-when-going-idle.patch
index f45b449471..6dd5d4915c 100644
--- a/patches.suse/06-x86-idle-toggle-ibrs-when-going-idle.patch
+++ b/patches.suse/06-x86-idle-toggle-ibrs-when-going-idle.patch
@@ -16,16 +16,16 @@ Signed-off-by: Borislav Petkov <bp@suse.de>
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
-@@ -5,6 +5,8 @@
-
+@@ -6,6 +6,8 @@
#include <asm/cpufeature.h>
+ #include <asm/nospec-branch.h>
+#include <asm/spec_ctrl.h>
+
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
-@@ -104,9 +106,13 @@ static inline void mwait_idle_with_hints
+@@ -111,9 +113,13 @@ static inline void mwait_idle_with_hints
mb();
}
@@ -41,7 +41,7 @@ Signed-off-by: Borislav Petkov <bp@suse.de>
}
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
-@@ -426,11 +426,16 @@ static void mwait_idle(void)
+@@ -568,11 +568,16 @@ static void mwait_idle(void)
smp_mb(); /* quirk */
}
diff --git a/patches.suse/do-not-default-to-ibrs-on-skl.patch b/patches.suse/do-not-default-to-ibrs-on-skl.patch
index 0c5b08475e..1899dbe0be 100644
--- a/patches.suse/do-not-default-to-ibrs-on-skl.patch
+++ b/patches.suse/do-not-default-to-ibrs-on-skl.patch
@@ -9,10 +9,14 @@ https://software.intel.com/security-software-guidance/api-app/sites/default/file
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kernel/cpu/bugs.c | 29 ++---------------------------
+ 1 file changed, 2 insertions(+), 27 deletions(-)
+
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -341,23 +341,6 @@
- return cmd;
+@@ -346,23 +346,6 @@ void arch_smt_update(void)
+ mutex_unlock(&spec_ctrl_mutex);
}
-/* Check for Skylake-like CPUs (for IBRS handling) */
@@ -35,7 +39,7 @@ Signed-off-by: Jiri Kosina <jkosina@suse.cz>
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
-@@ -414,16 +397,8 @@
+@@ -419,16 +402,8 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
}
diff --git a/patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch b/patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch
new file mode 100644
index 0000000000..ed07c65ac5
--- /dev/null
+++ b/patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch
@@ -0,0 +1,32 @@
+From: Hannes Reinecke <hare@suse.de>
+Date: Tue, 14 May 2019 12:36:46 +0200
+Subject: [PATCH] lpfc: validate command in lpfc_sli4_scmd_to_wqidx_distr()
+References: bsc#1129138
+Patch-mainline: Never, SLE15 specific
+
+According to Broadcom we need to validate the command to avoid
+crashes with blk-mq.
+
+Suggested-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/scsi/lpfc/lpfc_scsi.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
+index 100a4a5a5b99..d1aab85f5102 100644
+--- a/drivers/scsi/lpfc/lpfc_scsi.c
++++ b/drivers/scsi/lpfc/lpfc_scsi.c
+@@ -3932,7 +3932,8 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+ uint32_t tag;
+ uint16_t hwq;
+
+- if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
++ if (cmnd && shost_use_blk_mq(cmnd->device->host) &&
++ cmnd->request && cmnd->request->q) {
+ tag = blk_mq_unique_tag(cmnd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+
+--
+2.16.4
+
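
blk_mq_unique_tag() unconditionally dereferences the request attached to the
command, so a command reaching this path without a request (or without a
request queue) would oops; the added checks make the function fall back to
its non-blk-mq distribution instead. A hedged sketch of the guarded pattern,
with a hypothetical helper name and fallback value:

    #include <linux/blk-mq.h>
    #include <scsi/scsi_cmnd.h>

    /* Hypothetical helper: derive a hardware queue index only when the
     * command really has a blk-mq request behind it. */
    static u16 example_pick_hwq(struct scsi_cmnd *cmnd)
    {
            if (cmnd && cmnd->request && cmnd->request->q) {
                    u32 tag = blk_mq_unique_tag(cmnd->request);

                    return blk_mq_unique_tag_to_hwq(tag);
            }

            return 0;       /* fall back to the default queue */
    }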
diff --git a/patches.suse/retpolines-disable-ibrs-on-non-skl.patch b/patches.suse/retpolines-disable-ibrs-on-non-skl.patch
index 5607745b0b..0fae2e51bc 100644
--- a/patches.suse/retpolines-disable-ibrs-on-non-skl.patch
+++ b/patches.suse/retpolines-disable-ibrs-on-non-skl.patch
@@ -32,16 +32,16 @@ Signed-off-by: Jiri Kosina <jkosina@suse.cz>
unsigned int x86_ibrs_enabled(void);
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -27,6 +27,7 @@
- #include <asm/cacheflush.h>
+@@ -30,6 +30,7 @@
#include <asm/intel-family.h>
#include <asm/e820.h>
+ #include <asm/hypervisor.h>
+#include <asm/spec_ctrl.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
-@@ -313,6 +314,23 @@ static enum spectre_v2_mitigation_cmd __
- return cmd;
+@@ -413,6 +414,23 @@ void arch_smt_update(void)
+ mutex_unlock(&spec_ctrl_mutex);
}
+/* Check for Skylake-like CPUs (for IBRS handling) */
@@ -64,7 +64,7 @@ Signed-off-by: Jiri Kosina <jkosina@suse.cz>
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
-@@ -383,6 +401,11 @@ retpoline_auto:
+@@ -483,6 +501,11 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
@@ -76,7 +76,7 @@ Signed-off-by: Jiri Kosina <jkosina@suse.cz>
/* Initialize Indirect Branch Prediction Barrier if supported */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-@@ -680,10 +703,10 @@ static ssize_t cpu_show_common(struct de
+@@ -913,10 +936,10 @@ static ssize_t cpu_show_common(struct de
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
case X86_BUG_SPECTRE_V2:
@@ -91,7 +91,7 @@ Signed-off-by: Jiri Kosina <jkosina@suse.cz>
--- a/arch/x86/kernel/cpu/spec_ctrl.c
+++ b/arch/x86/kernel/cpu/spec_ctrl.c
-@@ -12,8 +12,11 @@
+@@ -13,8 +13,11 @@
* Keep it open for more flags in case needed.
*
* -1 means "not touched by nospec() earlyparam"
@@ -104,7 +104,7 @@ Signed-off-by: Jiri Kosina <jkosina@suse.cz>
static int ibpb_state = -1;
unsigned int notrace x86_ibrs_enabled(void)
-@@ -52,19 +55,22 @@ EXPORT_SYMBOL_GPL(x86_enable_ibrs);
+@@ -53,19 +56,22 @@ EXPORT_SYMBOL_GPL(x86_enable_ibrs);
void x86_spec_check(void)
{
diff --git a/series.conf b/series.conf
index 99b3d40a15..ad92139d63 100644
--- a/series.conf
+++ b/series.conf
@@ -15589,6 +15589,7 @@
patches.suse/0020-perf-x86-intel-uncore-remove-hard-coded-implementation-for-node-id-mapping-location.patch
patches.suse/0021-perf-x86-intel-uncore-handle-non-standard-counter-offset.patch
patches.suse/0022-perf-x86-intel-uncore-add-skylake-server-uncore-support.patch
+ patches.arch/locking-static_keys-provide-declare-and-well-as-define-macros.patch
patches.arch/38-x86-mce-amd-use-msr_ops-misc-in-allocate_threshold_blocks.patch
patches.drivers/00-x86-mce-add-support-for-new-mca_synd-register.patch
patches.drivers/01-edac-mce_amd-print-syndrome-register-value-on-smca-systems.patch
@@ -21742,6 +21743,7 @@
patches.fixes/0016-pNFS-flexfiles-Fix-up-the-ff_layout_write_pagelist-f.patch
patches.fixes/0001-arm64-Use-full-path-in-KBUILD_IMAGE-definition.patch
patches.fixes/0002-arm-Use-full-path-in-KBUILD_IMAGE-definition.patch
+ patches.arch/x86-stop-exporting-msr-index-h-to-userland.patch
patches.drivers/0044-lightnvm-remove-unused-rq-parameter-of-nvme_nvm_rqto.patch
patches.drivers/0002-arm64-traps-fix-userspace-cache-maintenance-emulatio.patch
patches.arch/arm64-ls1043-0049-soc-fsl-qe-get-rid-of-immrbar_virt_to_phys.patch
@@ -23247,6 +23249,7 @@
patches.drivers/tpm-185-tis_spi-Use-DMA-safe-memory-for-SPI-transfers.patch
patches.drivers/tpm-dev-common-Reject-too-short-writes.patch
patches.arch/s390-sles12sp3-08-02-12-kexec-datamover.patch
+ patches.arch/locking-static_keys-improve-uninitialized-key-warning.patch
patches.arch/x86-mce-amd-always-give-panic-severity-for-uc-errors-in-kernel-context.patch
patches.suse/sched-isolcpus-Fix-isolcpus-boot-parameter-handling-.patch
patches.suse/genirq-proc-Return-proper-error-code-when-irq_set_af.patch
@@ -23711,6 +23714,7 @@
patches.drivers/be2net-restore-properly-promisc-mode-after-queues-re.patch
patches.fixes/0002-drm-vc4-Flush-the-caches-before-the-bin-jobs-as-well.patch
patches.fixes/0003-drm-vc4-Fix-NULL-pointer-dereference-in-vc4_save_han.patch
+ patches.arch/sched-core-fix-cpu-max-vs-cpuhotplug-deadlock.patch
patches.drivers/ALSA-hda-Clean-up-ALC299-init-code
patches.drivers/ALSA-hda-realtek-update-ALC225-depop-optimize
patches.drivers/ALSA-hda-realtek-Support-headset-mode-for-ALC215-ALC
@@ -24528,6 +24532,7 @@
patches.fixes/bpf-properly-enforce-index-mask-to-prevent-out-of-bo.patch
patches.drivers/ibmvnic-Fix-partial-success-login-retries.patch
patches.arch/kvm-x86-ia32_arch_capabilities-is-always-supported
+ patches.arch/x86-speculation-simplify-the-cpu-bug-detection-logic.patch
patches.fixes/afs-Fix-directory-permissions-check.patch
patches.drivers/0081-bcache-stop-bcache-device-when-backing-device-is-off.patch
patches.suse/0081-bcache-Move-couple-of-string-arrays-to-sysfs.c.patch
@@ -24642,6 +24647,7 @@
patches.arch/0014-arm64-ssbd-Introduce-thread-flag-to-control-userspac.patch
patches.arch/0015-arm64-ssbd-Add-prctl-interface-for-per-thread-mitiga.patch
patches.fixes/regulator-don-t-return-or-expect-errno-from-of_map_mode.patch
+ patches.arch/x86-bugs-add-amd-s-variant-of-ssb_no.patch
patches.suse/x86-apic-Provide-apic_ack_irq.patch
patches.suse/msft-hv-1660-scsi-vmbus-Add-function-to-report-available-ring-buf.patch
patches.suse/msft-hv-1661-scsi-netvsc-Use-the-vmbus-function-to-calculate-ring.patch
@@ -24766,6 +24772,7 @@
patches.fixes/cifs-Fix-use-after-free-of-a-mid_q_entry.patch
patches.fixes/0001-cifs-Fix-stack-out-of-bounds-in-smb-2-3-_create_leas.patch
patches.fixes/0007-pci-xilinx-add-missing-of_node_put
+ patches.fixes/0001-PCI-xilinx-nwl-Add-missing-of_node_put.patch
patches.suse/sched-fair-Fix-bandwidth-timer-clock-drift-condition.patch
patches.suse/msft-hv-1726-x86-hyper-v-Fix-the-circular-dependency-in-IPI-enlig.patch
patches.drivers/0004-mmc-dw_mmc-fix-card-threshold-control-configuration.patch
@@ -24813,6 +24820,7 @@
patches.drivers/scsi-mpt3sas-swap-i-o-memory-read-value-back-to-cpu-endianness
patches.fixes/0002-rculist-Improve-documentation-for-list_for_each_entr.patch
patches.fixes/stop_machine-Atomically-queue-and-wake-stopper-threads.patch
+ patches.arch/locking-atomics-asm-generic-move-some-macros-from-linux-bitops-h-to-a-new-linux-bits-h-file.patch
patches.suse/msft-hv-1711-x86-hyper-v-Use-cheaper-HVCALL_FLUSH_VIRTUAL_ADDRESS.patch
patches.suse/msft-hv-1714-x86-hyper-v-Implement-hv_do_fast_hypercall16.patch
patches.suse/msft-hv-1715-x86-hyper-v-Use-fast-hypercall-for-HVCALL_SEND_IPI.patch
@@ -25112,8 +25120,10 @@
patches.suse/0126-bcache-panic-fix-for-making-cache-device.patch
patches.drivers/0001-ipmi-ssif-Add-support-for-multi-part-transmit-messag.patch
patches.suse/ACPI-processor-Fix-the-return-value-of-acpi_processor_ids_walk.patch
+ patches.arch/x86-cpu-sanitize-fam6_atom-naming.patch
patches.fixes/x86-kexec-correct-kexec_backup_src_end-off-by-one-error.patch
patches.fixes/resource-include-resource-end-in-walk_-interfaces.patch
+ patches.arch/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch
patches.fixes/ip-fail-fast-on-IP-defrag-errors.patch
patches.drivers/net-ibm-fix-return-type-of-ndo_start_xmit-function.patch
patches.fixes/ipv6-discard-IP-frag-queue-on-more-errors.patch
@@ -25242,6 +25252,7 @@
patches.fixes/acpi-nfit-fix-ars-overflow-continuation.patch
patches.drivers/xhci-add-quirk-to-workaround-the-errata-seen-on-cavium-thunder-x2-soc.patch
patches.drivers/0003-amd-iommu-fix-guest-virtual-apic-log-tail-address-register
+ patches.fixes/nvme-fc-resolve-io-failures-during-connect.patch
patches.fixes/libceph-fall-back-to-sendmsg-for-slab-pages.patch
patches.drivers/net-ena-fix-crash-during-failed-resume-from-hibernat.patch
patches.drivers/net-ena-fix-crash-during-ena_remove.patch
@@ -25257,6 +25268,9 @@
patches.suse/msft-hv-1800-Drivers-hv-vmbus-check-the-creation_status-in-vmbus_.patch
patches.fixes/0001-fscache-Fix-race-in-fscache_op_complete-due-to-split.patch
patches.fixes/fs-fix-lost-error-code-in-dio_complete.patch
+ patches.fixes/sched-smt-make-sched_smt_present-track-topology.patch
+ patches.fixes/sched-smt-expose-sched_smt_present-static-key.patch
+ patches.arch/x86-speculation-rework-smt-state-change.patch
patches.fixes/0001-xen-x86-add-diagnostic-printout-to-xen_mc_flush-in-c.patch
patches.fixes/0001-btrfs-tree-checker-Don-t-check-max-block-group-size-.patch
patches.fixes/scsi-lpfc-fix-block-guard-enablement-on-sli3-adapters.patch
@@ -25269,6 +25283,7 @@
patches.fixes/ipv4-ipv6-netfilter-Adjust-the-frag-mem-limit-when-t.patch
patches.drivers/net-mlx4_core-Correctly-set-PFC-param-if-global-paus.patch
patches.fixes/fuse-continue-to-send-FUSE_RELEASEDIR-when-FUSE_OPEN-returns-ENOSYS.patch
+ patches.fixes/0001-media-vb2-don-t-call-__vb2_queue_cancel-if-vb2_start.patch
patches.drivers/IB-hfi1-Fix-an-out-of-bounds-access-in-get_hw_stats.patch
patches.arch/ibmvnic-Convert-reset-work-item-mutex-to-spin-lock.patch
patches.arch/ibmvnic-Fix-non-atomic-memory-allocation-in-IRQ-cont.patch
@@ -25282,6 +25297,7 @@
patches.arch/kvm-nvmx-set-vm-instruction-error-for-vmptrld-of-unbacked-page
patches.arch/kvm-nvmx-free-the-vmread-vmwrite-bitmaps-if-alloc_kvm_area-fails
patches.arch/kvm-vmx-set-ia32_tsc_aux-for-legacy-mode-guests
+ patches.arch/kvm-x86-report-stibp-on-get_supported_cpuid.patch
patches.arch/kvm-nvmx-nmi-window-and-interrupt-window-exiting-should-wake-l2-from-hlt
patches.arch/powerpc-xmon-Fix-invocation-inside-lock-region.patch
patches.arch/powerpc-perf-Fix-unit_sel-cache_sel-checks.patch
@@ -25344,6 +25360,7 @@
patches.drivers/iommu-amd-unmap-all-mapped-pages-in-error-path-of-map_sg
patches.drivers/iommu-vt-d-fix-memory-leak-in-intel_iommu_put_resv_regions
patches.suse/mm-memory_hotplug-fix-scan_movable_pages-for-giganti.patch
+ patches.arch/x86-speculation-remove-redundant-arch_smt_update-invocation.patch
patches.fixes/0001-s390-qeth-cancel-close_dev-work-before-removing-a-ca.patch
patches.fixes/scsi-target-make-the-pi_prot_format-ConfigFS-path-re.patch
patches.fixes/perf-x86-intel-delay-memory-deallocation-until-x86_pmu_dead_cpu.patch
@@ -25362,6 +25379,8 @@
patches.fixes/0001-crypto-tgr192-fix-unaligned-memory-access.patch
patches.fixes/0001-device-property-Fix-the-length-used-in-PROPERTY_ENTR.patch
patches.suse/intel_th-gth-Fix-an-off-by-one-in-output-unassigning.patch
+ patches.fixes/0001-USB-Add-new-USB-LPM-helpers.patch
+ patches.fixes/0001-USB-Consolidate-LPM-checks-to-avoid-enabling-LPM-twi.patch
patches.fixes/0001-cdc-wdm-pass-return-value-of-recover_from_urb_loss.patch
patches.suse/0001-btrfs-relocation-Delay-reloc-tree-deletion-after-mer.patch
patches.suse/0002-btrfs-qgroup-Refactor-btrfs_qgroup_trace_subtree_swa.patch
@@ -25456,35 +25475,56 @@
patches.fixes/ceph-fix-ci-i_head_snapc-leak.patch
patches.fixes/0002-drm-vc4-Fix-memory-leak-during-gpu-reset.patch
patches.fixes/0001-drm-vc4-Fix-compilation-error-reported-by-kbuild-tes.patch
+ patches.arch/cpu-speculation-add-mitigations-cmdline-option.patch
+ patches.arch/x86-speculation-support-mitigations-cmdline-option.patch
+ patches.arch/powerpc-speculation-support-mitigations-cmdline-option.patch
+ patches.arch/s390-speculation-support-mitigations-cmdline-option.patch
patches.fixes/0001-btrfs-Don-t-panic-when-we-can-t-find-a-root-key.patch
patches.fixes/0001-btrfs-reloc-Fix-NULL-pointer-dereference-due-to-expa.patch
patches.fixes/block-disk_events-introduce-event-flags.patch
patches.fixes/Revert-ide-unexport-DISK_EVENT_MEDIA_CHANGE-for-ide-.patch
patches.fixes/Revert-block-unexport-DISK_EVENT_MEDIA_CHANGE-for-le.patch
patches.fixes/block-check_events-don-t-bother-with-events-if-unsup.patch
+ patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch
patches.drivers/Bluetooth-hidp-fix-buffer-overflow.patch
patches.fixes/0001-UAS-fix-alignment-of-scatter-gather-segments.patch
-
- # davem/net-next
- patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch
-
- # powerpc/linux next
+ patches.fixes/0001-drm-i915-Fix-I915_EXEC_RING_MASK.patch
+ patches.fixes/0002-drm-fb-helper-dpms_legacy-Only-set-on-connectors-in-.patch
patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
-
- # tip/tip
- patches.arch/cpu-speculation-add-mitigations-cmdline-option.patch
- patches.arch/x86-speculation-support-mitigations-cmdline-option.patch
- patches.arch/powerpc-speculation-support-mitigations-cmdline-option.patch
- patches.arch/s390-speculation-support-mitigations-cmdline-option.patch
+ patches.drivers/iommu-vt-d-don-t-request-page-request-irq-under-dmar_global_lock
+ patches.drivers/iommu-vt-d-set-intel_iommu_gfx_mapped-correctly
+ patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu
+ patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch
+ patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch
# out-of-tree patches
patches.kabi/0001-move-power_up_on_resume-flag-to-end-of-structure-for.patch
+ patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
########################################################
# end of sorted patches
########################################################
+ # MDS
+ patches.arch/x86-msr-index-cleanup-bit-defines.patch
+ patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
+ patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
+ patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
+ patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
+ patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
+ patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
+ patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
+ patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
+ patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
+ patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
+ patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
+ patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
+ patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
+ patches.arch/x86-speculation-mds-add-smt-warning-message.patch
+ patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
+ patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
+
########################################################
# Scheduler / Core
######################################################
@@ -25902,6 +25942,7 @@
patches.drivers/nvmet_fc-correct-broken-add_port.patch
patches.suse/cgroup-Export-cgroup_is_descendant.patch
patches.drivers/block-sanity-check-for-integrity-intervals.patch
+ patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch
# Xen
patches.fixes/xen-hold-lock_device_hotplug-throughout-vcpu-hotplug.patch
@@ -26798,7 +26839,6 @@
patches.suse/nospec-fix-forced-cpucaps-ordering.patch
 # disable it temporarily pending upstream discussion
-- patches.arch/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch
patches.arch/x86-speculation-propagate-information-about-rsb-filling-mitigation-to-sysfs.patch
patches.suse/x86-fix-ibrs-reporting.patch