Home > GIT Browse > SLE11-SP4
summary | refs | log | tree | commit | diff
diff options: context, space, mode
author    Jiri Kosina <jkosina@suse.cz>  2018-06-12 16:25:11 +0200
committer Jiri Kosina <jkosina@suse.cz>  2018-06-12 16:25:11 +0200
commit 1274d5bf8eb2adde02efee6d18a9b1d92713a75c (patch)
tree   dd11ad26eca333dc5341e794b2b35813045d93cf
parent 943535c528dc2f7409b77691de07a07623081b8a (diff)
parent 1b6a5c3a2dfbbd684bfdaad7da129944e5c3e33e (diff)
Merge remote-tracking branch 'origin/users/jroedel/SLE11-SP4/for-next' into SLE11-SP4
Pull SPEC_CTRL MSR KVM syncing fix from Joerg Roedel.

suse-commit: 3af5fe324cc091eb842e172c6365d490e56ea52f
-rw-r--r--  arch/x86/include/asm/cpufeature.h  | 1
-rw-r--r--  arch/x86/kernel/cpu/bugs.c         | 6
-rw-r--r--  arch/x86/kernel/cpu/scattered.c    | 1
-rw-r--r--  arch/x86/kvm/svm.c                 | 6
-rw-r--r--  arch/x86/kvm/vmx.c                 | 6
5 files changed, 10 insertions, 10 deletions
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d925948ddf7f..872b9d7c818c 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -185,6 +185,7 @@
#define X86_FEATURE_SPEC_CTRL ( 7*32+20) /* Control Speculation Control */
#define X86_FEATURE_IBRS ( 7*32+21) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_SSBD ( 7*32+22) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_SPEC_CTRL_MSR ( 7*32+23) /* "" Speculation Control MSR */
#define X86_FEATURE_AMD_SSBD ( 7*32+27) /* "" AMD SSBD implementation */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+28) /* "" Disable Speculative Store Bypass. */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f0eaa2480d59..764d524110c6 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -793,9 +793,6 @@ void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
u64 host = x86_spec_ctrl_base;
- if (!boot_cpu_has(X86_FEATURE_IBRS))
- return;
-
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
@@ -808,9 +805,6 @@ void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
u64 host = x86_spec_ctrl_base;
- if (!boot_cpu_has(X86_FEATURE_IBRS))
- return;
-
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d38ebc870a5e..7f1e02b682ac 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -39,6 +39,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_SPEC_CTRL, CR_EDX,26, 0x00000007, 0 },
+ { X86_FEATURE_SPEC_CTRL_MSR, CR_EDX,26, 0x00000007, 0 },
{ X86_FEATURE_SSBD, CR_EDX,31, 0x00000007, 0 },
{ X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 },
{ X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b6056342e3b8..42dc5ff81995 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3832,7 +3832,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
local_irq_enable();
- if (x86_ibrs_enabled())
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_MSR))
x86_spec_ctrl_set_guest(svm->spec_ctrl);
asm volatile (
@@ -3936,8 +3936,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
reload_tss(vcpu);
- if (x86_ibrs_enabled())
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_MSR)) {
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
x86_spec_ctrl_restore_host(svm->spec_ctrl);
+ }
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 476a7d6019c9..ad37756c360b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6748,7 +6748,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
- if (x86_ibrs_enabled())
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_MSR))
x86_spec_ctrl_set_guest(vmx->spec_ctrl);
vmx->__launched = vmx->loaded_vmcs->launched;
@@ -6851,8 +6851,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
- if (x86_ibrs_enabled())
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_MSR)) {
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
x86_spec_ctrl_restore_host(vmx->spec_ctrl);
+ }
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();