Home > GIT Browse > SLE11-SP4
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorJiri Kosina <jkosina@suse.cz>2018-07-17 23:20:37 +0200
committerJiri Kosina <jkosina@suse.cz>2018-07-17 23:20:37 +0200
commitf0ce9552960294f461fc65c17c183d0b8979c6a6 (patch)
tree50bd050fa7a65f908c25f8e2b9fefe02d9aca609
parent9ed577e27ae3dbb85b96c21e4602c3df279f8a96 (diff)
parent5db3ed52828f1e1d101df6250b9747d3322e6867 (diff)
Merge remote-tracking branch 'origin/users/jroedel/SLE11-SP4/for-next' into SLE11-SP4SLE11-SP4
Pull speculation fixes from Joerg Roedel.

suse-commit: 1347dbe5a4052ff1e9a3a9754869587eb7267a85
-rw-r--r--  arch/x86/include/asm/spec_ctrl.h   2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c        18
-rw-r--r--  arch/x86/kernel/cpu/spec_ctrl.c   25
-rw-r--r--  arch/x86/kernel/entry_32.S        14
-rw-r--r--  arch/x86/kernel/microcode_core.c   1
5 files changed, 55 insertions, 5 deletions
diff --git a/arch/x86/include/asm/spec_ctrl.h b/arch/x86/include/asm/spec_ctrl.h
index c62e685afca8..ee8f5c2fc4cd 100644
--- a/arch/x86/include/asm/spec_ctrl.h
+++ b/arch/x86/include/asm/spec_ctrl.h
@@ -90,8 +90,10 @@ void x86_disable_ibrs(void);
unsigned int x86_ibrs_enabled(void);
unsigned int x86_ibpb_enabled(void);
void x86_spec_check(void);
+void x86_spec_set_on_each_cpu(void);
int nospec(char *str);
void stuff_RSB(void);
+void ssb_select_mitigation(void);
static inline void x86_ibp_barrier(void)
{
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index a02907968448..563cf7c23f6e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -30,6 +30,8 @@
#include <asm/nospec-branch.h>
#include <asm/spec-ctrl.h>
+static void ssb_init_cmd_line(void);
+
#ifdef CONFIG_X86_32
#ifndef CONFIG_XEN
static int __init no_halt(char *s)
@@ -178,7 +180,7 @@ static void __init check_config(void)
#endif /* CONFIG_X86_32 */
static void __init spectre_v2_select_mitigation(void);
-static void __init ssb_select_mitigation(void);
+void ssb_select_mitigation(void);
static void x86_amd_ssbd_disable(void);
/*
@@ -232,6 +234,7 @@ void __init check_bugs(void)
* Select proper mitigation for any exposure to the Speculative Store
* Bypass vulnerability.
*/
+ ssb_init_cmd_line();
ssb_select_mitigation();
#ifdef CONFIG_X86_32
@@ -538,6 +541,8 @@ enum ssb_mitigation_cmd {
SPEC_STORE_BYPASS_CMD_SECCOMP,
};
+static enum ssb_mitigation_cmd ssb_cmd;
+
static const char *ssb_strings[] = {
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
[SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
@@ -587,7 +592,12 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
return cmd;
}
-static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
+static void ssb_init_cmd_line(void)
+{
+ ssb_cmd = ssb_parse_cmdline();
+}
+
+static enum ssb_mitigation_cmd __ssb_select_mitigation(void)
{
enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
enum ssb_mitigation_cmd cmd;
@@ -595,7 +605,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_SSBD))
return mode;
- cmd = ssb_parse_cmdline();
+ cmd = ssb_cmd;
if (!x86_bug_spec_store_bypass &&
(cmd == SPEC_STORE_BYPASS_CMD_NONE ||
cmd == SPEC_STORE_BYPASS_CMD_AUTO))
@@ -651,7 +661,7 @@ static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
return mode;
}
-static void ssb_select_mitigation()
+void ssb_select_mitigation(void)
{
ssb_mode = __ssb_select_mitigation();
diff --git a/arch/x86/kernel/cpu/spec_ctrl.c b/arch/x86/kernel/cpu/spec_ctrl.c
index 1fa86a4b8757..523bbebf9afe 100644
--- a/arch/x86/kernel/cpu/spec_ctrl.c
+++ b/arch/x86/kernel/cpu/spec_ctrl.c
@@ -7,6 +7,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/spec_ctrl.h>
+#include <asm/cpu.h>
/*
* Keep it open for more flags in case needed.
@@ -63,13 +64,16 @@ EXPORT_SYMBOL_GPL(stuff_RSB);
*/
void x86_spec_check(void)
{
+ unsigned int edx;
if (ibpb_state == 0) {
printk_once(KERN_INFO "IBRS/IBPB: disabled\n");
return;
}
- if (cpuid_edx(7) & BIT(26)) {
+ edx = cpuid_edx(7);
+
+ if (edx & BIT(26)) {
if (ibrs_state == -1) {
/* noone force-disabled IBRS */
ibrs_state = 1;
@@ -80,6 +84,13 @@ void x86_spec_check(void)
setup_force_cpu_cap(X86_FEATURE_SPEC_CTRL);
setup_force_cpu_cap(X86_FEATURE_IBRS);
+
+ if (!boot_cpu_has(X86_FEATURE_SSBD) &&
+ (edx & BIT(31))) {
+ /* We gained SSBD support - initialize the mitigation */
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+ ssb_select_mitigation();
+ }
}
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
@@ -93,6 +104,18 @@ void x86_spec_check(void)
}
EXPORT_SYMBOL_GPL(x86_spec_check);
+static void __x86_spec_set(void *data)
+{
+ x86_spec_ctrl_setup_ap();
+}
+
+void x86_spec_set_on_each_cpu(void)
+{
+ if (boot_cpu_has(X86_FEATURE_SSBD))
+ on_each_cpu(__x86_spec_set, NULL, 1);
+}
+EXPORT_SYMBOL_GPL(x86_spec_set_on_each_cpu);
+
int __init nospec(char *str)
{
/*
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 5f0dc54ec0c0..ea77d9f4914a 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -950,8 +950,22 @@ restore_all:
restore_all_notrace:
CHECK_AND_APPLY_ESPFIX
restore_nocheck:
+
+#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+#else
+ movl PT_CS(%esp), %eax
+ andl $SEGMENT_RPL_MASK, %eax
+#endif
+ cmpl $USER_RPL, %eax
+ jae restore_all_cr3_switch
+
testl $CS_FROM_USER_CR3, PT_CS(%esp)
jz restore_all_no_switch
+
+restore_all_cr3_switch:
andl $(~CS_FROM_USER_CR3), PT_CS(%esp)
SWITCH_TO_USER_CR3 scratch_reg=%eax
restore_all_no_switch:
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index f86e750aa5ed..8ff319e52329 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -298,6 +298,7 @@ static void microcode_check(void)
{
perf_check_microcode();
x86_spec_check();
+ x86_spec_set_on_each_cpu();
cpu_caps_sync_late();
}