author     Jiri Kosina <jkosina@suse.cz>  2018-06-14 17:06:54 +0200
committer  Jiri Kosina <jkosina@suse.cz>  2018-06-14 17:06:54 +0200
commit     25cce395ede0bdce7f6be685463f6eb6ccb5711e (patch)
tree       5827418e839bbc07af216c776f7c04231e389afe
parent     968a7efc366899d2386318ccfa98eb26bd1ed7c0 (diff)
Xen counterparts of eager FPU implementation.
- xen/i387: use 'restore_fpu_checking()' directly in task switching code (bnc#1087086 CVE-2018-3665).
- xen: x86-non-upstream-eager-fpu (bnc#1087086 CVE-2018-3665).
- Refresh patches.xen/xen3-x86_amd_interlagos_mem_alignment_perf_improve_2_4.patch.

suse-commit: 7550adf917411a8cb7388ecd1d9c5b9e335f9ee7
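The change switches the Xen FPU context-switch path from lazy to eager restore: with eager FPU the next task's state is reloaded unconditionally at switch time, so stale FPU register contents can never leak across tasks through the deferred #NM trap (the CVE-2018-3665 "Lazy FP" issue). A minimal userspace model of the new preload decision in xen_switch_fpu_prepare() follows; the struct, the should_preload() helper, and main() are illustrative stand-ins, not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct task {
	bool used_math;           /* tsk_used_math(): task has FPU state */
	unsigned int fpu_counter; /* recent context switches that used the FPU */
};

static bool eager_fpu = true;     /* use_eager_fpu() */

/* Lazy mode preloads only for FPU-heavy tasks; eager mode always preloads. */
static bool should_preload(const struct task *next)
{
	return next->used_math && (eager_fpu || next->fpu_counter > 5);
}

int main(void)
{
	struct task light = { .used_math = true, .fpu_counter = 1 };

	eager_fpu = false;
	printf("lazy:  preload=%d\n", should_preload(&light)); /* 0: wait for #NM */
	eager_fpu = true;
	printf("eager: preload=%d\n", should_preload(&light)); /* 1: restore now */
	return 0;
}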
-rw-r--r--  arch/x86/include/mach-xen/asm/i387.h  | 18
-rw-r--r--  arch/x86/kernel/cpu/common-xen.c      |  4
-rw-r--r--  arch/x86/kernel/process-xen.c         | 12
-rw-r--r--  arch/x86/kernel/process_32-xen.c      |  5
-rw-r--r--  arch/x86/kernel/process_64-xen.c      |  5
-rw-r--r--  arch/x86/kernel/traps-xen.c           | 43
-rw-r--r--  arch/x86/kernel/xsave.c               |  4
7 files changed, 37 insertions(+), 54 deletions(-)
diff --git a/arch/x86/include/mach-xen/asm/i387.h b/arch/x86/include/mach-xen/asm/i387.h
index 4599bf3ce28e..c402ac311aee 100644
--- a/arch/x86/include/mach-xen/asm/i387.h
+++ b/arch/x86/include/mach-xen/asm/i387.h
@@ -6,14 +6,18 @@
#undef switch_fpu_prepare
#ifndef __ASSEMBLY__
-static inline void xen_thread_fpu_begin(struct task_struct *tsk,
+static inline bool xen_thread_fpu_begin(struct task_struct *tsk,
multicall_entry_t *mcl)
{
- if (mcl) {
+ bool switching = !use_eager_fpu() && mcl;
+
+ if (switching) {
mcl->op = __HYPERVISOR_fpu_taskswitch;
mcl->args[0] = 0;
}
__thread_set_has_fpu(tsk);
+
+ return switching;
}
static inline fpu_switch_t xen_switch_fpu_prepare(struct task_struct *old,
@@ -22,7 +26,8 @@ static inline fpu_switch_t xen_switch_fpu_prepare(struct task_struct *old,
{
fpu_switch_t fpu;
- fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
+ fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
+ new->fpu_counter > 5);
if (__thread_has_fpu(old)) {
if (!__save_init_fpu(old))
fpu_lazy_state_intact(old);
@@ -33,7 +38,7 @@ static inline fpu_switch_t xen_switch_fpu_prepare(struct task_struct *old,
if (fpu.preload) {
__thread_set_has_fpu(new);
prefetch(new->thread.fpu.state);
- } else {
+ } else if (!use_eager_fpu()) {
(*mcl)->op = __HYPERVISOR_fpu_taskswitch;
(*mcl)++->args[0] = 1;
}
@@ -41,11 +46,12 @@ static inline fpu_switch_t xen_switch_fpu_prepare(struct task_struct *old,
old->fpu_counter = 0;
if (fpu.preload) {
new->fpu_counter++;
- if (fpu_lazy_restore(new))
+ if (!use_eager_fpu() && fpu_lazy_restore(new))
fpu.preload = 0;
else
prefetch(new->thread.fpu.state);
- xen_thread_fpu_begin(new, (*mcl)++);
+ if (xen_thread_fpu_begin(new, *mcl))
+ ++*mcl;
}
}
return fpu;
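The i387.h change also makes xen_thread_fpu_begin() report whether it actually consumed a multicall slot: in eager mode no __HYPERVISOR_fpu_taskswitch call is queued (CR0.TS stays clear), so the caller must not advance the batch pointer. A self-contained sketch of that bookkeeping pattern; the names and the opcode constant are illustrative:

#include <stdbool.h>
#include <stddef.h>

struct mc_entry { int op; unsigned long arg0; };

static bool eager_fpu = true;            /* use_eager_fpu() */

/* Queue a "clear TS" hypercall only in lazy mode; report whether the
 * entry was used, as the patched xen_thread_fpu_begin() now does. */
static bool fpu_begin(struct mc_entry *mcl)
{
	bool switching = !eager_fpu && mcl != NULL;

	if (switching) {
		mcl->op = 1;     /* stand-in for __HYPERVISOR_fpu_taskswitch */
		mcl->arg0 = 0;
	}
	return switching;
}

/* Caller side, mirroring `if (xen_thread_fpu_begin(new, *mcl)) ++*mcl;`:
 * the batch pointer only advances past slots that were written. */
static void queue_fpu_switch(struct mc_entry **mcl)
{
	if (fpu_begin(*mcl))
		++*mcl;
}

int main(void)
{
	struct mc_entry batch[4] = { { 0 } }, *next = batch;

	queue_fpu_switch(&next);  /* eager: no entry consumed */
	return next - batch;      /* 0 while eager_fpu is true */
}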
diff --git a/arch/x86/kernel/cpu/common-xen.c b/arch/x86/kernel/cpu/common-xen.c
index 627ec4501533..5befae680091 100644
--- a/arch/x86/kernel/cpu/common-xen.c
+++ b/arch/x86/kernel/cpu/common-xen.c
@@ -818,6 +818,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
+ fpu__init_parse_early_param();
+
if (this_cpu->c_bsp_init)
this_cpu->c_bsp_init(c);
@@ -1430,7 +1432,6 @@ void __cpuinit cpu_init(void)
dbg_restore_debug_regs();
fpu_init();
- xsave_init();
#ifndef CONFIG_XEN
raw_local_save_flags(kernel_eflags);
@@ -1495,6 +1496,5 @@ void __cpuinit cpu_init(void)
dbg_restore_debug_regs();
fpu_init();
- xsave_init();
}
#endif
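Calling fpu__init_parse_early_param() from early_identify_cpu() makes FPU command-line options take effect before any bring-up code consults use_eager_fpu(); in mainline the same hook honours parameters such as eagerfpu=on/off. The two xsave_init() calls drop out of cpu_init(), presumably because the xstate setup now happens on an earlier boot path. A hedged sketch of that kind of early parsing; the helper name, enum, and parsing logic here are stand-ins, not the kernel's:

#include <stdio.h>
#include <string.h>

enum eager_mode { EAGER_AUTO, EAGER_ON, EAGER_OFF };
static enum eager_mode eagerfpu = EAGER_AUTO;

/* Illustrative stand-in for an early-parameter pass: scan the kernel
 * command line for an "eagerfpu=" option before CPU setup runs. */
static void parse_early_fpu_param(const char *cmdline)
{
	const char *p = strstr(cmdline, "eagerfpu=");

	if (!p)
		return;
	p += strlen("eagerfpu=");
	if (!strncmp(p, "on", 2))
		eagerfpu = EAGER_ON;
	else if (!strncmp(p, "off", 3))
		eagerfpu = EAGER_OFF;
}

int main(void)
{
	parse_early_fpu_param("root=/dev/sda1 eagerfpu=on quiet");
	printf("eagerfpu mode: %d\n", eagerfpu);  /* 1 == EAGER_ON */
	return 0;
}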
diff --git a/arch/x86/kernel/process-xen.c b/arch/x86/kernel/process-xen.c
index 197092c244f7..bfdc721aaacb 100644
--- a/arch/x86/kernel/process-xen.c
+++ b/arch/x86/kernel/process-xen.c
@@ -39,7 +39,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
ret = fpu_alloc(&dst->thread.fpu);
if (ret)
return ret;
- fpu_copy(&dst->thread.fpu, &src->thread.fpu);
+ fpu_copy(dst, src);
}
return 0;
}
@@ -129,9 +129,13 @@ void flush_thread(void)
/*
* Forget coprocessor state..
*/
- tsk->fpu_counter = 0;
- clear_fpu(tsk);
- clear_used_math();
+ drop_init_fpu(tsk);
+ /*
+ * Free the FPU state for non xsave platforms. They get reallocated
+ * lazily at the first use.
+ */
+ if (!use_eager_fpu())
+ free_thread_xstate(tsk);
}
static void hard_disable_TSC(void)
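flush_thread() now resets the task's FPU state through drop_init_fpu() and frees the xstate buffer only in lazy mode: under eager switching the buffer is needed again at the very next context switch, so keeping it allocated avoids a pointless free/realloc cycle. The same reasoning removes unlazy_fpu() from prepare_to_copy() and free_thread_xstate() from start_thread() in the two files below, since the exec path already runs flush_thread() first. A compact model of the asymmetry; the types and helpers are illustrative:

#include <stdbool.h>
#include <stdlib.h>

static bool eager_fpu = true;            /* use_eager_fpu() */

struct task {
	void *xstate;                    /* FPU/extended register save area */
	bool used_math;
};

/* drop_init_fpu(): forget the coprocessor state (model only). */
static void drop_fpu_state(struct task *t)
{
	t->used_math = false;
}

/* flush_thread() after the patch: free the save area only when lazy
 * mode will reallocate it on the first #NM fault anyway. */
static void flush_thread_model(struct task *t)
{
	drop_fpu_state(t);
	if (!eager_fpu) {
		free(t->xstate);
		t->xstate = NULL;
	}
}

int main(void)
{
	struct task t = { .xstate = malloc(512), .used_math = true };

	flush_thread_model(&t);          /* eager: buffer stays allocated */
	free(t.xstate);
	return 0;
}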
diff --git a/arch/x86/kernel/process_32-xen.c b/arch/x86/kernel/process_32-xen.c
index e9f9f9964129..cfb0a6b9130f 100644
--- a/arch/x86/kernel/process_32-xen.c
+++ b/arch/x86/kernel/process_32-xen.c
@@ -188,7 +188,6 @@ void release_thread(struct task_struct *dead_task)
*/
void prepare_to_copy(struct task_struct *tsk)
{
- unlazy_fpu(tsk);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
@@ -260,10 +259,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
regs->cs = __USER_CS;
regs->ip = new_ip;
regs->sp = new_sp;
- /*
- * Free the old FP and other extended state
- */
- free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
diff --git a/arch/x86/kernel/process_64-xen.c b/arch/x86/kernel/process_64-xen.c
index d49031bee6c9..bbef297cb0ca 100644
--- a/arch/x86/kernel/process_64-xen.c
+++ b/arch/x86/kernel/process_64-xen.c
@@ -260,7 +260,6 @@ static inline u32 read_32bit_tls(struct task_struct *t, int tls)
*/
void prepare_to_copy(struct task_struct *tsk)
{
- unlazy_fpu(tsk);
}
int copy_thread(unsigned long clone_flags, unsigned long sp,
@@ -349,10 +348,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
regs->cs = _cs;
regs->ss = _ss;
regs->flags = X86_EFLAGS_IF;
- /*
- * Free the old FP and other extended state
- */
- free_thread_xstate(current);
}
void
diff --git a/arch/x86/kernel/traps-xen.c b/arch/x86/kernel/traps-xen.c
index 700fca2f7db7..33f90e21c77e 100644
--- a/arch/x86/kernel/traps-xen.c
+++ b/arch/x86/kernel/traps-xen.c
@@ -765,37 +765,6 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
#endif /* CONFIG_XEN */
/*
- * This gets called with the process already owning the
- * FPU state, and with CR0.TS cleared. It just needs to
- * restore the FPU register state.
- */
-void __math_state_restore(struct task_struct *tsk)
-{
- /* We need a safe address that is cheap to find and that is already
- in L1. We've just brought in "tsk->thread.has_fpu", so use that */
-#define safe_address (tsk->thread.has_fpu)
-
- /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
- is pending. Clear the x87 state here by setting it to fixed
- values. safe_address is a random variable that should be in L1 */
- alternative_input(
- ASM_NOP8 ASM_NOP2,
- "emms\n\t" /* clear stack tags */
- "fildl %P[addr]", /* set F?P to defined value */
- X86_FEATURE_FXSAVE_LEAK,
- [addr] "m" (safe_address));
-
- /*
- * Paranoid restore. send a SIGSEGV if we fail to restore the state.
- */
- if (unlikely(restore_fpu_checking(tsk))) {
- __thread_fpu_end(tsk);
- force_sig(SIGSEGV, tsk);
- return;
- }
-}
-
-/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task
*
@@ -829,14 +798,24 @@ void math_state_restore(void)
}
xen_thread_fpu_begin(tsk, NULL);
- __math_state_restore(tsk);
+
+ /*
+ * Paranoid restore. send a SIGSEGV if we fail to restore the state.
+ */
+ if (unlikely(restore_fpu_checking(tsk))) {
+ drop_init_fpu(tsk);
+ force_sig(SIGSEGV, tsk);
+ return;
+ }
tsk->fpu_counter++;
}
+EXPORT_SYMBOL_GPL(math_state_restore);
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
+ BUG_ON(use_eager_fpu());
#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
struct math_emu_info info = { };
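__math_state_restore() disappears because its body collapses into math_state_restore(): the restore-or-SIGSEGV logic now runs inline, with drop_init_fpu() replacing __thread_fpu_end() on failure. The BUG_ON(use_eager_fpu()) added to do_device_not_available() encodes the key invariant: eager switching never sets CR0.TS, so a #NM trap can only fire in lazy mode. A toy model of that invariant, with all names illustrative:

#include <assert.h>
#include <stdbool.h>

static bool eager_fpu = true;    /* use_eager_fpu() */
static bool cr0_ts;              /* model of the CR0.TS bit */

/* Switching away from an FPU user: lazy mode arms the #NM trap by
 * setting TS; eager mode restores the next task's state directly. */
static void switch_out_model(void)
{
	cr0_ts = !eager_fpu;
}

/* do_device_not_available() after the patch: reaching here with eager
 * FPU enabled would mean TS was set behind the kernel's back. */
static void device_not_available_model(void)
{
	assert(!eager_fpu);      /* the new BUG_ON(use_eager_fpu()) */
	cr0_ts = false;          /* clts, then restore the FPU state */
}

int main(void)
{
	eager_fpu = false;
	switch_out_model();
	if (cr0_ts)
		device_not_available_model();  /* only the lazy path traps */
	return 0;
}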
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index ef0b143e60ca..2bfd300ddd37 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -511,7 +511,11 @@ void __cpuinit eager_fpu_init(void)
* not yet patched to use math_state_restore().
*/
init_fpu(current);
+#ifndef CONFIG_XEN
__thread_fpu_begin(current);
+#else
+ native_thread_fpu_begin(current);
+#endif
if (cpu_has_xsave)
xrstor_state(init_xstate_buf, -1);
else