author		Miroslav Benes <mbenes@suse.cz>	2018-06-25 14:53:59 +0200
committer	Miroslav Benes <mbenes@suse.cz>	2018-06-25 14:53:59 +0200
commit		a854b32a36e779d1f88ba6efc0c4287a799af2f9
tree		0be263a26df1b1a599efd2bb70bdc43a096f90eb
parent		e1c344530d9e995dbaa420b703c35f02df040992
parent		056ea65863d9a7d72dd8a7a1cc4a98a8052bc945
Merge branch 'bsc#1090338_12.3_a' into SLE12-SP3_Update_2
-rw-r--r--	bsc1090338/kgr_patch_bsc1090338.c	625
-rw-r--r--	bsc1090338/kgr_patch_bsc1090338.h	36
2 files changed, 661 insertions, 0 deletions
diff --git a/bsc1090338/kgr_patch_bsc1090338.c b/bsc1090338/kgr_patch_bsc1090338.c
new file mode 100644
index 0000000..e14689f
--- /dev/null
+++ b/bsc1090338/kgr_patch_bsc1090338.c
@@ -0,0 +1,625 @@
+/*
+ * kgraft_patch_bsc1090338
+ *
+ * Fix for CVE-2018-3665, bsc#1090338
+ *
+ * Upstream commits:
+ * 58122bf1d856 ("x86/fpu: Default eagerfpu=on on all CPUs")
+ * 4ecd16ec7059 ("x86/fpu: Fix math emulation in eager fpu mode")
+ * 6e6867093de3 ("x86/fpu: Fix eager-FPU handling on legacy FPU machines")
+ * 814fb7bb7db5 ("x86/fpu: Don't let userspace set bogus xcomp_bv")
+ *
+ * SLE12 commit:
+ * none yet
+ *
+ * SLE12-SP1 commit:
+ * none yet
+ *
+ * SLE12-SP2 commit:
+ * 73dfaa56b0cc56630790ca5e7dad422aa591cc16 (stable 4.4.90)
+ *
+ * SLE12-SP3 commit:
+ * 19373fd1583ccc3e579fd459f102fba7f8bdf98a
+ * 4ca41b9382bb4c330347e4dc06d5512b2cd6ffac (stable 4.4.90)
+ *
+ * Copyright (c) 2018 SUSE
+ * Author: Nicolai Stange <nstange@suse.de>
+ *
+ * Based on the original Linux kernel code. Other copyrights apply.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#if IS_ENABLED(CONFIG_X86_64)
+
+#define pr_fmt(fmt) "kgraft-patch: " fmt
+
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
+#include <asm/fpu/regset.h>
+#include <asm/syscall.h>
+#include "patch_state.h"
+#include "kgr_patch_bsc1090338.h"
+
+#if IS_ENABLED(CONFIG_X86_32)
+#error "Live patch supports only CONFIG_X86_32=n."
+#endif
+
+#if IS_ENABLED(CONFIG_MATH_EMULATION)
+#error "Live patch supports only CONFIG_MATH_EMULATION=n."
+#endif
+
+
+#define BSC1090338_SUBPATCH_ID KGR_SUBPATCH_ID_BSC(1090338)
+
+
+static struct fpu * __percpu *kgr_fpu_fpregs_owner_ctx;
+static union fpregs_state *kgr_init_fpstate;
+static u64 *kgr_xfeatures_mask;
+static int *kgr_arch_task_struct_size;
+static void (*kgr_fpu__drop)(struct fpu *fpu);
+static int (*kgr_fpu__copy)(struct fpu *dst_fpu, struct fpu *src_fpu);
+static void (*kgr_convert_to_fxsr)(struct task_struct *tsk,
+ const struct user_i387_ia32_struct *env);
+static void (*kgr_copy_init_pkru_to_fpregs)(void);
+static void (*kgr_fpu__activate_fpstate_write)(struct fpu *fpu);
+
+static struct tracepoint *kgr__tracepoint_sched_switch;
+static void (*kgr_synchronize_sched)(void);
+
+
+static struct {
+ char *name;
+ char **addr;
+} kgr_funcs[] = {
+ { "fpu_fpregs_owner_ctx", (void *)&kgr_fpu_fpregs_owner_ctx },
+ { "init_fpstate", (void *)&kgr_init_fpstate },
+ { "xfeatures_mask", (void *)&kgr_xfeatures_mask },
+ { "arch_task_struct_size", (void *)&kgr_arch_task_struct_size },
+ { "fpu__drop", (void *)&kgr_fpu__drop },
+ { "fpu__copy", (void *)&kgr_fpu__copy },
+ { "convert_to_fxsr", (void *)&kgr_convert_to_fxsr },
+ { "copy_init_pkru_to_fpregs", (void *)&kgr_copy_init_pkru_to_fpregs },
+ { "fpu__activate_fpstate_write",
+ (void *)&kgr_fpu__activate_fpstate_write},
+
+ { "__tracepoint_sched_switch", (void *)&kgr__tracepoint_sched_switch },
+ { "synchronize_sched", (void *)&kgr_synchronize_sched },
+};
+
+
+/* from linux/tracepoint.h */
+static inline void kgr_tracepoint_synchronize_unregister(void)
+{
+ kgr_synchronize_sched();
+}
+
+
+/* from arch/x86/include/asm/fpu/internal.h */
+/* use the kallsyms-resolved kgr_fpu_fpregs_owner_ctx */
+static inline void kgr__fpregs_activate(struct fpu *fpu)
+{
+ WARN_ON_FPU(fpu->fpregs_active);
+
+ fpu->fpregs_active = 1;
+ this_cpu_write(*kgr_fpu_fpregs_owner_ctx, fpu);
+}
+
+static inline void kgr_fpregs_activate(struct fpu *fpu)
+{
+ __fpregs_activate_hw();
+ kgr__fpregs_activate(fpu);
+}
+
+static inline void kgr_user_fpu_begin(void)
+{
+ struct fpu *fpu = &current->thread.fpu;
+
+ preempt_disable();
+ if (!fpregs_active())
+ kgr_fpregs_activate(fpu);
+ preempt_enable();
+}
+
+
+/* from arch/x86/kernel/fpu/core.c */
+static inline void kgr_copy_init_fpstate_to_fpregs(void)
+{
+ if (use_xsave())
+ copy_kernel_to_xregs(&kgr_init_fpstate->xsave, -1);
+ else
+ copy_kernel_to_fxregs(&kgr_init_fpstate->fxsave);
+
+ if (boot_cpu_has(X86_FEATURE_OSPKE))
+ kgr_copy_init_pkru_to_fpregs();
+}
+
+
+/* from arch/x86/kernel/fpu/signal.c */
+/* inlined */
+static inline int kgr_check_for_xstate(struct fxregs_state __user *buf,
+ void __user *fpstate,
+ struct _fpx_sw_bytes *fx_sw)
+{
+ int min_xstate_size = sizeof(struct fxregs_state) +
+ sizeof(struct xstate_header);
+ unsigned int magic2;
+
+ if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
+ return -1;
+
+ /* Check for the first magic field and other error scenarios. */
+ if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
+ fx_sw->xstate_size < min_xstate_size ||
+ fx_sw->xstate_size > xstate_size ||
+ fx_sw->xstate_size > fx_sw->extended_size)
+ return -1;
+
+	/*
+	 * Check for the presence of the second magic word at the end of the
+	 * memory layout. This detects the case where the user just copied
+	 * the legacy fpstate layout without copying the extended state
+	 * information in the memory layout.
+	 */
+ if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
+ || magic2 != FP_XSTATE_MAGIC2)
+ return -1;
+
+ return 0;
+}
+
+/* inlined */
+static inline void
+kgr_sanitize_restored_xstate(struct task_struct *tsk,
+ struct user_i387_ia32_struct *ia32_env,
+ u64 xfeatures, int fx_only)
+{
+ struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
+ struct xstate_header *header = &xsave->header;
+
+ if (use_xsave()) {
+ /* These bits must be zero. */
+ memset(header->reserved, 0, 48);
+
+ /*
+ * Init the state that is not present in the memory
+ * layout and not enabled by the OS.
+ */
+ if (fx_only)
+ header->xfeatures = XFEATURE_MASK_FPSSE;
+ else
+ header->xfeatures &= (*kgr_xfeatures_mask & xfeatures);
+ }
+
+ if (use_fxsr()) {
+ /*
+		 * mxcsr reserved bits must be masked to zero for security
+ * reasons.
+ */
+ xsave->i387.mxcsr &= mxcsr_feature_mask;
+
+ kgr_convert_to_fxsr(tsk, ia32_env);
+ }
+}
+
+/* inlined */
+static inline int kgr_copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv,
+ int fx_only)
+{
+ if (use_xsave()) {
+ if ((unsigned long)buf % 64 || fx_only) {
+ u64 init_bv = *kgr_xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+ copy_kernel_to_xregs(&kgr_init_fpstate->xsave, init_bv);
+ return copy_user_to_fxregs(buf);
+ } else {
+ u64 init_bv = *kgr_xfeatures_mask & ~xbv;
+ if (unlikely(init_bv))
+ copy_kernel_to_xregs(&kgr_init_fpstate->xsave, init_bv);
+ return copy_user_to_xregs(buf, xbv);
+ }
+ } else if (use_fxsr()) {
+ return copy_user_to_fxregs(buf);
+ } else
+ return copy_user_to_fregs(buf);
+}
+
+
+
+/* patched */
+int kgr_arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Fix CVE-2018-3665
+ * +1 line
+ */
+ int ret;
+
+ memcpy(dst, src, *kgr_arch_task_struct_size);
+#ifdef CONFIG_VM86
+ dst->thread.vm86 = NULL;
+#endif
+
+ /*
+ * Fix CVE-2018-3665
+ * -1 line, +11 lines
+ */
+ ret = kgr_fpu__copy(&dst->thread.fpu, &src->thread.fpu);
+ if (ret)
+ return ret;
+
+ if (!dst->thread.fpu.fpstate_active && !(dst->flags & PF_KTHREAD)) {
+ struct fpu *fpu = &dst->thread.fpu;
+
+		/* Like fpu__activate_curr(), but w/o the WARN_ON(). */
+ fpstate_init(&fpu->state);
+ fpu->fpstate_active = 1;
+ }
+ return 0;
+}
+
+/* patched */
+void kgr_fpu__clear(struct fpu *fpu)
+{
+ WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+
+ if (!use_eager_fpu()) {
+ /*
+ * Fix CVE-2018-3665
+ * -1 line (comment), +2 lines
+ */
+ preempt_disable();
+
+ kgr_fpu__drop(fpu);
+ /*
+ * Fix CVE-2018-3665
+ * +9 lines
+ */
+ /*
+ * C.f. upstream commit 4ecd16ec7059 ("x86/fpu: Fix
+ * math emulation in eager fpu mode"). Note that we
+ * should always have X86_FEATURE_FPU on
+ * CONFIG_X86_64, but be defensive.
+ */
+		if (static_cpu_has(X86_FEATURE_FPU))
+ fpu__restore(fpu);
+ preempt_enable();
+
+ } else {
+ if (!fpu->fpstate_active) {
+ fpu__activate_curr(fpu);
+ kgr_user_fpu_begin();
+ }
+ kgr_copy_init_fpstate_to_fpregs();
+ }
+}
+
+/* patched */
+int kgr__fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+{
+ int ia32_fxstate = (buf != buf_fx);
+ struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
+ int state_size = xstate_size;
+ u64 xfeatures = 0;
+ int fx_only = 0;
+
+ ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
+ config_enabled(CONFIG_IA32_EMULATION));
+
+ if (!buf) {
+ kgr_fpu__clear(fpu);
+ return 0;
+ }
+
+ if (!access_ok(VERIFY_READ, buf, size))
+ return -EACCES;
+
+ fpu__activate_curr(fpu);
+
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return fpregs_soft_set(current, NULL,
+ 0, sizeof(struct user_i387_ia32_struct),
+ NULL, buf) != 0;
+
+ if (use_xsave()) {
+ struct _fpx_sw_bytes fx_sw_user;
+ if (unlikely(kgr_check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
+ /*
+ * Couldn't find the extended state information in the
+ * memory layout. Restore just the FP/SSE and init all
+ * the other extended state.
+ */
+ state_size = sizeof(struct fxregs_state);
+ fx_only = 1;
+ } else {
+ state_size = fx_sw_user.xstate_size;
+ xfeatures = fx_sw_user.xfeatures;
+ }
+ }
+
+ if (ia32_fxstate) {
+ /*
+ * For 32-bit frames with fxstate, copy the user state to the
+ * thread's fpu state, reconstruct fxstate from the fsave
+ * header. Sanitize the copied state etc.
+ */
+ struct fpu *fpu = &tsk->thread.fpu;
+ struct user_i387_ia32_struct env;
+ int err = 0;
+
+		/*
+		 * Drop the current fpu, which clears fpu->fpstate_active.
+		 * This ensures that a context switch during the copy of the
+		 * new state neither saves nor restores the intermediate
+		 * state and thus cannot corrupt the freshly restored state.
+		 * We are ready to restore/save the state again only once
+		 * fpu->fpstate_active is set.
+		 */
+ kgr_fpu__drop(fpu);
+
+ if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
+ /*
+ * Fix CVE-2018-3665 (not exactly)
+ * -1 line, +3 lines
+ */
+ __copy_from_user(&env, buf, sizeof(env)) ||
+ (state_size > offsetof(struct xregs_state, header) &&
+ fpu->state.xsave.header.xcomp_bv)) {
+ fpstate_init(&fpu->state);
+ err = -1;
+ } else {
+ kgr_sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+ }
+
+ fpu->fpstate_active = 1;
+ /*
+ * Fix CVE-2018-3665
+ * -1 line, +6 lines
+ */
+ /*
+ * C.f. upstream commit 4ecd16ec7059 ("x86/fpu: Fix math
+ * emulation in eager fpu mode"). Note that we should always
+ * have X86_FEATURE_FPU on CONFIG_X86_64, but be defensive.
+ */
+ if (use_eager_fpu() || static_cpu_has(X86_FEATURE_FPU)) {
+ preempt_disable();
+ fpu__restore(fpu);
+ preempt_enable();
+ }
+
+ return err;
+ } else {
+ /*
+ * For 64-bit frames and 32-bit fsave frames, restore the user
+ * state to the registers directly (with exceptions handled).
+ */
+ kgr_user_fpu_begin();
+ if (kgr_copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
+ kgr_fpu__clear(fpu);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* patched */
+int kgr_xstateregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct fpu *fpu = &target->thread.fpu;
+ struct xregs_state *xsave;
+ int ret;
+
+ if (!cpu_has_xsave)
+ return -ENODEV;
+
+ kgr_fpu__activate_fpstate_write(fpu);
+
+ xsave = &fpu->state.xsave;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+
+ /*
+ * Fix CVE-2018-3665 (not exactly)
+ * +3 lines
+ */
+ /* xcomp_bv must be 0 when using uncompacted format */
+ if (!ret && xsave->header.xcomp_bv)
+ ret = -EINVAL;
+
+ /*
+ * mxcsr reserved bits must be masked to zero for security reasons.
+ */
+ xsave->i387.mxcsr &= mxcsr_feature_mask;
+ xsave->header.xfeatures &= *kgr_xfeatures_mask;
+ /*
+ * These bits must be zero.
+ */
+ memset(&xsave->header.reserved, 0, 48);
+
+ /*
+ * Fix CVE-2018-3665 (not exactly)
+ * +5 lines
+ */
+ /*
+ * In case of failure, mark all states as init:
+ */
+ if (ret)
+ fpstate_init(&fpu->state);
+
+ return ret;
+}
+
+
+
+/*
+ * This hook will be installed at the sched_switch tracepoint. It
+ * tricks the subsequently called switch_fpu_prepare() into eager
+ * switching behaviour.
+ */
+static void trick_eager_fpu_sched_switch(void *data, bool preempt,
+ struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct fpu *fpu = &next->thread.fpu;
+ /*
+ * Trick the lazy FPU switching heuristic in
+ * switch_fpu_prepare() into !!use_eager_fpu() behaviour.
+ */
+ if (!fpu->fpstate_active && !(next->flags & PF_KTHREAD)) {
+ if (task_thread_info(next)->status & TS_COMPAT) {
+ /*
+ * This task might have been scheduled out
+ * while in sys32_sigreturn or
+ * sys32_rt_sigreturn. More precisely, in
+ * __fpu__restore_sig()'s !!ia32_fxstate path
+ * where we must not concurrently reinit the FPU
+ * state here.
+ */
+ return;
+ }
+ /*
+ * The next task doesn't have its FPU state
+ * initialized and thus, switch_fpu_prepare()/_finish()
+ * won't update the FP regs.
+ *
+ * This is fpu__activate_curr() w/o the WARN_ON().
+ */
+ fpstate_init(&fpu->state);
+ fpu->fpstate_active = 1;
+ }
+
+ if (fpu->fpstate_active) {
+ /* Touch ->fpu_counter only if needed. */
+ fpu->counter = fpu->counter > 5 ? fpu->counter : 6;
+ }
+}
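
For reference, the lazy-FPU preload heuristic that this hook targets looks roughly as follows in the 4.4-era switch_fpu_prepare() (a sketch of arch/x86/include/asm/fpu/internal.h reproduced from memory, not part of this patch; the exact SLE12-SP3 code may differ). With fpu->counter forced to at least 6 above, the "counter > 5" condition always holds, so the next task's FPU state is preloaded on every context switch even though use_eager_fpu() is false:

/* Sketch only -- illustrates the heuristic, body largely elided. */
static inline fpu_switch_t switch_fpu_prepare(struct fpu *old_fpu,
					      struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * Preload the FPU if the task has used it and either eager
	 * switching is enabled or the last 5+ switches used the FPU.
	 */
	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
		      new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);
	/* ... register save/restore bookkeeping elided ... */
	return fpu;
}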
+
+
+
+static bool sched_switch_probe_registered = false;
+
+static void bsc1090338_activate(struct kgr_subpatch_data *header)
+{
+ int ret;
+
+ if (use_eager_fpu()) {
+ /* Everything's alright already, do nothing. */
+ return;
+ }
+
+ /*
+ * C.f. upstream commit 4ecd16ec7059 ("x86/fpu: Fix math
+ * emulation in eager fpu mode"). Note that we should always
+ * have X86_FEATURE_FPU on CONFIG_X86_64, but be defensive.
+ */
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return;
+
+ ret = tracepoint_probe_register(kgr__tracepoint_sched_switch,
+ trick_eager_fpu_sched_switch, NULL);
+ if (ret) {
+ pr_err("failed to register sched switch probe: %d\n", ret);
+ return;
+ }
+ sched_switch_probe_registered = true;
+}
+
+static void bsc1090338_deactivate(struct kgr_subpatch_data *header)
+{
+ int ret;
+
+ if (!sched_switch_probe_registered)
+ return;
+
+ ret = tracepoint_probe_unregister(kgr__tracepoint_sched_switch,
+ trick_eager_fpu_sched_switch, NULL);
+ if (ret) {
+ /*
+ * That's impossible, but for debugging purposes,
+ * print an error.
+ */
+ pr_err("failed to unregister sched switch probe: %d\n", ret);
+ }
+}
+
+static bool bsc1090338_prepare_migration(struct kgr_subpatch *self,
+ struct kgr_subpatch_data *prev_data)
+{
+ return true;
+}
+
+static struct kgr_subpatch bsc1090338_subpatch = {
+ .mod = THIS_MODULE,
+ .alloc_data = NULL, /* There is no shared state. */
+ .free_data = NULL,
+
+ .prepare_migration = bsc1090338_prepare_migration,
+
+ .post_patch = bsc1090338_activate,
+ .pre_migrate_to = bsc1090338_activate,
+ .pre_revert = bsc1090338_deactivate,
+ .post_migrate_away = bsc1090338_deactivate,
+};
+
+
+static int kgr_patch_bsc1090338_kallsyms(void)
+{
+ unsigned long addr;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kgr_funcs); i++) {
+ addr = kallsyms_lookup_name(kgr_funcs[i].name);
+ if (!addr) {
+ pr_err("symbol %s not resolved\n", kgr_funcs[i].name);
+ return -ENOENT;
+ }
+
+ *(kgr_funcs[i].addr) = (void *)addr;
+ }
+
+ return 0;
+}
+
+int kgr_patch_bsc1090338_init(void)
+{
+ int ret;
+
+ ret = kgr_patch_bsc1090338_kallsyms();
+ if (ret)
+ return ret;
+
+ ret = kgr_subpatch_register(BSC1090338_SUBPATCH_ID,
+ &bsc1090338_subpatch);
+ return ret;
+}
+
+void kgr_patch_bsc1090338_cleanup(void)
+{
+ kgr_subpatch_unregister(&bsc1090338_subpatch);
+
+ if (sched_switch_probe_registered)
+ kgr_tracepoint_synchronize_unregister();
+}
+
+#endif /* CONFIG_X86_64 */
diff --git a/bsc1090338/kgr_patch_bsc1090338.h b/bsc1090338/kgr_patch_bsc1090338.h
new file mode 100644
index 0000000..aae8cdb
--- /dev/null
+++ b/bsc1090338/kgr_patch_bsc1090338.h
@@ -0,0 +1,36 @@
+#ifndef _KGR_PATCH_BSC1090338_H
+#define _KGR_PATCH_BSC1090338_H
+
+#if IS_ENABLED(CONFIG_X86_64)
+
+int kgr_patch_bsc1090338_init(void);
+void kgr_patch_bsc1090338_cleanup(void);
+
+struct fpu;
+struct task_struct;
+struct user_regset;
+void kgr_fpu__clear(struct fpu *fpu);
+int kgr_arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+int kgr__fpu__restore_sig(void __user *buf, void __user *buf_fx, int size);
+int kgr_xstateregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+#define KGR_PATCH_BSC1090338_FUNCS \
+ KGR_PATCH(fpu__clear, kgr_fpu__clear), \
+ KGR_PATCH(arch_dup_task_struct, kgr_arch_dup_task_struct), \
+ KGR_PATCH(__fpu__restore_sig, kgr__fpu__restore_sig), \
+ KGR_PATCH(xstateregs_set, kgr_xstateregs_set), \
+
+
+#else /* !IS_ENABLED(CONFIG_X86_64) */
+
+static inline int kgr_patch_bsc1090338_init(void) { return 0; }
+static inline void kgr_patch_bsc1090338_cleanup(void) {}
+
+#define KGR_PATCH_BSC1090338_FUNCS
+
+#endif /* IS_ENABLED(CONFIG_X86_64) */
+
+#endif
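
For context, here is a minimal sketch of how a kGraft patch module could consume KGR_PATCH_BSC1090338_FUNCS. The module name, the KGR_PATCH() expansion, and the struct kgr_patch layout shown are assumptions modeled on the SLE12 kGraft API (kgr_patch_kernel()/kgr_patch_remove()); the repository's actual top-level sources are not part of this commit and may differ:

/* Hypothetical livepatch_main.c -- illustration only. */
#include <linux/module.h>
#include <linux/kgraft.h>
#include "bsc1090338/kgr_patch_bsc1090338.h"

/* Assumed expansion; the real macro may carry additional fields. */
#define KGR_PATCH(_name, _new_function)		\
	{					\
		.name = #_name,			\
		.new_fun = _new_function,	\
	}

static struct kgr_patch patch = {
	.name = "kgraft_patch_example",
	.owner = THIS_MODULE,
	.patches = {
		KGR_PATCH_BSC1090338_FUNCS
		{ }
	}
};

static int __init kgr_patch_init(void)
{
	int ret;

	/* Resolve kallsyms symbols and register the bsc#1090338 subpatch. */
	ret = kgr_patch_bsc1090338_init();
	if (ret)
		return ret;

	ret = kgr_patch_kernel(&patch);
	if (ret)
		kgr_patch_bsc1090338_cleanup();
	return ret;
}

static void __exit kgr_patch_exit(void)
{
	kgr_patch_remove(&patch);
	kgr_patch_bsc1090338_cleanup();
}

module_init(kgr_patch_init);
module_exit(kgr_patch_exit);
MODULE_LICENSE("GPL");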