Home Home > GIT Browse
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMiroslav Benes <mbenes@suse.cz>2018-05-15 17:11:24 +0200
committerMiroslav Benes <mbenes@suse.cz>2018-05-15 17:11:24 +0200
commitc2b77c80191a3a3c889255495db3a526b5e43110 (patch)
tree14fe39c6f2b32433500ee1ee91c4e316c68cecc9
parent6ce302f331378b8443e8471cfdbaf1c24b684933 (diff)
parent293ce1f5d5ec4568d31256edb36cacbc17e70fb2 (diff)
Merge branch 'bsc#1083125_12.23' into SLE12-SP3_Update_2
-rw-r--r--callbacks/kgr_patch_callbacks.c761
-rw-r--r--callbacks/kgr_patch_callbacks.h21
-rw-r--r--patch_state.c381
-rw-r--r--patch_state.h51
-rw-r--r--rpm/kgraft-patch.spec4
-rwxr-xr-xscripts/register-patches.sh24
-rwxr-xr-xscripts/tar-up.sh2
7 files changed, 1242 insertions, 2 deletions
diff --git a/callbacks/kgr_patch_callbacks.c b/callbacks/kgr_patch_callbacks.c
new file mode 100644
index 0000000..0ea13d3
--- /dev/null
+++ b/callbacks/kgr_patch_callbacks.c
@@ -0,0 +1,761 @@
+/*
+ * kgraft_patch_callbacks
+ *
+ * Livepatch the kGraft core to provide patch state callback
+ * functionality. In addition, fix bsc#1083125.
+ *
+ * For bsc#1083125, the respective suse-commits are:
+ *
+ * SLE12 commit:
+ * 5ac2e24432264b51601d3704fe6bd8167a076292
+ *
+ * SLE12-SP1 commit:
+ * 2b120e1fc3ba097e3a50fb1fcafd89c8b0585478
+ *
+ * SLE12-SP2 commit:
+ * cffff8cbdaee52a891f421e04d9c043057478f1b
+ *
+ * SLE12-SP3 commit:
+ * 5c90e50d816c957f3e4ffad1572d499f9db89e4f
+ *
+ * Copyright (c) 2018 SUSE
+ * Author: Nicolai Stange <nstange@suse.de>
+ *
+ * Based on the original Linux kernel code. Other copyrights apply.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* from kernel/kgraft.c */
+#define pr_fmt(fmt) "kgr: " fmt
+
+#include <linux/kconfig.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/rwlock.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/kgraft.h>
+#include <linux/sched.h>
+
+#include "kgr_patch_callbacks.h"
+#include "patch_state.h"
+
+
+int kgr_kgr_patch_code(struct kgr_patch_fun *patch_fun, bool final,
+ bool revert, bool replace_revert);
+
+
+enum kgr_find_type;
+
+static rwlock_t *kgr_tasklist_lock;
+static void (*kgr_signal_wake_up_state)(struct task_struct *t,
+ unsigned int state);
+static int (*kgr_schedule_on_each_cpu)(work_func_t func);
+
+static struct workqueue_struct **kgr_kgr_wq;
+static struct delayed_work *kgr_kgr_work;
+static struct mutex *kgr_kgr_in_progress_lock;
+static struct list_head *kgr_kgr_patches;
+static bool __percpu **kgr_kgr_irq_use_new;
+static bool *kgr_kgr_in_progress;
+static bool *kgr_kgr_initialized;
+static struct kgr_patch **kgr_kgr_patch;
+static bool *kgr_kgr_revert;
+static unsigned long (*kgr_kgr_immutable)[BITS_TO_LONGS(1)];
+
+static bool (*kgr_kgr_patch_contains)(const struct kgr_patch *p,
+ const struct kgr_patch_fun *patch_fun);
+static void (*kgr_kgr_patching_failed)(struct kgr_patch *patch,
+ struct kgr_patch_fun *patch_fun,
+ bool process_all);
+static void (*kgr_kgr_handle_irq_cpu)(struct work_struct *work);
+static int (*kgr_kgr_init_ftrace_ops)(struct kgr_patch_fun *patch_fun);
+static struct kgr_patch_fun *
+(*kgr_kgr_get_patch_fun)(const struct kgr_patch_fun *patch_fun,
+ enum kgr_find_type type);
+static int (*kgr_kgr_switch_fops)(struct kgr_patch_fun *patch_fun,
+ struct ftrace_ops *new_fops,
+ struct ftrace_ops *unreg_fops);
+
+static struct {
+ char *name;
+ char **addr;
+} kgr_funcs[] = {
+ { "tasklist_lock", (void *)&kgr_tasklist_lock },
+ { "signal_wake_up_state", (void *)&kgr_signal_wake_up_state },
+ { "schedule_on_each_cpu", (void *)&kgr_schedule_on_each_cpu },
+ { "kgr_wq", (void *)&kgr_kgr_wq },
+ { "kgr_work", (void *)&kgr_kgr_work },
+ { "kgr_in_progress_lock", (void *)&kgr_kgr_in_progress_lock},
+ { "kgr_patches", (void *)&kgr_kgr_patches },
+ { "kgr_irq_use_new", (void *)&kgr_kgr_irq_use_new },
+ { "kgr_in_progress", (void *)&kgr_kgr_in_progress },
+ { "kgr_initialized", (void *)&kgr_kgr_initialized },
+ { "kgr_patch", (void *)&kgr_kgr_patch },
+ { "kgr_revert", (void *)&kgr_kgr_revert },
+ { "kgr_immutable", (void *)&kgr_kgr_immutable },
+ { "kgr_patch_contains", (void *)&kgr_kgr_patch_contains },
+ { "kgr_patching_failed", (void *)&kgr_kgr_patching_failed },
+ { "kgr_handle_irq_cpu", (void *)&kgr_kgr_handle_irq_cpu },
+ { "kgr_init_ftrace_ops", (void *)&kgr_kgr_init_ftrace_ops },
+ { "kgr_get_patch_fun", (void *)&kgr_kgr_get_patch_fun },
+ { "kgr_switch_fops", (void *)&kgr_kgr_switch_fops },
+};
+
+
+/* from include/linux/sched.h */
+/* calls non-exported signal_wake_up_state() */
+static inline void kgr_signal_wake_up(struct task_struct *t, bool resume)
+{
+ kgr_signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+}
+
+
+/* from kernel/kgraft.c */
+/* inlined */
+static void kgr_kgr_refs_inc(void)
+{
+ struct kgr_patch *p;
+
+ list_for_each_entry(p, kgr_kgr_patches, list)
+ p->refs++;
+}
+
+/* inlined */
+static void kgr_kgr_refs_dec(void)
+{
+ struct kgr_patch *p;
+
+ list_for_each_entry(p, kgr_kgr_patches, list)
+ p->refs--;
+}
+
+/* inlined */
+static const char *kgr_kgr_get_objname(const struct kgr_patch_fun *pf)
+{
+ return pf->objname ? pf->objname : "vmlinux";
+}
+
+/* inlined */
+/* inlined */
+/*
+ * Check whether any live task still carries the kGraft in-progress
+ * flag, i.e. whether the migration to the new universe has not yet
+ * completed.  Returns true if patching is still in progress.
+ */
+static bool kgr_kgr_still_patching(void)
+{
+	struct task_struct *p, *t;
+	bool failed = false;
+
+	read_lock(kgr_tasklist_lock);
+	for_each_process_thread(p, t) {
+		/*
+		 * Ignore zombie tasks, that is, tasks with
+		 * ->state == TASK_DEAD.  We also need to check their
+		 * ->on_cpu to be sure that they are not running any code
+		 * and they are really almost dead.
+		 */
+		if (klp_kgraft_task_in_progress(t) && (t->state != TASK_DEAD ||
+				t->on_cpu != 0)) {
+			failed = true;
+			goto unlock;
+		}
+	}
+unlock:
+	read_unlock(kgr_tasklist_lock);
+	return failed;
+}
+
+/* inlined */
+static void kgr_kgr_remove_patches_fast(void)
+{
+ struct kgr_patch *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, kgr_kgr_patches, list) {
+ list_del_init(&p->list);
+ module_put(p->owner);
+ }
+}
+
+/* inlined */
+static void kgr_kgr_finalize_replaced_funs(void)
+{
+ struct kgr_patch_fun *pf;
+ struct kgr_patch *p;
+ int ret;
+
+ list_for_each_entry(p, kgr_kgr_patches, list)
+ kgr_for_each_patch_fun(p, pf) {
+ /*
+ * Function was not reverted, but is no longer used.
+ * Mark it as reverted so the user would not be confused
+ * by sysfs reporting of states.
+ */
+ if (pf->state == KGR_PATCH_APPLIED) {
+ pf->state = KGR_PATCH_REVERTED;
+ continue;
+ }
+
+ ret = kgr_kgr_patch_code(pf, true, true, true);
+ if (ret < 0) {
+ /*
+ * Note: This should not happen. We only disable
+ * slow stubs and if this failed we would BUG in
+ * kgr_switch_fops called by kgr_patch_code. But
+ * leave it here to be sure.
+ */
+ pr_err("finalization for %s:%s,%lu failed (%d). System in inconsistent state with no way out.\n",
+ kgr_kgr_get_objname(pf), pf->name,
+ pf->sympos, ret);
+ BUG();
+ }
+ }
+}
+
+/* inlined */
+static void kgr_kgr_send_fake_signal(void)
+{
+ struct task_struct *p, *t;
+
+ read_lock(kgr_tasklist_lock);
+ for_each_process_thread(p, t) {
+ if (!klp_kgraft_task_in_progress(t))
+ continue;
+
+ /*
+ * There is a small race here. We could see TIF_KGR_IN_PROGRESS
+ * set and decide to wake up a kthread or send a fake signal.
+ * Meanwhile the thread could migrate itself and the action
+ * would be meaningless. It is not serious though.
+ */
+ if (t->flags & PF_KTHREAD) {
+ /*
+ * Wake up a kthread which still has not been migrated.
+ */
+ wake_up_process(t);
+ } else {
+ /*
+ * Send fake signal to all non-kthread tasks which are
+ * still not migrated.
+ */
+ spin_lock_irq(&t->sighand->siglock);
+ kgr_signal_wake_up(t, 0);
+ spin_unlock_irq(&t->sighand->siglock);
+ }
+ }
+ read_unlock(kgr_tasklist_lock);
+}
+
+/* inlined */
+static void kgr_kgr_handle_processes(void)
+{
+ struct task_struct *p, *t;
+
+ read_lock(kgr_tasklist_lock);
+ for_each_process_thread(p, t) {
+ klp_kgraft_mark_task_in_progress(t);
+ }
+ read_unlock(kgr_tasklist_lock);
+}
+
+/* inlined */
+/*
+ * Wake up all kthreads which still carry the in-progress flag so that
+ * they pass through a migration point and clear the flag themselves.
+ */
+static void kgr_kgr_wakeup_kthreads(void)
+{
+	struct task_struct *p, *t;
+
+	read_lock(kgr_tasklist_lock);
+	for_each_process_thread(p, t) {
+		/*
+		 * Wake up kthreads, they will clean the progress flag.
+		 *
+		 * There is a small race here. We could see TIF_KGR_IN_PROGRESS
+		 * set and decide to wake up a kthread. Meanwhile the kthread
+		 * could migrate itself and the waking up would be meaningless.
+		 * It is not serious though.
+		 */
+		if ((t->flags & PF_KTHREAD) &&
+				klp_kgraft_task_in_progress(t)) {
+			/*
+			 * Note: this is incorrect for kthreads still waiting
+			 * for their first wake_up.
+			 */
+			wake_up_process(t);
+		}
+	}
+	read_unlock(kgr_tasklist_lock);
+}
+
+/* inlined */
+static bool kgr_kgr_is_object_loaded(const char *objname)
+{
+ struct module *mod;
+
+ if (!objname)
+ return true;
+
+ mutex_lock(&module_mutex);
+ mod = find_module(objname);
+ mutex_unlock(&module_mutex);
+
+ /*
+ * Do not mess with a work of kgr_module_init() and a going notifier.
+ */
+ return (mod && mod->kgr_alive);
+}
+
+/* inlined */
+static void kgr_kgr_handle_irqs(void)
+{
+ kgr_schedule_on_each_cpu(kgr_kgr_handle_irq_cpu);
+}
+
+enum kgr_find_type {
+ /*
+ * Find previous function variant in respect to stacking. Take
+ * into account even the patch in progress that is considered to be
+ * on top of the stack.
+ */
+ KGR_PREVIOUS,
+ /* Find the last finalized variant of the function on the stack. */
+ KGR_LAST_FINALIZED,
+ /*
+ * Find the last variant of the function on the stack. Take into
+ * account even the patch in progress.
+ */
+ KGR_LAST_EXISTING,
+ /* Find the variant of the function _only_ in the patch in progress. */
+ KGR_IN_PROGRESS,
+ /*
+ * This is the first unused find type. It can be used to check for
+ * invalid value.
+ */
+ KGR_LAST_TYPE
+};
+
+/* inlined */
+static bool kgr_kgr_is_patch_fun(const struct kgr_patch_fun *patch_fun,
+ enum kgr_find_type type)
+{
+ struct kgr_patch_fun *found_pf;
+
+ if (type == KGR_IN_PROGRESS)
+ return patch_fun->patch == *kgr_kgr_patch;
+
+ found_pf = kgr_kgr_get_patch_fun(patch_fun, type);
+ return patch_fun == found_pf;
+}
+
+/* inlined */
+static struct ftrace_ops *
+kgr_kgr_get_old_fops(const struct kgr_patch_fun *patch_fun)
+{
+ struct kgr_patch_fun *pf = kgr_kgr_get_patch_fun(patch_fun, KGR_PREVIOUS);
+
+ return pf ? &pf->ftrace_ops_fast : NULL;
+}
+
+
+/* inlined */
+static int kgr_kgr_revert_replaced_funs(struct kgr_patch *patch)
+{
+ struct kgr_patch *p;
+ struct kgr_patch_fun *pf;
+ unsigned long loc_old_temp;
+ int ret;
+
+ list_for_each_entry(p, kgr_kgr_patches, list)
+ kgr_for_each_patch_fun(p, pf)
+ if (!kgr_kgr_patch_contains(patch, pf)) {
+ /*
+ * Calls from new universe to all functions
+ * being reverted are redirected to loc_old in
+ * the slow stub. We need to call the original
+ * functions and not the previous ones in terms
+ * of stacking, so loc_old is changed to
+ * loc_name. Fast stub is still used, so change
+ * of loc_old is safe.
+ */
+ loc_old_temp = pf->loc_old;
+ pf->loc_old = pf->loc_name;
+
+ ret = kgr_kgr_patch_code(pf, false, true, true);
+ if (ret < 0) {
+ pr_err("cannot revert function %s:%s,%lu in patch %s (%d)\n",
+ kgr_kgr_get_objname(pf), pf->name,
+ pf->sympos, p->name, ret);
+ pf->loc_old = loc_old_temp;
+ kgr_kgr_patching_failed(p, pf, true);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+
+
+/* patched, inlined */
+/*
+ * Finalize the patching transition once no task carries the
+ * in-progress flag anymore: switch every patch_fun of the in-flight
+ * patch to its final state (fast stub on apply, removal on revert),
+ * release per-cpu IRQ tracking data and clear the global in-progress
+ * state.  The livepatch adds the post-patch callback invocation at the
+ * top (+3 lines).
+ */
+static void kgr_kgr_finalize(void)
+{
+	struct kgr_patch_fun *patch_fun;
+	int ret;
+
+	/*
+	 * Patch in callbacks support
+	 * +3 lines
+	 */
+	if (!*kgr_kgr_revert)
+		kgr_patch_state_post_patch_cb();
+
+	mutex_lock(kgr_kgr_in_progress_lock);
+
+	kgr_for_each_patch_fun((*kgr_kgr_patch), patch_fun) {
+		ret = kgr_kgr_patch_code(patch_fun, true, *kgr_kgr_revert, false);
+
+		if (ret < 0) {
+			pr_err("finalization for %s:%s,%lu failed (%d). System in inconsistent state with no way out.\n",
+				kgr_kgr_get_objname(patch_fun), patch_fun->name,
+				patch_fun->sympos, ret);
+			BUG();
+		}
+
+		/*
+		 * When applying the replace_all patch all older patches are
+		 * removed. We need to update loc_old and point it to the
+		 * original function for the patch_funs from replace_all patch.
+		 * The change is safe because the fast stub is used now. The
+		 * correct value might be needed later when the patch is
+		 * reverted.
+		 */
+		if ((*kgr_kgr_patch)->replace_all && !*kgr_kgr_revert)
+			patch_fun->loc_old = patch_fun->loc_name;
+	}
+
+	if ((*kgr_kgr_patch)->replace_all && !*kgr_kgr_revert) {
+		kgr_kgr_finalize_replaced_funs();
+		kgr_kgr_remove_patches_fast();
+	}
+
+	free_percpu(*kgr_kgr_irq_use_new);
+
+	if (*kgr_kgr_revert) {
+		kgr_kgr_refs_dec();
+		module_put((*kgr_kgr_patch)->owner);
+	} else {
+		list_add_tail(&(*kgr_kgr_patch)->list, kgr_kgr_patches);
+	}
+
+	*kgr_kgr_patch = NULL;
+	*kgr_kgr_in_progress = false;
+
+	pr_info("patching succeeded\n");
+
+	mutex_unlock(kgr_kgr_in_progress_lock);
+}
+
+/* patched, calls inlined kgr_finalize() */
+/*
+ * Delayed-work handler: periodically check whether all tasks have
+ * migrated to the new universe.  While some have not, nudge them with
+ * a fake signal / wakeup and re-arm the work; once done, finalize the
+ * patch via the (patched) kgr_finalize().
+ */
+void kgr_kgr_work_fn(struct work_struct *work)
+{
+	static bool printed = false;
+
+	if (kgr_kgr_still_patching()) {
+		if (!printed) {
+			pr_info("still in progress after timeout, will keep"
+					" trying every %d seconds\n",
+				KGR_TIMEOUT);
+			printed = true;
+		}
+		/* send fake signal */
+		kgr_kgr_send_fake_signal();
+		/* recheck again later */
+		queue_delayed_work(*kgr_kgr_wq, kgr_kgr_work, KGR_TIMEOUT * HZ);
+		return;
+	}
+
+	/*
+	 * victory, patching finished, put everything back in shape
+	 * with as little performance impact as possible again
+	 */
+	kgr_kgr_finalize();
+	printed = false;
+}
+
+/* patched */
+int kgr_kgr_modify_kernel(struct kgr_patch *patch, bool revert)
+{
+ struct kgr_patch_fun *patch_fun;
+ int ret;
+
+ if (!*kgr_kgr_initialized) {
+ pr_err("can't patch, not initialized\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(kgr_kgr_in_progress_lock);
+ if (patch->refs) {
+ pr_err("can't patch, this patch is still referenced\n");
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ if (*kgr_kgr_in_progress) {
+ pr_err("can't patch, another patching not yet finalized\n");
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
+ if (revert && list_empty(&patch->list)) {
+ pr_err("can't patch, this one was already reverted\n");
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ *kgr_kgr_irq_use_new = alloc_percpu(bool);
+ if (!*kgr_kgr_irq_use_new) {
+ pr_err("can't patch, cannot allocate percpu data\n");
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ add_taint_module(patch->owner, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
+
+ pr_info("%sing patch '%s'\n", revert ? "revert" : "apply",
+ patch->name);
+
+ set_bit(0, *kgr_kgr_immutable);
+ wmb(); /* set_bit before kgr_handle_processes */
+
+ /*
+ * Set kgr_patch before it can be used in kgr_patching_failed if
+ * something bad happens.
+ */
+ *kgr_kgr_patch = patch;
+
+ /*
+ * We need to revert patches of functions not patched in replace_all
+ * patch. Do that only while applying the replace_all patch.
+ */
+ if (patch->replace_all && !revert) {
+ ret = kgr_kgr_revert_replaced_funs(patch);
+ if (ret)
+ goto err_free;
+ }
+
+ kgr_for_each_patch_fun(patch, patch_fun) {
+ patch_fun->patch = patch;
+
+ ret = kgr_kgr_patch_code(patch_fun, false, revert, false);
+ if (ret < 0) {
+ kgr_kgr_patching_failed(patch, patch_fun,
+ patch->replace_all && !revert);
+ goto err_free;
+ }
+ }
+ *kgr_kgr_in_progress = true;
+ *kgr_kgr_revert = revert;
+ if (revert)
+ list_del_init(&patch->list); /* init for list_empty() above */
+ else if (!patch->replace_all)
+ /* block all older patches if they are not replaced */
+ kgr_kgr_refs_inc();
+ mutex_unlock(kgr_kgr_in_progress_lock);
+
+ kgr_kgr_handle_irqs();
+ kgr_kgr_handle_processes();
+
+ /*
+ * Patch in callbacks support
+ * +5 lines
+ */
+ if (!revert)
+ kgr_patch_state_pre_replace_cb(patch->owner);
+ else
+ kgr_patch_state_pre_revert_cb();
+
+ wmb(); /* clear_bit after kgr_handle_processes */
+ clear_bit(0, *kgr_kgr_immutable);
+
+ /*
+ * There is no need to have an explicit barrier here. wake_up_process()
+ * implies a write barrier. That is every woken up task sees
+ * kgr_immutable cleared.
+ */
+ kgr_kgr_wakeup_kthreads();
+ /*
+ * give everyone time to exit kernel, and check after a while
+ */
+ queue_delayed_work(*kgr_kgr_wq, kgr_kgr_work, KGR_TIMEOUT * HZ);
+
+ return 0;
+err_free:
+ *kgr_kgr_patch = NULL;
+ /* No need for barrier as there are no slow stubs involved */
+ clear_bit(0, *kgr_kgr_immutable);
+ free_percpu(*kgr_kgr_irq_use_new);
+err_unlock:
+ mutex_unlock(kgr_kgr_in_progress_lock);
+
+ return ret;
+}
+
+/* patched */
+int kgr_kgr_patch_code(struct kgr_patch_fun *patch_fun, bool final,
+ bool revert, bool replace_revert)
+{
+ struct ftrace_ops *new_ops = NULL, *unreg_ops = NULL;
+ /*
+ * Fix bsc#1083125
+ * -1 line, +1 line
+ */
+ enum kgr_patch_state prev_state, next_state;
+ int err;
+
+ /*
+ * Fix bsc#1083125
+ * +2 lines
+ */
+ prev_state = patch_fun->state;
+
+ switch (patch_fun->state) {
+ case KGR_PATCH_INIT:
+ if (revert || final || replace_revert)
+ return -EINVAL;
+
+ if (!kgr_kgr_is_object_loaded(patch_fun->objname)) {
+ patch_fun->state = KGR_PATCH_SKIPPED;
+ return 0;
+ }
+
+ err = kgr_kgr_init_ftrace_ops(patch_fun);
+ if (err)
+ return err;
+
+ next_state = KGR_PATCH_SLOW;
+ new_ops = &patch_fun->ftrace_ops_slow;
+ /*
+ * If some previous patch already patched a function, the old
+ * fops need to be disabled, otherwise the new redirection will
+ * never be used.
+ */
+ unreg_ops = kgr_kgr_get_old_fops(patch_fun);
+ break;
+ case KGR_PATCH_SLOW:
+ if (revert || !final || replace_revert)
+ return -EINVAL;
+ next_state = KGR_PATCH_APPLIED;
+ new_ops = &patch_fun->ftrace_ops_fast;
+ unreg_ops = &patch_fun->ftrace_ops_slow;
+ break;
+ case KGR_PATCH_APPLIED:
+ if (!revert || final)
+ return -EINVAL;
+ next_state = KGR_PATCH_REVERT_SLOW;
+ /*
+ * Update ftrace ops only when used. It is always needed for
+ * normal revert and in case of replace_all patch for the last
+ * patch_fun stacked (which has been as such called till now).
+ */
+ if (!replace_revert ||
+ kgr_kgr_is_patch_fun(patch_fun, KGR_LAST_FINALIZED)) {
+ new_ops = &patch_fun->ftrace_ops_slow;
+ unreg_ops = &patch_fun->ftrace_ops_fast;
+ }
+ break;
+ case KGR_PATCH_REVERT_SLOW:
+ if (!revert || !final)
+ return -EINVAL;
+ next_state = KGR_PATCH_REVERTED;
+ /*
+ * Update ftrace only when used. Normal revert removes the slow
+ * ops and enables fast ops from the fallback patch if any. In
+ * case of replace_all patch and reverting old patch_funs we
+ * just need to remove the slow stub and only for the last old
+ * patch_fun. The original code will be used.
+ */
+ if (!replace_revert) {
+ unreg_ops = &patch_fun->ftrace_ops_slow;
+ new_ops = kgr_kgr_get_old_fops(patch_fun);
+ } else if (kgr_kgr_is_patch_fun(patch_fun, KGR_LAST_FINALIZED)) {
+ unreg_ops = &patch_fun->ftrace_ops_slow;
+ }
+ break;
+ case KGR_PATCH_REVERTED:
+ if (!revert || final || replace_revert)
+ return -EINVAL;
+ return 0;
+ case KGR_PATCH_SKIPPED:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Fix bsc#1083125
+ * +4 lines
+ */
+ /* Slow stub has to see KGR_PATCH_REVERT_SLOW state all the time. */
+ if (next_state == KGR_PATCH_REVERT_SLOW)
+ patch_fun->state = next_state;
+
+ /*
+ * In case of error the caller can still have a chance to restore the
+ * previous consistent state.
+ */
+ err = kgr_kgr_switch_fops(patch_fun, new_ops, unreg_ops);
+ /*
+ * Fix bsc#1083125
+ * -2 lines, +4 lines
+ */
+ if (err) {
+ patch_fun->state = prev_state;
+ return err;
+ }
+
+ /*
+ * Fix bsc#1083125
+ * -1 line, +2 lines
+ */
+ if (next_state != KGR_PATCH_REVERT_SLOW)
+ patch_fun->state = next_state;
+
+ pr_debug("redirection for %s:%s,%lu done\n",
+ kgr_kgr_get_objname(patch_fun), patch_fun->name, patch_fun->sympos);
+
+ return 0;
+}
+
+
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+static int kgr_patch_callbacks_kallsyms(void)
+{
+ unsigned long addr;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kgr_funcs); i++) {
+ addr = kallsyms_lookup_name(kgr_funcs[i].name);
+ if (!addr) {
+ pr_err("kgraft-patch: symbol %s not resolved\n",
+ kgr_funcs[i].name);
+ return -ENOENT;
+ }
+
+ *(kgr_funcs[i].addr) = (void *)addr;
+ }
+
+ return 0;
+}
+
+int kgr_patch_callbacks_init(void)
+{
+ return kgr_patch_callbacks_kallsyms();
+}
diff --git a/callbacks/kgr_patch_callbacks.h b/callbacks/kgr_patch_callbacks.h
new file mode 100644
index 0000000..c0a0b2b
--- /dev/null
+++ b/callbacks/kgr_patch_callbacks.h
@@ -0,0 +1,21 @@
+#ifndef _KGR_PATCH_CALLBACKS_H
+#define _KGR_PATCH_CALLBACKS_H
+
+int kgr_patch_callbacks_init(void);
+static inline void kgr_patch_callbacks_cleanup(void) {}
+
+struct work_struct;
+struct kgr_patch;
+struct kgr_patch_fun;
+
+void kgr_kgr_work_fn(struct work_struct *work);
+int kgr_kgr_modify_kernel(struct kgr_patch *patch, bool revert);
+int kgr_kgr_patch_code(struct kgr_patch_fun *patch_fun, bool final,
+ bool revert, bool replace_revert);
+
+#define KGR_PATCH_CALLBACKS_FUNCS \
+ KGR_PATCH(kgr_work_fn, kgr_kgr_work_fn), \
+ KGR_PATCH(kgr_modify_kernel, kgr_kgr_modify_kernel), \
+ KGR_PATCH(kgr_patch_code, kgr_kgr_patch_code), \
+
+#endif
diff --git a/patch_state.c b/patch_state.c
new file mode 100644
index 0000000..89629bf
--- /dev/null
+++ b/patch_state.c
@@ -0,0 +1,381 @@
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "patch_state.h"
+
+/*
+ * Collect a list of kgr_subpatch instances from different modules
+ * with the same id.
+ */
+struct kgr_subpatch_state
+{
+ unsigned long id;
+ struct list_head subpatches;
+
+ struct list_head list;
+
+ struct kgr_subpatch *subpatch_old;
+};
+
+/*
+ * The list of kgr_subpatch_state instances.
+ * Shared among kGraft modules, c.f. the
+ * kallsyms search in kgr_patch_state_init().
+ */
+struct kgr_patch_states
+{
+ unsigned long refcount;
+ struct mutex mutex;
+ struct list_head subpatch_states;
+};
+
+static struct kgr_patch_states *kgr_patch_states;
+
+/*
+ * Module-local initialization count to make kgr_patch_state_init()
+ * idempotent.
+ */
+static unsigned int init_count;
+
+static int __kgr_find_other_module_patch_states(void *data, const char *name,
+ struct module *mod,
+ unsigned long addr)
+{
+ struct kgr_patch_states **pps;
+
+ if (!mod || mod == THIS_MODULE)
+ return 0;
+
+ if (strcmp("kgr_patch_states", name))
+ return 0;
+
+ pps = (struct kgr_patch_states **)addr;
+ if (!*pps)
+ return 0;
+
+ kgr_patch_states = *pps;
+ return 1;
+}
+
+/*
+ * Lazily set up the kgr_patch_states instance shared across kGraft
+ * patch modules.  If another loaded patch module already owns one
+ * (found via the kallsyms walk under module_mutex), reuse it and bump
+ * its refcount; otherwise allocate a fresh instance.  Idempotent per
+ * module via init_count.  Returns 0 on success, -ENOMEM on allocation
+ * failure (init_count is reset so a later call retries).
+ */
+static int kgr_patch_state_init(void)
+{
+	int ret = 0;
+
+	if (init_count++)
+		return 0;
+
+	mutex_lock(&module_mutex);
+	if (kallsyms_on_each_symbol(__kgr_find_other_module_patch_states,
+				    NULL)) {
+		++kgr_patch_states->refcount;
+		goto out;
+	}
+
+	kgr_patch_states = kzalloc(sizeof(*kgr_patch_states), GFP_KERNEL);
+	if (!kgr_patch_states) {
+		ret = -ENOMEM;
+		init_count = 0;
+		goto out;
+	}
+
+	kgr_patch_states->refcount = 1;
+	mutex_init(&kgr_patch_states->mutex);
+	INIT_LIST_HEAD(&kgr_patch_states->subpatch_states);
+
+out:
+	mutex_unlock(&module_mutex);
+	return ret;
+}
+
+/*
+ * Drop one module-local reference taken by kgr_patch_state_init().
+ * When the last module-local user goes away, drop the cross-module
+ * refcount under module_mutex and free the shared state if it reaches
+ * zero; the module-local pointer is cleared either way.
+ */
+static void kgr_patch_state_cleanup(void)
+{
+	if (WARN_ON(!init_count))
+		return;
+
+	if(--init_count || !kgr_patch_states)
+		return;
+
+	mutex_lock(&module_mutex);
+	if (!--kgr_patch_states->refcount)
+		kfree(kgr_patch_states);
+	kgr_patch_states = NULL;
+	mutex_unlock(&module_mutex);
+}
+
+int kgr_subpatch_register(unsigned long id, struct kgr_subpatch *subpatch)
+{
+ struct kgr_subpatch_state *subpatch_state;
+ int ret;
+
+ ret = kgr_patch_state_init();
+ if (ret)
+ return ret;
+
+ /*
+ * Preallocate the data. We might need it at post patch and
+ * it's too late to fail then.
+ */
+ if (subpatch->alloc_data) {
+ subpatch->data = subpatch->alloc_data();
+ if (!subpatch->data) {
+ kgr_patch_state_cleanup();
+ return -ENOMEM;
+ }
+ subpatch->owns_data = true;
+ }
+
+ mutex_lock(&kgr_patch_states->mutex);
+ list_for_each_entry(subpatch_state, &kgr_patch_states->subpatch_states,
+ list) {
+ if (subpatch_state->id == id)
+ goto add_to_list;
+ }
+
+ subpatch_state = kzalloc(sizeof(*subpatch_state), GFP_KERNEL);
+ if (!subpatch_state) {
+ mutex_unlock(&kgr_patch_states->mutex);
+ if (subpatch->data) {
+ subpatch->free_data(subpatch->data);
+ subpatch->data = NULL;
+ subpatch->owns_data = false;
+ }
+ kgr_patch_state_cleanup();
+ return -ENOMEM;
+ }
+
+ subpatch_state->id = id;
+ INIT_LIST_HEAD(&subpatch_state->subpatches);
+ list_add(&subpatch_state->list, &kgr_patch_states->subpatch_states);
+
+add_to_list:
+ subpatch->parent = subpatch_state;
+ list_add(&subpatch->list, &subpatch_state->subpatches);
+ mutex_unlock(&kgr_patch_states->mutex);
+ return 0;
+}
+
+/*
+ * Counterpart of kgr_subpatch_register(): unlink the subpatch from its
+ * kgr_subpatch_state, free its data if this subpatch still owns it,
+ * and free the parent state when the last subpatch for that id is
+ * gone.  Finally drops the shared-state reference.
+ */
+void kgr_subpatch_unregister(struct kgr_subpatch *subpatch)
+{
+	struct kgr_subpatch_state *parent;
+
+	mutex_lock(&kgr_patch_states->mutex);
+	list_del(&subpatch->list);
+	if (subpatch->data && subpatch->owns_data)
+		subpatch->free_data(subpatch->data);
+	subpatch->data = NULL;
+	subpatch->owns_data = false;
+
+	parent = subpatch->parent;
+	if (list_empty(&parent->subpatches)) {
+		list_del(&parent->list);
+		kfree(parent);
+	}
+	mutex_unlock(&kgr_patch_states->mutex);
+	kgr_patch_state_cleanup();
+}
+
+
+static struct kgr_subpatch*
+__kgr_find_subpatch_from_mod(struct kgr_subpatch_state *subpatch_state,
+ struct module *mod)
+{
+ struct kgr_subpatch *subpatch;
+
+ list_for_each_entry(subpatch, &subpatch_state->subpatches, list) {
+ if (subpatch->mod == mod)
+ return subpatch;
+ }
+
+ return NULL;
+}
+
+/* Invoke the subpatch's pre_revert callback, if it provides one. */
+static void __kgr_subpatch_pre_revert(struct kgr_subpatch *subpatch)
+{
+	if (subpatch->pre_revert)
+		subpatch->pre_revert(subpatch->data);
+}
+
+void kgr_patch_state_pre_revert_cb(void)
+{
+ struct kgr_subpatch_state *subpatch_state;
+
+ /*
+ * If there are no subpatches registered, kgr_patch_states is
+ * NULL.
+ */
+ if (!kgr_patch_states)
+ return;
+
+ mutex_lock(&kgr_patch_states->mutex);
+ list_for_each_entry(subpatch_state, &kgr_patch_states->subpatch_states,
+ list) {
+ struct kgr_subpatch *subpatch;
+
+ /*
+ * We know that the to be reverted patch's module is
+ * THIS_MODULE.
+ */
+ subpatch = __kgr_find_subpatch_from_mod(subpatch_state,
+ THIS_MODULE);
+ if (subpatch)
+ __kgr_subpatch_pre_revert(subpatch);
+ }
+ mutex_unlock(&kgr_patch_states->mutex);
+}
+
+static void
+__kgr_subpatch_try_to_migrate(struct kgr_subpatch_state *subpatch_state,
+ struct kgr_subpatch *subpatch_old,
+ struct kgr_subpatch *subpatch_new)
+{
+ struct kgr_subpatch_data *orig_new_data;
+
+ if (!subpatch_old) {
+ /* Nothing to migrate from, treat as new subpatch, if any. */
+ return;
+
+ } else if (!subpatch_new) {
+ /* Nothing to migrate to, treat as revert */
+ __kgr_subpatch_pre_revert(subpatch_old);
+ return;
+ }
+
+ orig_new_data = subpatch_new->data;
+ if (subpatch_new->prepare_migration &&
+ subpatch_new->prepare_migration(subpatch_new, subpatch_old->data)) {
+ /*
+ * Hooray, the new subpatch claims that it can continue on
+ * the old patch's state.
+ */
+ if (subpatch_old->pre_migrate_away)
+ subpatch_old->pre_migrate_away(subpatch_old->data);
+
+ if (subpatch_new->pre_migrate_to)
+ subpatch_new->pre_migrate_to(subpatch_new->data);
+
+ /*
+ * Transfer data ownership. From now on, the new patch
+ * is in charge of freeing it.
+ */
+ subpatch_new->owns_data = true;
+ subpatch_old->owns_data = false;
+
+ /*
+ * If ->prepare_migration() changed subpatch_new->data
+ * to something else, i.e. subpatch_old->data most likely,
+ * free the preallocated data.
+ */
+ if (subpatch_new->data != orig_new_data)
+ subpatch_new->free_data(orig_new_data);
+
+ /*
+ * Remember the previous patch such that the post patch
+ * callback can call ->post_migrate_away() on it.
+ */
+ subpatch_state->subpatch_old = subpatch_old;
+
+ } else {
+ /*
+ * The new patch can't continue on the old patch's
+ * state. Treat this as a revert + completely new
+ * patch application.
+ */
+ __kgr_subpatch_pre_revert(subpatch_old);
+ }
+}
+
+void kgr_patch_state_pre_replace_cb(struct module *new_patch_mod)
+{
+ struct kgr_subpatch_state *subpatch_state;
+
+ /*
+ * If there are no subpatches registered, kgr_patch_states is
+ * NULL.
+ */
+ if (!kgr_patch_states)
+ return;
+
+ /* We know that the replaced patch's module is THIS_MODULE. */
+ mutex_lock(&kgr_patch_states->mutex);
+ list_for_each_entry(subpatch_state, &kgr_patch_states->subpatch_states,
+ list) {
+ struct kgr_subpatch *subpatch;
+ struct kgr_subpatch *subpatch_old = NULL;
+ struct kgr_subpatch *subpatch_new = NULL;
+
+ list_for_each_entry(subpatch, &subpatch_state->subpatches,
+ list) {
+ /*
+ * We know that the replaced patch's module is
+ * THIS_MODULE.
+ */
+ if (subpatch->mod == THIS_MODULE)
+ subpatch_old = subpatch;
+ else if (subpatch->mod == new_patch_mod)
+ subpatch_new = subpatch;
+
+ if (subpatch_old && subpatch_new)
+ break;
+ }
+
+ __kgr_subpatch_try_to_migrate(subpatch_state, subpatch_old,
+ subpatch_new);
+ }
+ mutex_unlock(&kgr_patch_states->mutex);
+}
+
+void kgr_patch_state_post_patch_cb(void)
+{
+ struct kgr_subpatch_state *subpatch_state;
+
+ /*
+ * If there are no subpatches registered, kgr_patch_states is
+ * NULL.
+ */
+ if (!kgr_patch_states)
+ return;
+
+ mutex_lock(&kgr_patch_states->mutex);
+ list_for_each_entry(subpatch_state, &kgr_patch_states->subpatch_states,
+ list) {
+ struct kgr_subpatch *subpatch_new;
+
+ /* We know that the new patch's module is THIS_MODULE. */
+ subpatch_new = __kgr_find_subpatch_from_mod(subpatch_state,
+ THIS_MODULE);
+ if (!subpatch_new)
+ continue;
+
+ if (subpatch_state->subpatch_old) {
+ struct kgr_subpatch *subpatch_old;
+
+ /* This is a clean handover */
+ subpatch_old = subpatch_state->subpatch_old;
+ subpatch_state->subpatch_old = NULL;
+
+ if (subpatch_new->post_migrate_to) {
+ struct kgr_subpatch_data *data;
+
+ data = subpatch_new->data;
+ subpatch_new->post_migrate_to(data);
+ }
+ if (subpatch_old->post_migrate_away) {
+ struct kgr_subpatch_data *data;
+
+ data = subpatch_old->data;
+ subpatch_old->post_migrate_away(data);
+ }
+
+ } else {
+ /*
+ * Completely new subpatch for this issue.
+ * Use the data preallocated at registration
+ * for this subpatch.
+ */
+ if (subpatch_new->post_patch)
+ subpatch_new->post_patch(subpatch_new->data);
+ }
+ }
+ mutex_unlock(&kgr_patch_states->mutex);
+}
diff --git a/patch_state.h b/patch_state.h
new file mode 100644
index 0000000..00c1b26
--- /dev/null
+++ b/patch_state.h
@@ -0,0 +1,51 @@
+#ifndef _KGR_PATCH_STATE_H
+#define _KGR_PATCH_STATE_H
+
+#include <linux/list.h>
+
+struct kgr_subpatch_data
+{
+ unsigned long version;
+};
+
+struct kgr_subpatch
+{
+ struct module *mod;
+
+ struct kgr_subpatch_data* (*alloc_data)(void);
+ void (*free_data)(struct kgr_subpatch_data *data);
+
+ bool (*prepare_migration)(struct kgr_subpatch *self,
+ struct kgr_subpatch_data *prev_data);
+
+ void (*post_patch)(struct kgr_subpatch_data *data);
+ void (*pre_migrate_to)(struct kgr_subpatch_data *data);
+ void (*post_migrate_to)(struct kgr_subpatch_data *data);
+
+ void (*pre_revert)(struct kgr_subpatch_data *data);
+ void (*pre_migrate_away)(struct kgr_subpatch_data *data);
+ void (*post_migrate_away)(struct kgr_subpatch_data *data);
+
+ struct list_head list;
+ struct kgr_subpatch_state *parent;
+
+ struct kgr_subpatch_data *data;
+ bool owns_data;
+};
+
+
+#define KGR_SUBPATCH_ID_BSC(num) \
+ ((1lu << (BITS_PER_LONG - 1)) | (num))
+
+#define KGR_SUBPATCH_ID_CVE(year, num) \
+ (((unsigned long)(year) << (BITS_PER_LONG - 13)) | (num))
+
+int kgr_subpatch_register(unsigned long id, struct kgr_subpatch *subpatch);
+void kgr_subpatch_unregister(struct kgr_subpatch *subpatch);
+
+/* Callbacks called from live patched kGraft core */
+void kgr_patch_state_pre_replace_cb(struct module *new_patch_mod);
+void kgr_patch_state_pre_revert_cb(void);
+void kgr_patch_state_post_patch_cb(void);
+
+#endif
diff --git a/rpm/kgraft-patch.spec b/rpm/kgraft-patch.spec
index 501124c..6c023a6 100644
--- a/rpm/kgraft-patch.spec
+++ b/rpm/kgraft-patch.spec
@@ -31,6 +31,8 @@ Source3: config.sh
Source4: source-timestamp
Source5: shadow.c
Source6: shadow.h
+Source7: patch_state.h
+Source8: patch_state.c
@@KGR_PATCHES_SOURCES@@
BuildRequires: kernel-syms kgraft-devel
ExclusiveArch: @@EXCARCH@@
@@ -48,6 +50,8 @@ cp %_sourcedir/kgr_patch_main.c .
cp %_sourcedir/Makefile .
cp %_sourcedir/shadow.c .
cp %_sourcedir/shadow.h .
+cp %_sourcedir/patch_state.h .
+cp %_sourcedir/patch_state.c .
%build
sed -i 's/@@RPMRELEASE@@/%module_num/g' Makefile
diff --git a/scripts/register-patches.sh b/scripts/register-patches.sh
index a3622f8..c6b1ab6 100755
--- a/scripts/register-patches.sh
+++ b/scripts/register-patches.sh
@@ -42,10 +42,12 @@ kgr_patch_spec_file="$2"
# Generate list of patches
+need_callbacks=0
declare -a kgr_patches
for d in *; do
[ -d "$d" ] || continue
[ x"$d" = xrpm -o x"$d" = xscripts -o x"$d" = xuname_patch ] && continue
+ [ x"$d" = xcallbacks ] && continue
[ -e "$d/kgr_patch_main.c" ] && continue # this is some builddir
if [ ! -f "$d/kgr_patch_${d}.h" ]; then
@@ -53,9 +55,20 @@ for d in *; do
exit 1
fi
+ # Check whether the current fix needs the callback
+ # functionality. If so, the 'callbacks' patch will be added
+ # below.
+ if grep -qr kgr_subpatch_register "$d"; then
+ need_callbacks=1
+ fi
+
kgr_patches[${#kgr_patches[@]}]=$(basename $d)
done
+if [ $need_callbacks -eq 1 ]; then
+ kgr_patches[${#kgr_patches[@]}]=callbacks
+fi
+
# Sort it
kgr_patches=($(
for p in "${kgr_patches[@]}"; do
@@ -125,8 +138,8 @@ EOF
# Finish kgraft-patch.spec:
## Enumerate the per subpatch source *.tar.bz2.
-## Note: Start with Source7
-S=7
+## Note: Start with Source9
+S=9
## First check that none of the to be occupied Source<n> slots has
## been used already.
for i in "${!kgr_patches[@]}"; do
@@ -136,6 +149,13 @@ for i in "${!kgr_patches[@]}"; do
fi
done
+# If need_callbacks == 0 and thus, 'callbacks' is not a member of
+# kgr_patches[], then add it now: the sources will always be needed or
+# else the build fails.
+if [ $need_callbacks -eq 0 ]; then
+ kgr_patches[${#kgr_patches[@]}]=callbacks
+fi
+
KGR_PATCHES_SOURCES=$(
echo -n "# Auto expanded KGR_PATCHES_SOURCES:\n"
for i in "${!kgr_patches[@]}"; do
diff --git a/scripts/tar-up.sh b/scripts/tar-up.sh
index cb58c80..fa404af 100755
--- a/scripts/tar-up.sh
+++ b/scripts/tar-up.sh
@@ -78,6 +78,8 @@ scripts/register-patches.sh $build_dir/kgr_patch_main.c $build_dir/kgraft-patch-
install -m 644 rpm/config.sh $build_dir/config.sh
install -m 644 shadow.c $build_dir
install -m 644 shadow.h $build_dir
+install -m 644 patch_state.h $build_dir
+install -m 644 patch_state.c $build_dir
# create new Makefile in $build_dir
scripts/create-makefile.sh $build_dir