// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
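
/*
 * List of 'struct klp_ops', one per patched function location.  Each entry
 * carries the registered ftrace_ops plus a func_stack of all klp_funcs
 * (newest patch first) that target that location.
 */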
static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}
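
/*
 * The ftrace handler shared by all patched functions.  It runs on entry to
 * the original function and redirects execution to the newest patched
 * version by rewriting the saved instruction pointer.
 */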
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * The ftrace_test_recursion_trylock() will disable preemption,
	 * which is required for the variant of synchronize_rcu() that is
	 * used to allow patching functions where RCU is not watching.
	 * See klp_synchronize_transition() for more details.
	 */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func);

unlock:
	ftrace_test_recursion_unlock(bit);
}
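
/*
 * Remove @func from its klp_ops func_stack.  If it was the only patch for
 * that function, also unregister and free the ftrace_ops.
 */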
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
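
/*
 * Hook @func into ftrace.  The first patch for a given function allocates
 * and registers a new klp_ops; later patches just stack on top of the
 * existing func_stack.
 */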
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
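		/*
		 * No other patch currently hooks this function: find its
		 * ftrace location, then allocate and register a new klp_ops.
		 */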
		unsigned long ftrace_loc;

		ftrace_loc = ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;
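
		/*
		 * FTRACE_OPS_FL_IPMODIFY declares that this handler may
		 * change the instruction pointer; FTRACE_OPS_FL_PERMANENT
		 * keeps the ops registered even when ftrace_enabled is
		 * cleared; FTRACE_OPS_FL_SAVE_REGS is only needed when the
		 * arch cannot supply ftrace_regs via
		 * CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS.
		 */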
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
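		/*
		 * A klp_ops already exists for this function, so another
		 * patch is live here: just push this func on top of the
		 * stack.  The handler always picks the newest entry.
		 */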
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
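
/*
 * Unpatch the patched functions of @obj; with @nops_only, only the
 * dynamically added NOP functions are removed.
 */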
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}
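
/*
 * Patch every function of @obj.  On the first failure, roll back the
 * functions patched so far so the object is left untouched.
 */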
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}

	obj->patched = true;

	return 0;
}
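
/*
 * Unpatch every patched object of @patch.  The @nops_only variant is used
 * by atomic replace to drop the no-longer-needed dynamic NOP entries.
 */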
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}