1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * core.c - Kernel Live Patching Core
5 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
6 * Copyright (C) 2014 SUSE
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/mutex.h>
14 #include <linux/slab.h>
15 #include <linux/list.h>
16 #include <linux/kallsyms.h>
17 #include <linux/livepatch.h>
18 #include <linux/elf.h>
19 #include <linux/moduleloader.h>
20 #include <linux/completion.h>
21 #include <linux/memory.h>
22 #include <linux/rcupdate.h>
23 #include <asm/cacheflush.h>
27 #include "transition.h"
30 * klp_mutex is a coarse lock which serializes access to klp data. All
31 * accesses to klp-related variables and structures must have mutex protection,
32 * except within the following functions which carefully avoid the need for it:
34 * - klp_ftrace_handler()
35 * - klp_update_patch_state()
37 DEFINE_MUTEX(klp_mutex);
40 * Actively used patches: enabled or in transition. Note that replaced
41 * or disabled patches are not listed even though the related kernel
42 * module still can be loaded.
44 LIST_HEAD(klp_patches);
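/*
 * A minimal sketch of the expected locking pattern (the helper below is
 * hypothetical, not part of this file): any walk of klp_patches, or of a
 * patch's object and function lists, happens with klp_mutex held unless
 * it is one of the two lock-free readers listed above.
 *
 *	static struct klp_patch *klp_find_patch_by_name(const char *name)
 *	{
 *		struct klp_patch *patch;
 *
 *		lockdep_assert_held(&klp_mutex);
 *
 *		klp_for_each_patch(patch) {
 *			if (!strcmp(patch->mod->name, name))
 *				return patch;
 *		}
 *
 *		return NULL;
 *	}
 */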
46 static struct kobject *klp_root_kobj;
48 static bool klp_is_module(struct klp_object *obj)
53 /* sets obj->mod if object is not vmlinux and module is found */
54 static void klp_find_object_module(struct klp_object *obj)
58 if (!klp_is_module(obj))
61 rcu_read_lock_sched();
63 * We do not want to block removal of patched modules and therefore
64 * we do not take a reference here. The patches are removed by
65 * klp_module_going() instead.
67 mod = find_module(obj->name);
69 * Do not interfere with the work of klp_module_coming() and klp_module_going().
70 * Note that the patch might still be needed before klp_module_going()
71 * is called. Module functions can be called even in the GOING state
72 * until mod->exit() finishes. This is especially important for
73 * patches that modify the semantics of the functions.
75 if (mod && mod->klp_alive)
78 rcu_read_unlock_sched();
81 static bool klp_initialized(void)
83 return !!klp_root_kobj;
86 static struct klp_func *klp_find_func(struct klp_object *obj,
87 struct klp_func *old_func)
89 struct klp_func *func;
91 klp_for_each_func(obj, func) {
92 if ((strcmp(old_func->old_name, func->old_name) == 0) &&
93 (old_func->old_sympos == func->old_sympos)) {
101 static struct klp_object *klp_find_object(struct klp_patch *patch,
102 struct klp_object *old_obj)
104 struct klp_object *obj;
106 klp_for_each_object(patch, obj) {
107 if (klp_is_module(old_obj)) {
108 if (klp_is_module(obj) &&
109 strcmp(old_obj->name, obj->name) == 0) {
112 } else if (!klp_is_module(obj)) {
120 struct klp_find_arg {
128 static int klp_match_callback(void *data, unsigned long addr)
130 struct klp_find_arg *args = data;
136 * Finish the search when the symbol is found at the desired position, or
137 * when no position was requested and the symbol turns out not to be unique.
139 if ((args->pos && (args->count == args->pos)) ||
140 (!args->pos && (args->count > 1)))
146 static int klp_find_callback(void *data, const char *name,
147 struct module *mod, unsigned long addr)
149 struct klp_find_arg *args = data;
151 if ((mod && !args->objname) || (!mod && args->objname))
154 if (strcmp(args->name, name))
157 if (args->objname && strcmp(args->objname, mod->name))
160 return klp_match_callback(data, addr);
163 static int klp_find_object_symbol(const char *objname, const char *name,
164 unsigned long sympos, unsigned long *addr)
166 struct klp_find_arg args = {
175 module_kallsyms_on_each_symbol(klp_find_callback, &args);
177 kallsyms_on_each_match_symbol(klp_match_callback, name, &args);
180 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
181 * otherwise ensure the symbol position count matches sympos.
184 pr_err("symbol '%s' not found in symbol table\n", name);
185 else if (args.count > 1 && sympos == 0) {
186 pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
188 } else if (sympos != args.count && sympos > 0) {
189 pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
190 sympos, name, objname ? objname : "vmlinux");
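/*
 * A minimal sketch (symbol and function names are illustrative only) of how
 * a livepatch disambiguates duplicate symbols with old_sympos: 0 requires
 * the symbol to be unique, n selects the n-th occurrence of the symbol in
 * kallsyms for the patched object.
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "t_show",
 *			.new_func = livepatch_t_show,
 *			.old_sympos = 2,	// patch the second "t_show"
 *		}, { }
 *	};
 */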
200 static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
201 unsigned int symndx, Elf_Shdr *relasec,
202 const char *sec_objname)
205 char sym_objname[MODULE_NAME_LEN];
206 char sym_name[KSYM_NAME_LEN];
209 unsigned long sympos, addr;
211 bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
214 * Since the field widths for sym_objname and sym_name in the sscanf()
215 * call are hard-coded and correspond to MODULE_NAME_LEN and
216 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
217 * and KSYM_NAME_LEN have the values we expect them to have.
219 * Because the value of MODULE_NAME_LEN can differ among architectures,
220 * we use the smallest/strictest upper bound possible (56, based on
221 * the current definition of MODULE_NAME_LEN) to prevent overflows.
223 BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);
225 relas = (Elf_Rela *) relasec->sh_addr;
226 /* For each rela in this klp relocation section */
227 for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
228 sym = (Elf_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
229 if (sym->st_shndx != SHN_LIVEPATCH) {
230 pr_err("symbol %s is not marked as a livepatch symbol\n",
231 strtab + sym->st_name);
235 /* Format: .klp.sym.sym_objname.sym_name,sympos */
236 cnt = sscanf(strtab + sym->st_name,
237 ".klp.sym.%55[^.].%511[^,],%lu",
238 sym_objname, sym_name, &sympos);
240 pr_err("symbol %s has an incorrectly formatted name\n",
241 strtab + sym->st_name);
245 sym_vmlinux = !strcmp(sym_objname, "vmlinux");
248 * Prevent module-specific KLP rela sections from referencing
249 * vmlinux symbols. This helps prevent ordering issues with
250 * module special section initializations. Presumably such
251 * symbols are exported and normal relas can be used instead.
253 if (!sec_vmlinux && sym_vmlinux) {
254 pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
259 /* klp_find_object_symbol() treats a NULL objname as vmlinux */
260 ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
261 sym_name, sympos, &addr);
265 sym->st_value = addr;
272 * At a high-level, there are two types of klp relocation sections: those which
273 * reference symbols which live in vmlinux; and those which reference symbols
274 * which live in other modules. This function is called for both types:
276 * 1) When a klp module itself loads, the module code calls this function to
277 * write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
278 * These relocations are written to the klp module text to allow the patched
279 * code/data to reference unexported vmlinux symbols. They're written as
280 * early as possible to ensure that other module init code (e.g.,
281 * jump_label_apply_nops) can access any unexported vmlinux symbols which
282 * might be referenced by the klp module's special sections.
284 * 2) When a to-be-patched module loads -- or is already loaded when a
285 * corresponding klp module loads -- klp code calls this function to write
286 * module-specific klp relocations (.klp.rela.{module}.* sections). These
287 * are written to the klp module text to allow the patched code/data to
288 * reference symbols which live in the to-be-patched module or one of its
289 * module dependencies. Exported symbols are supported, in addition to
290 * unexported symbols, in order to enable late module patching, which allows
291 * the to-be-patched module to be loaded and patched sometime *after* the
292 * klp module is loaded.
294 int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
295 const char *shstrtab, const char *strtab,
296 unsigned int symndx, unsigned int secndx,
300 char sec_objname[MODULE_NAME_LEN];
301 Elf_Shdr *sec = sechdrs + secndx;
304 * Format: .klp.rela.sec_objname.section_name
305 * See comment in klp_resolve_symbols() for an explanation
306 * of the selected field width value.
308 cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
311 pr_err("section %s has an incorrectly formatted name\n",
312 shstrtab + sec->sh_name);
316 if (strcmp(objname ? objname : "vmlinux", sec_objname))
319 ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
323 return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
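/*
 * A minimal, self-contained sketch (userspace, illustrative names only) of
 * the two name formats parsed above.  A klp relocation section is named
 * ".klp.rela.<objname>.<secname>" and each symbol it references is named
 * ".klp.sym.<objname>.<symname>,<sympos>".  The object and symbol names
 * below are made up for the example.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char objname[56], symname[512];
 *		unsigned long sympos;
 *
 *		sscanf(".klp.rela.ext4.text", ".klp.rela.%55[^.]", objname);
 *		printf("section targets object: %s\n", objname);
 *
 *		sscanf(".klp.sym.ext4.ext4_attr_store,0",
 *		       ".klp.sym.%55[^.].%511[^,],%lu",
 *		       objname, symname, &sympos);
 *		printf("symbol: %s in %s, pos %lu\n", symname, objname, sympos);
 *		return 0;
 *	}
 */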
329 * /sys/kernel/livepatch
330 * /sys/kernel/livepatch/<patch>
331 * /sys/kernel/livepatch/<patch>/enabled
332 * /sys/kernel/livepatch/<patch>/transition
333 * /sys/kernel/livepatch/<patch>/force
334 * /sys/kernel/livepatch/<patch>/<object>
335 * /sys/kernel/livepatch/<patch>/<object>/patched
336 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
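/*
 * A minimal userspace sketch of how the attributes above are driven (the
 * patch name "livepatch_sample" is only an example): writing "0" to
 * "enabled" disables a patch or reverses a pending transition, writing "1"
 * to "force" pushes a stuck transition through.  The per-object and
 * per-function directories expose read-only state.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int klp_sysfs_write(const char *path, const char *val)
 *	{
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, val, 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 *	// klp_sysfs_write("/sys/kernel/livepatch/livepatch_sample/enabled", "0");
 */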
338 static int __klp_disable_patch(struct klp_patch *patch);
340 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
341 const char *buf, size_t count)
343 struct klp_patch *patch;
347 ret = kstrtobool(buf, &enabled);
351 patch = container_of(kobj, struct klp_patch, kobj);
353 mutex_lock(&klp_mutex);
355 if (patch->enabled == enabled) {
356 /* already in requested state */
362 * Allow a pending transition to be reversed in either direction. This
363 * might be necessary to complete the transition without forcing it and
364 * breaking system integrity.
366 * Do not allow a disabled patch to be re-enabled.
368 if (patch == klp_transition_patch)
369 klp_reverse_transition();
371 ret = __klp_disable_patch(patch);
376 mutex_unlock(&klp_mutex);
383 static ssize_t enabled_show(struct kobject *kobj,
384 struct kobj_attribute *attr, char *buf)
386 struct klp_patch *patch;
388 patch = container_of(kobj, struct klp_patch, kobj);
389 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
392 static ssize_t transition_show(struct kobject *kobj,
393 struct kobj_attribute *attr, char *buf)
395 struct klp_patch *patch;
397 patch = container_of(kobj, struct klp_patch, kobj);
398 return snprintf(buf, PAGE_SIZE-1, "%d\n",
399 patch == klp_transition_patch);
402 static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
403 const char *buf, size_t count)
405 struct klp_patch *patch;
409 ret = kstrtobool(buf, &val);
416 mutex_lock(&klp_mutex);
418 patch = container_of(kobj, struct klp_patch, kobj);
419 if (patch != klp_transition_patch) {
420 mutex_unlock(&klp_mutex);
424 klp_force_transition();
426 mutex_unlock(&klp_mutex);
431 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
432 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
433 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
434 static struct attribute *klp_patch_attrs[] = {
435 &enabled_kobj_attr.attr,
436 &transition_kobj_attr.attr,
437 &force_kobj_attr.attr,
440 ATTRIBUTE_GROUPS(klp_patch);
442 static ssize_t patched_show(struct kobject *kobj,
443 struct kobj_attribute *attr, char *buf)
445 struct klp_object *obj;
447 obj = container_of(kobj, struct klp_object, kobj);
448 return sysfs_emit(buf, "%d\n", obj->patched);
451 static struct kobj_attribute patched_kobj_attr = __ATTR_RO(patched);
452 static struct attribute *klp_object_attrs[] = {
453 &patched_kobj_attr.attr,
456 ATTRIBUTE_GROUPS(klp_object);
458 static void klp_free_object_dynamic(struct klp_object *obj)
464 static void klp_init_func_early(struct klp_object *obj,
465 struct klp_func *func);
466 static void klp_init_object_early(struct klp_patch *patch,
467 struct klp_object *obj);
469 static struct klp_object *klp_alloc_object_dynamic(const char *name,
470 struct klp_patch *patch)
472 struct klp_object *obj;
474 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
479 obj->name = kstrdup(name, GFP_KERNEL);
486 klp_init_object_early(patch, obj);
492 static void klp_free_func_nop(struct klp_func *func)
494 kfree(func->old_name);
498 static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
499 struct klp_object *obj)
501 struct klp_func *func;
503 func = kzalloc(sizeof(*func), GFP_KERNEL);
507 if (old_func->old_name) {
508 func->old_name = kstrdup(old_func->old_name, GFP_KERNEL);
509 if (!func->old_name) {
515 klp_init_func_early(obj, func);
517 * func->new_func is the same as func->old_func. These addresses are
518 * set when the object is loaded, see klp_init_object_loaded().
520 func->old_sympos = old_func->old_sympos;
526 static int klp_add_object_nops(struct klp_patch *patch,
527 struct klp_object *old_obj)
529 struct klp_object *obj;
530 struct klp_func *func, *old_func;
532 obj = klp_find_object(patch, old_obj);
535 obj = klp_alloc_object_dynamic(old_obj->name, patch);
540 klp_for_each_func(old_obj, old_func) {
541 func = klp_find_func(obj, old_func);
545 func = klp_alloc_func_nop(old_func, obj);
554 * Add 'nop' functions which simply return to the caller to run
555 * the original function. The 'nop' functions are added to a
556 * patch to facilitate a 'replace' mode.
558 static int klp_add_nops(struct klp_patch *patch)
560 struct klp_patch *old_patch;
561 struct klp_object *old_obj;
563 klp_for_each_patch(old_patch) {
564 klp_for_each_object(old_patch, old_obj) {
567 err = klp_add_object_nops(patch, old_obj);
576 static void klp_kobj_release_patch(struct kobject *kobj)
578 struct klp_patch *patch;
580 patch = container_of(kobj, struct klp_patch, kobj);
581 complete(&patch->finish);
584 static struct kobj_type klp_ktype_patch = {
585 .release = klp_kobj_release_patch,
586 .sysfs_ops = &kobj_sysfs_ops,
587 .default_groups = klp_patch_groups,
590 static void klp_kobj_release_object(struct kobject *kobj)
592 struct klp_object *obj;
594 obj = container_of(kobj, struct klp_object, kobj);
597 klp_free_object_dynamic(obj);
600 static struct kobj_type klp_ktype_object = {
601 .release = klp_kobj_release_object,
602 .sysfs_ops = &kobj_sysfs_ops,
603 .default_groups = klp_object_groups,
606 static void klp_kobj_release_func(struct kobject *kobj)
608 struct klp_func *func;
610 func = container_of(kobj, struct klp_func, kobj);
613 klp_free_func_nop(func);
616 static struct kobj_type klp_ktype_func = {
617 .release = klp_kobj_release_func,
618 .sysfs_ops = &kobj_sysfs_ops,
621 static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
623 struct klp_func *func, *tmp_func;
625 klp_for_each_func_safe(obj, func, tmp_func) {
626 if (nops_only && !func->nop)
629 list_del(&func->node);
630 kobject_put(&func->kobj);
634 /* Clean up when a patched object is unloaded */
635 static void klp_free_object_loaded(struct klp_object *obj)
637 struct klp_func *func;
641 klp_for_each_func(obj, func) {
642 func->old_func = NULL;
645 func->new_func = NULL;
649 static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
651 struct klp_object *obj, *tmp_obj;
653 klp_for_each_object_safe(patch, obj, tmp_obj) {
654 __klp_free_funcs(obj, nops_only);
656 if (nops_only && !obj->dynamic)
659 list_del(&obj->node);
660 kobject_put(&obj->kobj);
664 static void klp_free_objects(struct klp_patch *patch)
666 __klp_free_objects(patch, false);
669 static void klp_free_objects_dynamic(struct klp_patch *patch)
671 __klp_free_objects(patch, true);
675 * This function implements the free operations that can be called safely
678 * The operation must be completed by calling klp_free_patch_finish()
681 static void klp_free_patch_start(struct klp_patch *patch)
683 if (!list_empty(&patch->list))
684 list_del(&patch->list);
686 klp_free_objects(patch);
690 * This function implements the free part that must be called outside
693 * It must be called after klp_free_patch_start(), and it has to be
694 * the last function accessing the livepatch structures when the patch
697 static void klp_free_patch_finish(struct klp_patch *patch)
700 * Avoid deadlock with enabled_store() sysfs callback by
701 * calling this outside klp_mutex. It is safe because
702 * this is called when the patch gets disabled and it
703 * cannot get enabled again.
705 kobject_put(&patch->kobj);
706 wait_for_completion(&patch->finish);
708 /* Put the module after the last access to struct klp_patch. */
710 module_put(patch->mod);
714 * The livepatch might be freed from the sysfs interface created by the patch.
715 * This work allows waiting until the interface is destroyed in a separate
718 static void klp_free_patch_work_fn(struct work_struct *work)
720 struct klp_patch *patch =
721 container_of(work, struct klp_patch, free_work);
723 klp_free_patch_finish(patch);
726 void klp_free_patch_async(struct klp_patch *patch)
728 klp_free_patch_start(patch);
729 schedule_work(&patch->free_work);
732 void klp_free_replaced_patches_async(struct klp_patch *new_patch)
734 struct klp_patch *old_patch, *tmp_patch;
736 klp_for_each_patch_safe(old_patch, tmp_patch) {
737 if (old_patch == new_patch)
739 klp_free_patch_async(old_patch);
743 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
749 * NOPs get the address later. The patched module must be loaded,
750 * see klp_init_object_loaded().
752 if (!func->new_func && !func->nop)
755 if (strlen(func->old_name) >= KSYM_NAME_LEN)
758 INIT_LIST_HEAD(&func->stack_node);
759 func->patched = false;
760 func->transition = false;
762 /* The format for the sysfs directory is <function,sympos> where sympos
763 * is the nth occurrence of this symbol in kallsyms for the patched
764 * object. If the user selects 0 for old_sympos, then 1 will be used
765 * since a unique symbol will be the first occurrence.
767 return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
769 func->old_sympos ? func->old_sympos : 1);
772 static int klp_apply_object_relocs(struct klp_patch *patch,
773 struct klp_object *obj)
776 struct klp_modinfo *info = patch->mod->klp_info;
778 for (i = 1; i < info->hdr.e_shnum; i++) {
779 Elf_Shdr *sec = info->sechdrs + i;
781 if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
784 ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
786 patch->mod->core_kallsyms.strtab,
787 info->symndx, i, obj->name);
795 /* parts of the initialization that are done only when the object is loaded */
796 static int klp_init_object_loaded(struct klp_patch *patch,
797 struct klp_object *obj)
799 struct klp_func *func;
802 if (klp_is_module(obj)) {
804 * Only write module-specific relocations here
805 * (.klp.rela.{module}.*). vmlinux-specific relocations were
806 * written earlier during the initialization of the klp module
809 ret = klp_apply_object_relocs(patch, obj);
814 klp_for_each_func(obj, func) {
815 ret = klp_find_object_symbol(obj->name, func->old_name,
817 (unsigned long *)&func->old_func);
821 ret = kallsyms_lookup_size_offset((unsigned long)func->old_func,
822 &func->old_size, NULL);
824 pr_err("kallsyms size lookup failed for '%s'\n",
830 func->new_func = func->old_func;
832 ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
833 &func->new_size, NULL);
835 pr_err("kallsyms size lookup failed for '%s' replacement\n",
844 static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
846 struct klp_func *func;
850 if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
853 obj->patched = false;
856 klp_find_object_module(obj);
858 name = klp_is_module(obj) ? obj->name : "vmlinux";
859 ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
863 klp_for_each_func(obj, func) {
864 ret = klp_init_func(obj, func);
869 if (klp_is_object_loaded(obj))
870 ret = klp_init_object_loaded(patch, obj);
875 static void klp_init_func_early(struct klp_object *obj,
876 struct klp_func *func)
878 kobject_init(&func->kobj, &klp_ktype_func);
879 list_add_tail(&func->node, &obj->func_list);
882 static void klp_init_object_early(struct klp_patch *patch,
883 struct klp_object *obj)
885 INIT_LIST_HEAD(&obj->func_list);
886 kobject_init(&obj->kobj, &klp_ktype_object);
887 list_add_tail(&obj->node, &patch->obj_list);
890 static void klp_init_patch_early(struct klp_patch *patch)
892 struct klp_object *obj;
893 struct klp_func *func;
895 INIT_LIST_HEAD(&patch->list);
896 INIT_LIST_HEAD(&patch->obj_list);
897 kobject_init(&patch->kobj, &klp_ktype_patch);
898 patch->enabled = false;
899 patch->forced = false;
900 INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
901 init_completion(&patch->finish);
903 klp_for_each_object_static(patch, obj) {
904 klp_init_object_early(patch, obj);
906 klp_for_each_func_static(obj, func) {
907 klp_init_func_early(obj, func);
912 static int klp_init_patch(struct klp_patch *patch)
914 struct klp_object *obj;
917 ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
921 if (patch->replace) {
922 ret = klp_add_nops(patch);
927 klp_for_each_object(patch, obj) {
928 ret = klp_init_object(patch, obj);
933 list_add_tail(&patch->list, &klp_patches);
938 static int __klp_disable_patch(struct klp_patch *patch)
940 struct klp_object *obj;
942 if (WARN_ON(!patch->enabled))
945 if (klp_transition_patch)
948 klp_init_transition(patch, KLP_UNPATCHED);
950 klp_for_each_object(patch, obj)
952 klp_pre_unpatch_callback(obj);
955 * Enforce the order of the func->transition writes in
956 * klp_init_transition() and the TIF_PATCH_PENDING writes in
957 * klp_start_transition(). In the rare case where klp_ftrace_handler()
958 * is called shortly after klp_update_patch_state() switches the task,
959 * this ensures the handler sees that func->transition is set.
963 klp_start_transition();
964 patch->enabled = false;
965 klp_try_complete_transition();
970 static int __klp_enable_patch(struct klp_patch *patch)
972 struct klp_object *obj;
975 if (klp_transition_patch)
978 if (WARN_ON(patch->enabled))
981 pr_notice("enabling patch '%s'\n", patch->mod->name);
983 klp_init_transition(patch, KLP_PATCHED);
986 * Enforce the order of the func->transition writes in
987 * klp_init_transition() and the ops->func_stack writes in
988 * klp_patch_object(), so that klp_ftrace_handler() will see the
989 * func->transition updates before the handler is registered and the
990 * new funcs become visible to the handler.
994 klp_for_each_object(patch, obj) {
995 if (!klp_is_object_loaded(obj))
998 ret = klp_pre_patch_callback(obj);
1000 pr_warn("pre-patch callback failed for object '%s'\n",
1001 klp_is_module(obj) ? obj->name : "vmlinux");
1005 ret = klp_patch_object(obj);
1007 pr_warn("failed to patch object '%s'\n",
1008 klp_is_module(obj) ? obj->name : "vmlinux");
1013 klp_start_transition();
1014 patch->enabled = true;
1015 klp_try_complete_transition();
1019 pr_warn("failed to enable patch '%s'\n", patch->mod->name);
1021 klp_cancel_transition();
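/*
 * A minimal sketch (callback names are illustrative only) of the optional
 * (un)patch callbacks invoked around klp_patch_object() and
 * klp_unpatch_object().  Only pre_patch may veto the operation by
 * returning an error; the other callbacks return void.
 *
 *	static int demo_pre_patch(struct klp_object *obj)
 *	{
 *		pr_info("about to patch object '%s'\n",
 *			obj->name ? obj->name : "vmlinux");
 *		return 0;	// non-zero aborts patching
 *	}
 *
 *	static void demo_post_unpatch(struct klp_object *obj)
 *	{
 *		pr_info("object '%s' fully unpatched\n",
 *			obj->name ? obj->name : "vmlinux");
 *	}
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.name = NULL,		// NULL means vmlinux
 *			.funcs = funcs,		// array of struct klp_func
 *			.callbacks = {
 *				.pre_patch = demo_pre_patch,
 *				.post_unpatch = demo_post_unpatch,
 *			},
 *		}, { }
 *	};
 */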
1026 * klp_enable_patch() - enable the livepatch
1027 * @patch: patch to be enabled
1029 * Initializes the data structure associated with the patch, creates the sysfs
1030 * interface, performs the needed symbol lookups and code relocations,
1031 * and registers the patched functions with ftrace.
1033 * This function is supposed to be called from the livepatch module_init()
1036 * Return: 0 on success, otherwise error
1038 int klp_enable_patch(struct klp_patch *patch)
1041 struct klp_object *obj;
1043 if (!patch || !patch->mod || !patch->objs)
1046 klp_for_each_object_static(patch, obj) {
1052 if (!is_livepatch_module(patch->mod)) {
1053 pr_err("module %s is not marked as a livepatch module\n",
1058 if (!klp_initialized())
1061 if (!klp_have_reliable_stack()) {
1062 pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
1063 pr_warn("The livepatch transition may never complete.\n");
1066 mutex_lock(&klp_mutex);
1068 if (!klp_is_patch_compatible(patch)) {
1069 pr_err("Livepatch patch (%s) is not compatible with the already installed livepatches.\n",
1071 mutex_unlock(&klp_mutex);
1075 if (!try_module_get(patch->mod)) {
1076 mutex_unlock(&klp_mutex);
1080 klp_init_patch_early(patch);
1082 ret = klp_init_patch(patch);
1086 ret = __klp_enable_patch(patch);
1090 mutex_unlock(&klp_mutex);
1095 klp_free_patch_start(patch);
1097 mutex_unlock(&klp_mutex);
1099 klp_free_patch_finish(patch);
1103 EXPORT_SYMBOL_GPL(klp_enable_patch);
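/*
 * A condensed sketch of a caller, loosely modelled on
 * samples/livepatch/livepatch-sample.c in the kernel tree: klp_enable_patch()
 * is called from module_init() as described above.  A cumulative patch would
 * additionally set .replace = true so that previously installed patches are
 * replaced (see klp_add_nops() above).
 *
 *	#include <linux/module.h>
 *	#include <linux/kernel.h>
 *	#include <linux/seq_file.h>
 *	#include <linux/livepatch.h>
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_puts(m, "this has been live patched\n");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			// name == NULL means the symbol lives in vmlinux
 *			.funcs = funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 *	static int livepatch_init(void)
 *	{
 *		return klp_enable_patch(&patch);
 *	}
 *
 *	static void livepatch_exit(void)
 *	{
 *	}
 *
 *	module_init(livepatch_init);
 *	module_exit(livepatch_exit);
 *	MODULE_LICENSE("GPL");
 *	MODULE_INFO(livepatch, "Y");
 */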
1106 * This function unpatches objects from the replaced livepatches.
1108 * We could be pretty aggressive here. It is called in the situation where
1109 * these structures are no longer accessed from the ftrace handler.
1110 * All functions are redirected by the klp_transition_patch. They
1111 * either use the new code or still run the original code thanks to
1112 * the special nop function patches.
1114 * The only exception is when the transition was forced. In this case,
1115 * klp_ftrace_handler() might still see the replaced patch on the stack.
1116 * Fortunately, it is carefully designed to work with removed functions
1117 * thanks to RCU. We only have to keep the patches on the system. Also
1118 * this is handled transparently by patch->module_put.
1120 void klp_unpatch_replaced_patches(struct klp_patch *new_patch)
1122 struct klp_patch *old_patch;
1124 klp_for_each_patch(old_patch) {
1125 if (old_patch == new_patch)
1128 old_patch->enabled = false;
1129 klp_unpatch_objects(old_patch);
1134 * This function removes the dynamically allocated 'nop' functions.
1136 * We could be pretty aggressive. NOPs do not change the existing
1137 * behavior except for adding an unnecessary delay in the ftrace handler.
1139 * It is safe even when the transition was forced. The ftrace handler
1140 * will see a valid ops->func_stack entry thanks to RCU.
1142 * We could even free the NOPs structures. They must be the last entry
1143 * in ops->func_stack. Therefore unregister_ftrace_function() is called.
1144 * It does the same as klp_synchronize_transition() to make sure that
1145 * nobody is inside the ftrace handler once the operation finishes.
1147 * IMPORTANT: It must be called right after removing the replaced patches!
1149 void klp_discard_nops(struct klp_patch *new_patch)
1151 klp_unpatch_objects_dynamic(klp_transition_patch);
1152 klp_free_objects_dynamic(klp_transition_patch);
1156 * Remove parts of patches that touch a given kernel module. The list of
1157 * patches processed might be limited. When limit is NULL, all patches
1160 static void klp_cleanup_module_patches_limited(struct module *mod,
1161 struct klp_patch *limit)
1163 struct klp_patch *patch;
1164 struct klp_object *obj;
1166 klp_for_each_patch(patch) {
1170 klp_for_each_object(patch, obj) {
1171 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1174 if (patch != klp_transition_patch)
1175 klp_pre_unpatch_callback(obj);
1177 pr_notice("reverting patch '%s' on unloading module '%s'\n",
1178 patch->mod->name, obj->mod->name);
1179 klp_unpatch_object(obj);
1181 klp_post_unpatch_callback(obj);
1183 klp_free_object_loaded(obj);
1189 int klp_module_coming(struct module *mod)
1192 struct klp_patch *patch;
1193 struct klp_object *obj;
1195 if (WARN_ON(mod->state != MODULE_STATE_COMING))
1198 if (!strcmp(mod->name, "vmlinux")) {
1199 pr_err("vmlinux.ko: invalid module name\n");
1203 mutex_lock(&klp_mutex);
1205 * Each module has to know that klp_module_coming()
1206 * has been called. We never know what module will
1207 * get patched by a new patch.
1209 mod->klp_alive = true;
1211 klp_for_each_patch(patch) {
1212 klp_for_each_object(patch, obj) {
1213 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
1218 ret = klp_init_object_loaded(patch, obj);
1220 pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
1221 patch->mod->name, obj->mod->name, ret);
1225 pr_notice("applying patch '%s' to loading module '%s'\n",
1226 patch->mod->name, obj->mod->name);
1228 ret = klp_pre_patch_callback(obj);
1230 pr_warn("pre-patch callback failed for object '%s'\n",
1235 ret = klp_patch_object(obj);
1237 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
1238 patch->mod->name, obj->mod->name, ret);
1240 klp_post_unpatch_callback(obj);
1244 if (patch != klp_transition_patch)
1245 klp_post_patch_callback(obj);
1251 mutex_unlock(&klp_mutex);
1257 * If a patch is unsuccessfully applied, return
1258 * error to the module loader.
1260 pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
1261 patch->mod->name, obj->mod->name, obj->mod->name);
1262 mod->klp_alive = false;
1264 klp_cleanup_module_patches_limited(mod, patch);
1265 mutex_unlock(&klp_mutex);
1270 void klp_module_going(struct module *mod)
1272 if (WARN_ON(mod->state != MODULE_STATE_GOING &&
1273 mod->state != MODULE_STATE_COMING))
1276 mutex_lock(&klp_mutex);
1278 * Each module has to know that klp_module_going()
1279 * has been called. We never know what module will
1280 * get patched by a new patch.
1282 mod->klp_alive = false;
1284 klp_cleanup_module_patches_limited(mod, NULL);
1286 mutex_unlock(&klp_mutex);
1289 static int __init klp_init(void)
1291 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1298 module_init(klp_init);