1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/init.h>
3 #include <linux/static_call.h>
6 #include <linux/sort.h>
7 #include <linux/slab.h>
8 #include <linux/module.h>
10 #include <linux/processor.h>
11 #include <asm/sections.h>
13 extern struct static_call_site __start_static_call_sites[],
14 __stop_static_call_sites[];
15 extern struct static_call_tramp_key __start_static_call_tramp_key[],
16 __stop_static_call_tramp_key[];
18 static int static_call_initialized;
/*
 * Re-arm initialization so static_call_init() will run again: the init
 * path only bails when the flag is exactly 1, so bumping it past 1 makes
 * the next static_call_init() repeat the work.  Warns (and presumably
 * returns early -- body partly elided in this excerpt) if init has not
 * happened yet.
 */
21  * Must be called before early_initcall() to be effective.
23 void static_call_force_reinit(void)
25 	if (WARN_ON_ONCE(!static_call_initialized))
28 	static_call_initialized++;
31 /* mutex to protect key modules/sites */
32 static DEFINE_MUTEX(static_call_mutex);
/* Trivial wrappers: all site/module-list mutation happens under this lock. */
34 static void static_call_lock(void)
36 	mutex_lock(&static_call_mutex);
39 static void static_call_unlock(void)
41 	mutex_unlock(&static_call_mutex);
/*
 * Call-site entries store PC-relative offsets; adding the address of the
 * field itself recovers the absolute pointer.
 */
44 static inline void *static_call_addr(struct static_call_site *site)
46 	return (void *)((long)site->addr + (long)&site->addr);
/* Absolute key address, still carrying the low flag bits (INIT/TAIL). */
49 static inline unsigned long __static_call_key(const struct static_call_site *site)
51 	return (long)site->key + (long)&site->key;
/* struct static_call_key pointer with the flag bits masked off. */
54 static inline struct static_call_key *static_call_key(const struct static_call_site *site)
56 	return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
59 /* These assume the key is word-aligned. */
/* Site lives in an init section and may be freed after boot. */
60 static inline bool static_call_is_init(struct static_call_site *site)
62 	return __static_call_key(site) & STATIC_CALL_SITE_INIT;
/* Site is a tail-call; the arch patches a jump rather than a call. */
65 static inline bool static_call_is_tail(struct static_call_site *site)
67 	return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
/*
 * Persistently set the INIT flag by rewriting the stored relative key
 * offset.  NOTE(review): the trailing "(long)&site->key;" term of this
 * expression is truncated in this excerpt.
 */
70 static inline void static_call_set_init(struct static_call_site *site)
72 	site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
/*
 * sort() comparator for call-site entries.  It decodes both keys so that
 * sites belonging to the same key end up adjacent after sorting (the
 * range scans in __static_call_update()/__static_call_init() rely on
 * that).  NOTE(review): the actual comparison/return logic is elided in
 * this excerpt.
 */
76 static int static_call_site_cmp(const void *_a, const void *_b)
78 	const struct static_call_site *a = _a;
79 	const struct static_call_site *b = _b;
80 	const struct static_call_key *key_a = static_call_key(a);
81 	const struct static_call_key *key_b = static_call_key(b);
/*
 * sort() swap callback.  addr/key are relative offsets, so moving an
 * entry by 'delta' bytes must re-bias both fields to keep them pointing
 * at the same absolute addresses: a plain memcpy swap would corrupt them.
 */
92 static void static_call_site_swap(void *_a, void *_b, int size)
94 	long delta = (unsigned long)_a - (unsigned long)_b;
95 	struct static_call_site *a = _a;
96 	struct static_call_site *b = _b;
97 	struct static_call_site tmp = *a;
99 	a->addr = b->addr - delta;
100 	a->key = b->key - delta;
102 	b->addr = tmp.addr + delta;
103 	b->key = tmp.key + delta;
/* Sort the [start, stop) site range using the relative-offset-safe swap. */
106 static inline void static_call_sort_entries(struct static_call_site *start,
107 struct static_call_site *stop)
109 	sort(start, stop - start, sizeof(struct static_call_site),
110 	static_call_site_cmp, static_call_site_swap);
/*
 * key->type doubles as a tagged pointer: bit 0 set means it holds a raw
 * vmlinux sites pointer, bit 0 clear means it points at a
 * struct static_call_mod list.
 */
113 static inline bool static_call_key_has_mods(struct static_call_key *key)
115 	return !(key->type & 1);
/* Module-list head, or NULL in the raw-sites case.  (Returns elided here.) */
118 static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
120 	if (!static_call_key_has_mods(key))
/* Raw sites pointer with the tag bit stripped, or NULL in the mods case. */
126 static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
128 	if (static_call_key_has_mods(key))
131 	return (struct static_call_site *)(key->type & ~1);
/*
 * __static_call_update - retarget a static call: patch the trampoline and
 * then every recorded call site (vmlinux + modules) to branch to @func.
 * The trampoline is patched first so callers still routed through it pick
 * up the new target even before the sites are walked.
 * NOTE(review): locking, early returns and the !CONFIG_MODULES arm are
 * elided in this excerpt.
 */
134 void __static_call_update(struct static_call_key *key, void *tramp, void *func)
136 	struct static_call_site *site, *stop;
137 	struct static_call_mod *site_mod, first;
/* Fast path: nothing to do if the target is unchanged. */
142 	if (key->func == func)
/* Patch the trampoline itself before touching individual sites. */
147 	arch_static_call_transform(NULL, tramp, func, false);
150 	 * If uninitialized, we'll not update the callsites, but they still
151 	 * point to the trampoline and we just patched that.
153 	if (WARN_ON_ONCE(!static_call_initialized))
/*
 * Synthesize a head element so the loop below walks the vmlinux sites
 * and the per-module list uniformly.
 */
156 	first = (struct static_call_mod){
157 		.next = static_call_key_next(key),
159 		.sites = static_call_key_sites(key),
162 	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
163 		bool init = system_state < SYSTEM_RUNNING;
164 		struct module *mod = site_mod->mod;
166 		if (!site_mod->sites) {
168 			 * This can happen if the static call key is defined in
169 			 * a module which doesn't use it.
171 			 * It also happens in the has_mods case, where the
172 			 * 'first' entry has no sites associated with it.
177 			stop = __stop_static_call_sites;
180 #ifdef CONFIG_MODULES
181 			stop = mod->static_call_sites +
182 			mod->num_static_call_sites;
183 			init = mod->state == MODULE_STATE_COMING;
/* Sites are sorted by key: stop at the first site owned by another key. */
187 		for (site = site_mod->sites;
188 		site < stop && static_call_key(site) == key; site++) {
189 			void *site_addr = static_call_addr(site);
/* Skip __init sites once init memory may have been freed. */
191 			if (!init && static_call_is_init(site))
194 			if (!kernel_text_address((unsigned long)site_addr)) {
196 				 * This skips patching built-in __exit, which
197 				 * is part of init_section_contains() but is
198 				 * not part of kernel_text_address().
200 				 * Skipping built-in __exit is fine since it
201 				 * will never be executed.
203 				WARN_ONCE(!static_call_is_init(site),
204 				"can't patch static call site at %pS",
/* Patch the site; tail-call sites get a jump instead of a call. */
209 			arch_static_call_transform(site_addr, NULL, func,
210 			static_call_is_tail(site));
215 	static_call_unlock();
218 EXPORT_SYMBOL_GPL(__static_call_update);
/*
 * __static_call_init - one-time setup of the [start, stop) site range
 * (@mod == NULL for vmlinux).  Sorts the sites, flags those living in
 * init sections, links each key to its sites, and patches every site to
 * the key's current target.  Returns an error code on allocation
 * failure.  NOTE(review): several closing/error-path lines are elided in
 * this excerpt.
 */
220 static int __static_call_init(struct module *mod,
221 struct static_call_site *start,
222 struct static_call_site *stop)
224 	struct static_call_site *site;
225 	struct static_call_key *key, *prev_key = NULL;
226 	struct static_call_mod *site_mod;
/* Group same-key sites together so the per-key work runs once per key. */
231 	static_call_sort_entries(start, stop);
233 	for (site = start; site < stop; site++) {
234 		void *site_addr = static_call_addr(site);
/* Flag sites in init memory so later updates can skip them safely. */
236 		if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
237 		(!mod && init_section_contains(site_addr, 1)))
238 			static_call_set_init(site);
240 		key = static_call_key(site);
241 		if (key != prev_key) {
245 			 * For vmlinux (!mod) avoid the allocation by storing
246 			 * the sites pointer in the key itself. Also see
247 			 * __static_call_update()'s @first.
249 			 * This allows architectures (eg. x86) to call
250 			 * static_call_init() before memory allocation works.
258 			site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
263 			 * When the key has a direct sites pointer, extract
264 			 * that into an explicit struct static_call_mod, so we
265 			 * can have a list of modules.
267 			if (static_call_key_sites(key)) {
268 				site_mod->mod = NULL;
269 				site_mod->next = NULL;
270 				site_mod->sites = static_call_key_sites(key);
272 				key->mods = site_mod;
274 				site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
/* Prepend this range to the key's module list. */
280 			site_mod->sites = site;
281 			site_mod->next = static_call_key_next(key);
282 			key->mods = site_mod;
/* Point the site at the key's current function. */
286 		arch_static_call_transform(site_addr, NULL, key->func,
287 		static_call_is_tail(site));
/*
 * Does the CALL_INSN_SIZE-byte instruction at @site overlap the
 * [start, end] text range?  (Return statements elided in this excerpt.)
 */
293 static int addr_conflict(struct static_call_site *site, void *start, void *end)
295 	unsigned long addr = (unsigned long)static_call_addr(site);
297 	if (addr <= (unsigned long)end &&
298 	addr + CALL_INSN_SIZE > (unsigned long)start)
/*
 * Scan the [iter_start, iter_stop) site table for any call instruction
 * overlapping [start, end].  @init: when true, sites in (possibly
 * already freed) init sections are also considered.
 */
304 static int __static_call_text_reserved(struct static_call_site *iter_start,
305 struct static_call_site *iter_stop,
306 void *start, void *end, bool init)
308 	struct static_call_site *iter = iter_start;
310 	while (iter < iter_stop) {
311 		if (init || !static_call_is_init(iter)) {
312 			if (addr_conflict(iter, start, end))
321 #ifdef CONFIG_MODULES
/*
 * Check the owning module's static_call_sites table for a conflict with
 * [start, end].  A module reference is taken around the scan to keep the
 * table stable (the matching module_put() is elided in this excerpt).
 */
323 static int __static_call_mod_text_reserved(void *start, void *end)
329 	mod = __module_text_address((unsigned long)start);
/* start and end are expected to fall inside the same module's text. */
330 	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
331 	if (!try_module_get(mod))
/* COMING modules may still have init-section sites worth reporting. */
338 	ret = __static_call_text_reserved(mod->static_call_sites,
339 	mod->static_call_sites + mod->num_static_call_sites,
340 	start, end, mod->state == MODULE_STATE_COMING);
/*
 * Translate a trampoline address into its static_call_key address by
 * scanning the .static_call_tramp_key section; both fields there are
 * stored as relative offsets.  NOTE(review): the no-match return path is
 * elided in this excerpt.
 */
347 static unsigned long tramp_key_lookup(unsigned long addr)
349 	struct static_call_tramp_key *start = __start_static_call_tramp_key;
350 	struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
351 	struct static_call_tramp_key *tramp_key;
353 	for (tramp_key = start; tramp_key != stop; tramp_key++) {
356 		tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
358 		return (long)tramp_key->key + (long)&tramp_key->key;
/*
 * MODULE_STATE_COMING handler: for each of the module's sites whose key
 * slot actually encodes a trampoline address (non-exported key), resolve
 * the real key via tramp_key_lookup() and rewrite the slot, then run the
 * common range initialization.
 */
364 static int static_call_add_module(struct module *mod)
366 	struct static_call_site *start = mod->static_call_sites;
367 	struct static_call_site *stop = start + mod->num_static_call_sites;
368 	struct static_call_site *site;
370 	for (site = start; site != stop; site++) {
371 		unsigned long s_key = __static_call_key(site);
372 		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
376 		 * If the key is exported, 'addr' points to the key, which
377 		 * means modules are allowed to call static_call_update() on
380 		 * Otherwise, the key isn't exported, and 'addr' points to the
381 		 * trampoline so we need to lookup the key.
383 		 * We go through this dance to prevent crazy modules from
384 		 * abusing sensitive static calls.
386 		if (!kernel_text_address(addr))
389 		key = tramp_key_lookup(addr);
391 			pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
392 			static_call_addr(site));
/* Rewrite the slot: keep the flag bits, store the key as a relative offset. */
396 		key |= s_key & STATIC_CALL_SITE_FLAGS;
397 		site->key = key - (long)&site->key;
400 	return __static_call_init(mod, start, stop);
/*
 * MODULE_STATE_GOING handler (also the COMING-failure rollback): unlink
 * this module's static_call_mod entry from every key referenced by its
 * sites.  prev_key skips repeated work for runs of same-key sites.
 */
403 static void static_call_del_module(struct module *mod)
405 	struct static_call_site *start = mod->static_call_sites;
406 	struct static_call_site *stop = mod->static_call_sites +
407 	mod->num_static_call_sites;
408 	struct static_call_key *key, *prev_key = NULL;
409 	struct static_call_mod *site_mod, **prev;
410 	struct static_call_site *site;
412 	for (site = start; site < stop; site++) {
413 		key = static_call_key(site);
/* Walk the key's list looking for this module's entry (may be absent). */
419 		for (prev = &key->mods, site_mod = key->mods;
420 		site_mod && site_mod->mod != mod;
421 		prev = &site_mod->next, site_mod = site_mod->next)
/* Unlink the entry; the kfree() presumably follows in elided lines. */
427 		*prev = site_mod->next;
/*
 * Module notifier: initialize a module's call sites on COMING and tear
 * them down on GOING.  A failed COMING-time init is rolled back via
 * static_call_del_module() before the error is propagated.
 */
432 static int static_call_module_notify(struct notifier_block *nb,
433 unsigned long val, void *data)
435 	struct module *mod = data;
442 	case MODULE_STATE_COMING:
443 		ret = static_call_add_module(mod);
445 			WARN(1, "Failed to allocate memory for static calls");
/* Undo any partial initialization. */
446 			static_call_del_module(mod);
449 	case MODULE_STATE_GOING:
450 		static_call_del_module(mod);
454 	static_call_unlock();
/* Convert the errno-style result into notifier-chain return semantics. */
457 	return notifier_from_errno(ret);
460 static struct notifier_block static_call_module_nb = {
461 	.notifier_call = static_call_module_notify,
/* !CONFIG_MODULES stub: no module text exists to conflict.  (Return elided.) */
466 static inline int __static_call_mod_text_reserved(void *start, void *end)
471 #endif /* CONFIG_MODULES */
/*
 * Does [start, end] overlap any static call site?  Checks vmlinux sites
 * first, then falls back to module sites.  Init-section sites only count
 * while the system is still booting (system_state < SYSTEM_RUNNING).
 */
473 int static_call_text_reserved(void *start, void *end)
475 	bool init = system_state < SYSTEM_RUNNING;
476 	int ret = __static_call_text_reserved(__start_static_call_sites,
477 	__stop_static_call_sites, start, end, init);
482 	return __static_call_mod_text_reserved(start, end);
/*
 * Early-boot entry point: initialize all vmlinux call sites under the
 * lock and register the module notifier exactly once.  Idempotent unless
 * static_call_force_reinit() bumped the flag past 1.
 */
485 int __init static_call_init(void)
489 	/* See static_call_force_reinit(). */
490 	if (static_call_initialized == 1)
495 	ret = __static_call_init(NULL, __start_static_call_sites,
496 	__stop_static_call_sites);
497 	static_call_unlock();
501 		pr_err("Failed to allocate memory for static_call!\n");
/* Only the very first init registers the notifier; reinit must not. */
505 #ifdef CONFIG_MODULES
506 	if (!static_call_initialized)
507 		register_module_notifier(&static_call_module_nb);
510 	static_call_initialized = 1;
513 early_initcall(static_call_init);
515 #ifdef CONFIG_STATIC_CALL_SELFTEST
/* Two distinct targets used to exercise static_call_update(). */
517 static int func_a(int x)
522 static int func_b(int x)
527 DEFINE_STATIC_CALL(sc_selftest, func_a);
/*
 * Table of (func, val, expect) cases driving the selftest loop below;
 * the field declarations and entries are elided in this excerpt.
 */
529 static struct static_call_data {
533 } static_call_data [] __initdata = {
539 static int __init test_static_call_init(void)
543 	for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
544 		struct static_call_data *scd = &static_call_data[i];
/* Retarget the call, then verify the new target actually executes. */
547 		static_call_update(sc_selftest, scd->func);
549 		WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
554 early_initcall(test_static_call_init);
556 #endif /* CONFIG_STATIC_CALL_SELFTEST */