// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In batch mode, entries with the same key are also sorted by
	 * their code address, enabling a bsearch in the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

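/*
 * With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, each jump_entry stores its
 * code, target and key fields as offsets relative to the entry itself,
 * so a plain byte-wise swap would corrupt them. Re-bias every field by
 * the distance between the two slots while swapping.
 */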
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in
 * jump_label.h. The use of 'atomic_read()' requires atomic.h and is
 * problematic for some kernel headers such as kernel.h and others. Since
 * static_key_count() is not used in the branch statements as it is for the
 * !CONFIG_JUMP_LABEL case, it's OK to have it be a function here. Similarly
 * for 'static_key_enable()' and 'static_key_disable()', which require bug.h.
 * This should allow jump_label.h to be included from most/all places for
 * CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

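/*
 * Usage sketch (illustrative, not part of this file): the slow inc/dec
 * pair behaves like a reference count on the key, e.g.:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);	// hypothetical key
 *
 *	static_key_slow_inc(&my_feature.key);	// 0 -> 1: patch branch sites
 *	...
 *	static_key_slow_dec(&my_feature.key);	// 1 -> 0: patch them back
 */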
void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

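/*
 * static_key_enable()/static_key_disable() force the key to exactly
 * 0 or 1, unlike the inc/dec interfaces above which reference-count it;
 * the WARN_ON_ONCE()s below fire if the two styles are mixed on one key.
 */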
void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

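/*
 * Decrement the key's reference count unless this would be the final
 * 1 -> 0 transition. Returns true if the fast-path decrement succeeded,
 * false if the caller must take jump_label_mutex and patch the sites.
 */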
static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

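/*
 * Usage sketch (illustrative, not part of this file): a deferred key
 * batches the expensive disable path behind a timeout, e.g.:
 *
 *	static struct static_key_deferred my_key;	// hypothetical key
 *
 *	jump_label_rate_limit(&my_key, HZ / 4);
 *	static_key_slow_dec_deferred(&my_key);	// patches at most ~4x/sec
 */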
void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

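/*
 * Returns 1 if @entry's patch site overlaps the [start, end] range
 * (with @end inclusive), 0 otherwise.
 */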
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

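/*
 * key->type aliases key->entries/key->next via a union; the two low
 * bits (JUMP_TYPE_TRUE, JUMP_TYPE_LINKED) encode the initial branch
 * direction and whether the pointer is a jump_entry table or a list of
 * static_key_mod nodes. The accessors below mask those bits off.
 */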
static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction, so we use an
 * access function which preserves these bits.
 */
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

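/*
 * Patch every entry belonging to @key in [entry, stop). With
 * HAVE_JUMP_LABEL_BATCH the architecture queues the transformations and
 * applies them in one batch, paying the expensive text-sync once rather
 * than once per site.
 */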
#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif

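/*
 * Boot-time setup: sort the core kernel's __jump_table, write out the
 * ideal NOPs, mark entries that live in init text, and link each
 * static_key to the first of its (now contiguous) entries.
 */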
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

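/*
 * One node per module that uses a given static_key defined elsewhere;
 * key->next points at a list of these. A NULL ->mod denotes the core
 * kernel's own entries for the key.
 */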
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

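/*
 * Associate @mod's jump entries with their keys. A key defined in @mod
 * itself gets a direct entries pointer; a key defined elsewhere grows a
 * static_key_mod node, converting its direct pointer into a linked list
 * on first use from outside its defining object.
 */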
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;
		bool in_init;

		in_init = within_module_init(jump_entry_code(iter), mod);
		jump_entry_set_init(iter, in_init);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

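/*
 * Undo jump_label_add_module(): unlink and free @mod's static_key_mod
 * nodes, folding a list back into a direct entries pointer once only a
 * single node remains.
 */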
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end overlap with any
 * of the jump label patch addresses. Code that wants to modify kernel
 * text should first verify that it does not overlap with any of the
 * jump label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

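/*
 * Propagate a key flip to every associated jump_entry, choosing the
 * table bounds according to whether the key lives in the core kernel,
 * one module, or (if linked) several objects.
 */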
static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	bool init = system_state < SYSTEM_RUNNING;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod) {
		stop = mod->jump_entries + mod->num_jump_entries;
		init = mod->state == MODULE_STATE_COMING;
	}
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */