kernel/jump_label.c
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
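/*
 * Illustrative usage sketch (caller code, not part of this file): a key
 * initialised false keeps its unlikely branch patched out until someone
 * takes a reference with static_key_slow_inc().  do_unlikely_work() below
 * is a hypothetical stand-in for the caller's slow path.
 *
 *	struct static_key key = STATIC_KEY_INIT_FALSE;
 *
 *	if (static_key_false(&key))
 *		do_unlikely_work();
 *
 *	static_key_slow_inc(&key);	patches the branch in
 *	static_key_slow_dec(&key);	patches the branch back out
 */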
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                                        / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key, int enable);

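/*
 * Take a reference on the key.  The common case, where the key is already
 * enabled, only takes the atomic_inc_not_zero() fast path.  The 0 -> 1
 * transition takes jump_label_mutex and patches every jump site before the
 * count becomes visible as non-zero, so concurrent callers never observe an
 * enabled key with half-patched code.
 */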
void static_key_slow_inc(struct static_key *key)
{
        if (atomic_inc_not_zero(&key->enabled))
                return;

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_ENABLE);
                else
                        jump_label_update(key, JUMP_LABEL_DISABLE);
        }
        atomic_inc(&key->enabled);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

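/*
 * Drop a reference.  Only the 1 -> 0 transition takes jump_label_mutex (via
 * atomic_dec_and_mutex_lock()) and patches the sites back to their default
 * state.  If a rate limit was set with jump_label_rate_limit(), the
 * reference is put back and the real disable is deferred to the delayed
 * work item, batching rapid inc/dec cycles.
 */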
static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                if (!jump_label_get_branch_default(key))
                        jump_label_update(key, JUMP_LABEL_DISABLE);
                else
                        jump_label_update(key, JUMP_LABEL_ENABLE);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

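/*
 * Illustrative usage sketch (caller code, not part of this file): wrapping
 * a key in a static_key_deferred lets disables be rate-limited, here to at
 * most one update per second.  "my_key" is a hypothetical example name.
 *
 *	struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);
 *	...
 *	static_key_slow_inc(&my_key.key);
 *	static_key_slow_dec_deferred(&my_key);
 */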
void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}

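/*
 * An entry conflicts with [start, end] if its patch site, which spans
 * JUMP_LABEL_NOP_SIZE bytes starting at entry->code, overlaps that range.
 * Note that @end is treated as inclusive here.
 */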
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
                entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                            enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop, int enable)
{
        for (; (entry < stop) &&
              (entry->key == (jump_label_t)(unsigned long)key);
              entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections;
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, enable);
        }
}

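/*
 * A site's patch direction is the XOR of the key's compile-time default
 * branch (jump_label_get_branch_default()) and its current enabled state
 * (static_key_enabled()): JUMP_LABEL_ENABLE when the two differ,
 * JUMP_LABEL_DISABLE when they match.
 */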
static enum jump_label_type jump_label_type(struct static_key *key)
{
        bool true_branch = jump_label_get_branch_default(key);
        bool state = static_key_enabled(key);

        if ((!true_branch && state) || (true_branch && !state))
                return JUMP_LABEL_ENABLE;

        return JUMP_LABEL_DISABLE;
}

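/*
 * Boot-time initialisation: sort the core kernel's __jump_table so entries
 * for the same key are adjacent, patch every site to its initial state with
 * the cheap "static" transform, and point each key at its first entry
 * (added into ->entries so the JUMP_LABEL_TRUE_BRANCH flag bit survives).
 */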
void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                arch_jump_label_transform_static(iter, jump_label_type(iterk));
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        jump_label_unlock();
}

#ifdef CONFIG_MODULES

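/*
 * For keys referenced from modules other than the one that defines them, a
 * static_key_mod is chained off key->next for each such module, recording
 * where that module's jump entries for the key begin so they can be patched
 * on module load and dropped on unload.
 */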
struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        mod = __module_text_address((unsigned long)start);
        if (!mod)
                return 0;

        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

        return __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end);
}

static void __jump_label_mod_update(struct static_key *key, int enable)
{
        struct static_key_mod *mod = key->next;

        while (mod) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries,
                                    enable);
                mod = mod->next;
        }
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module is
 * executed, patch its jump label sites with arch_get_jump_label_nop(),
 * which is provided by the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
        }
}

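/*
 * Wire a freshly loaded module's jump entries into the key bookkeeping:
 * keys the module itself defines get ->entries pointed at the module's
 * table (preserving the flag bits), while keys defined elsewhere get a
 * static_key_mod record chained onto key->next and, if the key is already
 * enabled, their entries patched to the enabled state right away.
 */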
static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = (struct static_key *)(unsigned long)iter->key;
                if (iterk == key)
                        continue;

                key = iterk;
                if (__module_address(iter->key) == mod) {
                        /*
                         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                if (jump_label_type(key) == JUMP_LABEL_ENABLE)
                        __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;

                key = (struct static_key *)(unsigned long)iter->key;

                if (__module_address(iter->key) == mod)
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

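/*
 * Module notifier: COMING wires the module's entries in (rolling back on
 * failure), GOING unwires them, and LIVE zeroes entries that live in the
 * module's init text, which is about to be freed, so they are never
 * patched again.
 */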
static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text address range between @start and @end
 * overlaps with any of the jump label patch addresses. Code that
 * wants to modify kernel text should first verify that it does not
 * overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

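/*
 * Patch every site associated with @key: first the entries of modules that
 * merely reference the key (via the static_key_mod chain), then the run of
 * entries starting at key->entries, which lives either in the core kernel's
 * table or in the table of the module that defines the key.
 */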
static void jump_label_update(struct static_key *key, int enable)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = jump_label_get_entries(key);

#ifdef CONFIG_MODULES
        struct module *mod = __module_address((unsigned long)key);

        __jump_label_mod_update(key, enable);

        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, enable);
}

#endif /* HAVE_JUMP_LABEL */