lib/once.c
// SPDX-License-Identifier: GPL-2.0
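/*
 * Backing implementation for the DO_ONCE() and DO_ONCE_SLEEPABLE()
 * macros in <linux/once.h>: run a function at most once, then disable
 * the guarding static key from workqueue context so later calls take a
 * patched-out branch.
 */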
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>
#include <linux/module.h>

struct once_work {
        struct work_struct work;
        struct static_key_true *key;
        struct module *module;
};

static void once_deferred(struct work_struct *w)
{
        struct once_work *work;

        work = container_of(w, struct once_work, work);
        BUG_ON(!static_key_enabled(work->key));
        static_branch_disable(work->key);
        module_put(work->module);
        kfree(work);
}

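/*
 * Disabling the static key must be deferred to process context:
 * static_branch_disable() takes the jump label mutex and patches code,
 * which may sleep, while DO_ONCE() callers may be in atomic context.
 * Hence the GFP_ATOMIC allocation and the handoff to a workqueue; the
 * module reference keeps the key's module alive until once_deferred()
 * has run.
 */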
static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
        struct once_work *w;

        w = kmalloc(sizeof(*w), GFP_ATOMIC);
        if (!w)
                return;

        INIT_WORK(&w->work, once_deferred);
        w->key = key;
        w->module = mod;
        __module_get(mod);
        schedule_work(&w->work);
}

static DEFINE_SPINLOCK(once_lock);

bool __do_once_start(bool *done, unsigned long *flags)
        __acquires(once_lock)
{
        spin_lock_irqsave(&once_lock, *flags);
        if (*done) {
                spin_unlock_irqrestore(&once_lock, *flags);
                /* Keep sparse happy by restoring an even lock count on
                 * this lock. In case we return here, we don't call into
                 * __do_once_done but return early in the DO_ONCE() macro.
                 */
                __acquire(once_lock);
                return false;
        }

        return true;
}
EXPORT_SYMBOL(__do_once_start);

void __do_once_done(bool *done, struct static_key_true *once_key,
                    unsigned long *flags, struct module *mod)
        __releases(once_lock)
{
        *done = true;
        spin_unlock_irqrestore(&once_lock, *flags);
        once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
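
/*
 * Example (sketch, not part of this file): DO_ONCE() in <linux/once.h>
 * drives the two helpers above so that a function runs at most once,
 * e.g. lazily seeding a secret from a fast path (names below are
 * illustrative):
 *
 *      static siphash_key_t my_secret __read_mostly;
 *
 *      static void my_hash_setup(void)
 *      {
 *              DO_ONCE(get_random_bytes, &my_secret, sizeof(my_secret));
 *      }
 *
 * get_random_once() is a convenience wrapper for exactly this pattern.
 * On the first call __do_once_start() returns true, the callee runs,
 * and __do_once_done() flips the static key via once_disable_jump();
 * afterwards the whole construct compiles down to a disabled branch.
 */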

static DEFINE_MUTEX(once_mutex);

bool __do_once_sleepable_start(bool *done)
        __acquires(once_mutex)
{
        mutex_lock(&once_mutex);
        if (*done) {
                mutex_unlock(&once_mutex);
                /* Keep sparse happy by restoring an even lock count on
                 * this mutex. In case we return here, we don't call into
                 * __do_once_sleepable_done but return early in the
                 * DO_ONCE_SLEEPABLE() macro.
                 */
                __acquire(once_mutex);
                return false;
        }

        return true;
}
EXPORT_SYMBOL(__do_once_sleepable_start);

void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
                              struct module *mod)
        __releases(once_mutex)
{
        *done = true;
        mutex_unlock(&once_mutex);
        once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_sleepable_done);
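
/*
 * Example (sketch, not part of this file): DO_ONCE_SLEEPABLE() uses the
 * mutex-based helpers above for once-callbacks that may sleep, e.g. a
 * one-time setup doing a GFP_KERNEL allocation (hypothetical caller):
 *
 *      static struct my_entry *my_table;
 *
 *      static void my_table_init(size_t entries)
 *      {
 *              my_table = kvcalloc(entries, sizeof(*my_table), GFP_KERNEL);
 *      }
 *
 *      ...
 *      DO_ONCE_SLEEPABLE(my_table_init, nr_entries);
 *
 * Unlike DO_ONCE(), this must only be used from process context, since
 * taking once_mutex may sleep.
 */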