kernel/bpf/percpu_freelist.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
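/*
 * Per-CPU freelist of preallocated elements. Each possible CPU has its own
 * locked list plus one shared "extralist" that NMI-context callers fall back
 * to when every per-CPU lock is contended. Used by BPF maps (e.g. preallocated
 * hash maps) to hand out and recycle elements without a single global lock.
 */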
#include "percpu_freelist.h"

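/* Allocate the per-CPU list heads and initialize them and the extralist. */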
int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	raw_spin_lock_init(&s->extralist.lock);
	s->extralist.first = NULL;
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

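/*
 * Link @node at the front of @head. The caller must either hold head->lock
 * or own the list exclusively (as in pcpu_freelist_populate() before the
 * freelist is visible). The WRITE_ONCE() pairs with the lockless READ_ONCE()
 * emptiness checks in the pop paths.
 */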
static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
					   struct pcpu_freelist_node *node)
{
	node->next = head->first;
	WRITE_ONCE(head->first, node);
}

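/* Push onto one per-CPU list under its lock; callers have IRQs disabled. */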
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	pcpu_freelist_push_node(head, node);
	raw_spin_unlock(&head->lock);
}

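/*
 * Opportunistically push onto the shared extralist. Only used from NMI
 * context when no per-CPU lock could be taken with a trylock.
 */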
static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
						struct pcpu_freelist_node *node)
{
	if (!raw_spin_trylock(&s->extralist.lock))
		return false;

	pcpu_freelist_push_node(&s->extralist, node);
	raw_spin_unlock(&s->extralist.lock);
	return true;
}

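/*
 * NMI-safe push: an NMI can interrupt a path that already holds the local
 * per-CPU lock, so never spin on it. Instead, trylock each possible CPU's
 * list starting from the local one, then the extralist, and keep retrying
 * until one of the trylocks succeeds.
 */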
static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
					     struct pcpu_freelist_node *node)
{
	int cpu, orig_cpu;

	orig_cpu = raw_smp_processor_id();
	while (1) {
		for_each_cpu_wrap(cpu, cpu_possible_mask, orig_cpu) {
			struct pcpu_freelist_head *head;

			head = per_cpu_ptr(s->freelist, cpu);
			if (raw_spin_trylock(&head->lock)) {
				pcpu_freelist_push_node(head, node);
				raw_spin_unlock(&head->lock);
				return;
			}
		}

		/* cannot lock any per cpu lock, try extralist */
		if (pcpu_freelist_try_push_extra(s, node))
			return;
	}
}

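/* Caller must have IRQs disabled; pcpu_freelist_push() is the irqsave wrapper. */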
void __pcpu_freelist_push(struct pcpu_freelist *s,
			  struct pcpu_freelist_node *node)
{
	if (in_nmi())
		___pcpu_freelist_push_nmi(s, node);
	else
		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}

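/*
 * Spread the nr_elems preallocated elements in @buf (each elem_size bytes)
 * as evenly as possible across all possible CPUs: the first
 * nr_elems % num_possible_cpus() lists receive one extra element.
 */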
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned int cpu, cpu_idx, i, j, n, m;

	n = nr_elems / num_possible_cpus();
	m = nr_elems % num_possible_cpus();

	cpu_idx = 0;
	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(s->freelist, cpu);
		j = n + (cpu_idx < m ? 1 : 0);
		for (i = 0; i < j; i++) {
			/* No locking required as this is not visible yet. */
			pcpu_freelist_push_node(head, buf);
			buf += elem_size;
		}
		cpu_idx++;
	}
}

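/*
 * Pop, preferring the local CPU's list and then stealing from the others.
 * Lists that look empty under READ_ONCE() are skipped without taking the
 * lock; the extralist is the last resort.
 */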
static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int cpu;

	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (!READ_ONCE(head->first))
			continue;
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			WRITE_ONCE(head->first, node->next);
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
	}

	/* per cpu lists are all empty, try extralist */
	if (!READ_ONCE(s->extralist.first))
		return NULL;
	raw_spin_lock(&s->extralist.lock);
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

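/*
 * NMI-safe pop: same scan order as above, but only trylock is used, so this
 * can return NULL even when elements remain if every lock is contended.
 */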
static struct pcpu_freelist_node *
___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int cpu;

	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (!READ_ONCE(head->first))
			continue;
		if (raw_spin_trylock(&head->lock)) {
			node = head->first;
			if (node) {
				WRITE_ONCE(head->first, node->next);
				raw_spin_unlock(&head->lock);
				return node;
			}
			raw_spin_unlock(&head->lock);
		}
	}

	/* cannot pop from per cpu lists, try extralist */
	if (!READ_ONCE(s->extralist.first) || !raw_spin_trylock(&s->extralist.lock))
		return NULL;
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

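/* Caller must have IRQs disabled; pcpu_freelist_pop() is the irqsave wrapper. */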
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	if (in_nmi())
		return ___pcpu_freelist_pop_nmi(s);
	return ___pcpu_freelist_pop(s);
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return ret;
}
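
/*
 * Illustrative only, not part of this file: a minimal sketch of how a
 * consumer such as a preallocated BPF map might drive this API. The names
 * my_elem, my_pool, my_pool_create, my_pool_get and my_pool_put are
 * hypothetical; the pcpu_freelist_* calls are the real ones defined above.
 * Note that pcpu_freelist_populate() treats the start of each elem_size
 * chunk as the freelist node, so the node must be the first member.
 */
#if 0
#include <linux/slab.h>
#include <linux/container_of.h>
#include "percpu_freelist.h"

struct my_elem {
	struct pcpu_freelist_node fnode;	/* must be first */
	u64 payload;
};

struct my_pool {
	struct pcpu_freelist freelist;
	void *elems;
};

static int my_pool_create(struct my_pool *pool, u32 nr_elems)
{
	int err;

	err = pcpu_freelist_init(&pool->freelist);
	if (err)
		return err;

	pool->elems = kcalloc(nr_elems, sizeof(struct my_elem), GFP_KERNEL);
	if (!pool->elems) {
		pcpu_freelist_destroy(&pool->freelist);
		return -ENOMEM;
	}

	/* Hand all preallocated elements to the freelist in one pass. */
	pcpu_freelist_populate(&pool->freelist, pool->elems,
			       sizeof(struct my_elem), nr_elems);
	return 0;
}

static struct my_elem *my_pool_get(struct my_pool *pool)
{
	struct pcpu_freelist_node *node;

	/* Disables IRQs internally; use __pcpu_freelist_pop() if already disabled. */
	node = pcpu_freelist_pop(&pool->freelist);
	return node ? container_of(node, struct my_elem, fnode) : NULL;
}

static void my_pool_put(struct my_pool *pool, struct my_elem *elem)
{
	pcpu_freelist_push(&pool->freelist, &elem->fnode);
}
#endif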