drivers/base/regmap/regcache-maple.c
// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

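/*
 * Look up the cached value for a single register.  Each maple tree
 * entry is an array of values covering a contiguous range of
 * registers, so the value sits at the offset of @reg within the range
 * the entry spans.  The lookup runs under the RCU read lock only.
 */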
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

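/*
 * Cache a register value.  If a block already covers @reg the value is
 * updated in place; otherwise a new block is allocated which also
 * absorbs any blocks immediately adjacent to @reg so that contiguous
 * registers end up sharing a single maple tree entry.
 */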
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

	entry = kmalloc((last - index + 1) * sizeof(unsigned long),
			GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}

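/*
 * Drop the registers in the range [min, max] from the cache.  Blocks
 * which straddle the boundaries are split: the portions outside the
 * range are copied out, the old block is erased and the preserved
 * portions are stored back as new entries.
 */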
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	unsigned long lower_index, lower_last;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup(entry, ((min - mas.index) *
						sizeof(unsigned long)),
					GFP_KERNEL);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup(&entry[max - mas.index + 1],
					((mas.last - max) *
					 sizeof(unsigned long)),
					GFP_KERNEL);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, GFP_KERNEL);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, GFP_KERNEL);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

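/*
 * Write every value cached in the range [min, max] back to the
 * hardware.  The RCU read lock is dropped around each register write
 * since regcache_sync_val() may sleep, and the cache is bypassed for
 * the duration so the writes go straight to the device.
 */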
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r;
	int ret = 0;

	map->cache_bypass = true;

	rcu_read_lock();

	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			mas_pause(&mas);
			rcu_read_unlock();
			ret = regcache_sync_val(map, r, entry[r - mas.index]);
			if (ret != 0)
				goto out;
			rcu_read_lock();
		}
	}

	rcu_read_unlock();

out:
	map->cache_bypass = false;

	return ret;
}

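/*
 * Tear the cache down: free every block referenced by the tree, then
 * the tree itself.  Calling this a second time is a no-op since
 * map->cache is cleared on the way out.
 */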
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

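/*
 * Allocate the maple tree and seed it with the register defaults, if
 * any.  On failure the partially built cache is torn down again.
 */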
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;

	mt = kmalloc(sizeof(*mt), GFP_KERNEL);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_maple_write(map,
					   map->reg_defaults[i].reg,
					   map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}

struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};