// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <broonie@kernel.org>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/maple_tree.h>
12 #include <linux/slab.h>
16 static int regcache_maple_read(struct regmap *map,
17 unsigned int reg, unsigned int *value)
19 struct maple_tree *mt = map->cache;
20 MA_STATE(mas, mt, reg, reg);
25 entry = mas_walk(&mas);
31 *value = entry[reg - mas.index];
38 static int regcache_maple_write(struct regmap *map, unsigned int reg,
41 struct maple_tree *mt = map->cache;
42 MA_STATE(mas, mt, reg, reg);
43 unsigned long *entry, *upper, *lower;
44 unsigned long index, last;
45 size_t lower_sz, upper_sz;
50 entry = mas_walk(&mas);
52 entry[reg - mas.index] = val;
57 /* Any adjacent entries to extend/merge? */
58 mas_set_range(&mas, reg - 1, reg + 1);
62 lower = mas_find(&mas, reg - 1);
65 lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
68 upper = mas_find(&mas, reg + 1);
71 upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
76 entry = kmalloc((last - index + 1) * sizeof(unsigned long),
82 memcpy(entry, lower, lower_sz);
83 entry[reg - index] = val;
85 memcpy(&entry[reg - index + 1], upper, upper_sz);
88 * This is safe because the regmap lock means the Maple lock
89 * is redundant, but we need to take it due to lockdep asserts
90 * in the maple tree code.
94 mas_set_range(&mas, index, last);
95 ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
107 static int regcache_maple_drop(struct regmap *map, unsigned int min,
110 struct maple_tree *mt = map->cache;
111 MA_STATE(mas, mt, min, max);
112 unsigned long *entry, *lower, *upper;
113 unsigned long lower_index, lower_last;
114 unsigned long upper_index, upper_last;
122 mas_for_each(&mas, entry, max) {
124 * This is safe because the regmap lock means the
125 * Maple lock is redundant, but we need to take it due
126 * to lockdep asserts in the maple tree code.
130 /* Do we need to save any of this entry? */
131 if (mas.index < min) {
132 lower_index = mas.index;
135 lower = kmemdup(entry, ((min - mas.index) *
136 sizeof(unsigned long)),
144 if (mas.last > max) {
145 upper_index = max + 1;
146 upper_last = mas.last;
148 upper = kmemdup(&entry[max + 1],
150 sizeof(unsigned long)),
162 /* Insert new nodes with the saved data */
164 mas_set_range(&mas, lower_index, lower_last);
165 ret = mas_store_gfp(&mas, lower, GFP_KERNEL);
172 mas_set_range(&mas, upper_index, upper_last);
173 ret = mas_store_gfp(&mas, upper, GFP_KERNEL);
189 static int regcache_maple_sync(struct regmap *map, unsigned int min,
192 struct maple_tree *mt = map->cache;
193 unsigned long *entry;
194 MA_STATE(mas, mt, min, max);
195 unsigned long lmin = min;
196 unsigned long lmax = max;
200 map->cache_bypass = true;
204 mas_for_each(&mas, entry, max) {
205 for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
206 ret = regcache_sync_val(map, r, entry[r - mas.index]);
215 map->cache_bypass = false;
220 static int regcache_maple_exit(struct regmap *map)
222 struct maple_tree *mt = map->cache;
223 MA_STATE(mas, mt, 0, UINT_MAX);
224 unsigned int *entry;;
226 /* if we've already been called then just return */
231 mas_for_each(&mas, entry, UINT_MAX)
242 static int regcache_maple_init(struct regmap *map)
244 struct maple_tree *mt;
248 mt = kmalloc(sizeof(*mt), GFP_KERNEL);
255 for (i = 0; i < map->num_reg_defaults; i++) {
256 ret = regcache_maple_write(map,
257 map->reg_defaults[i].reg,
258 map->reg_defaults[i].def);
266 regcache_maple_exit(map);
270 struct regcache_ops regcache_maple_ops = {
271 .type = REGCACHE_MAPLE,
273 .init = regcache_maple_init,
274 .exit = regcache_maple_exit,
275 .read = regcache_maple_read,
276 .write = regcache_maple_write,
277 .drop = regcache_maple_drop,
278 .sync = regcache_maple_sync,