lib/logic_iomem.c (platform/kernel/linux-starfive.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Intel Corporation
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/logic_iomem.h>
#include <asm/io.h>

struct logic_iomem_region {
        const struct resource *res;
        const struct logic_iomem_region_ops *ops;
        struct list_head list;
};

struct logic_iomem_area {
        const struct logic_iomem_ops *ops;
        void *priv;
};

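/*
 * Addresses returned by ioremap() here are synthetic cookies: the top
 * bits carry the IOREMAP_BIAS signature, the AREA_BITS field selects a
 * slot in mapped_areas[], and the low AREA_MASK bits are the offset
 * within that area (each area covers at most MAX_AREA_SIZE bytes).
 */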
#define AREA_SHIFT      24
#define MAX_AREA_SIZE   (1 << AREA_SHIFT)
#define MAX_AREAS       ((1U << 31) / MAX_AREA_SIZE)
#define AREA_BITS       ((MAX_AREAS - 1) << AREA_SHIFT)
#define AREA_MASK       (MAX_AREA_SIZE - 1)
#ifdef CONFIG_64BIT
#define IOREMAP_BIAS    0xDEAD000000000000UL
#define IOREMAP_MASK    0xFFFFFFFF00000000UL
#else
#define IOREMAP_BIAS    0x80000000UL
#define IOREMAP_MASK    0x80000000UL
#endif

static DEFINE_MUTEX(regions_mtx);
static LIST_HEAD(regions_list);
static struct logic_iomem_area mapped_areas[MAX_AREAS];

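/*
 * Register an emulated I/O memory region: the resource is claimed from
 * iomem_resource and @ops are used later by ioremap() to map areas that
 * fall inside it.
 */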
int logic_iomem_add_region(struct resource *resource,
                           const struct logic_iomem_region_ops *ops)
{
        struct logic_iomem_region *rreg;
        int err;

        if (WARN_ON(!resource || !ops))
                return -EINVAL;

        if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
                return -EINVAL;

        rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
        if (!rreg)
                return -ENOMEM;

        err = request_resource(&iomem_resource, resource);
        if (err) {
                kfree(rreg);
                return err;
        }

        mutex_lock(&regions_mtx);
        rreg->res = resource;
        rreg->ops = ops;
        list_add_tail(&rreg->list, &regions_list);
        mutex_unlock(&regions_mtx);

        return 0;
}
EXPORT_SYMBOL(logic_iomem_add_region);

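/*
 * Without CONFIG_INDIRECT_IOMEM_FALLBACK, accesses that do not resolve to
 * a registered region are considered invalid and only trigger a warning;
 * with it set, the real_*() fallbacks are provided elsewhere (by the
 * architecture code) instead of the stubs below.
 */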
#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
{
        WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
             (unsigned long long)offset, size);
        return NULL;
}

static void real_iounmap(volatile void __iomem *addr)
{
        WARN(1, "invalid iounmap for addr 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */

void __iomem *ioremap(phys_addr_t offset, size_t size)
{
        void __iomem *ret = NULL;
        struct logic_iomem_region *rreg, *found = NULL;
        int i;

        mutex_lock(&regions_mtx);
        /* find a registered region that fully covers [offset, offset + size) */
        list_for_each_entry(rreg, &regions_list, list) {
                if (rreg->res->start > offset)
                        continue;
                if (rreg->res->end < offset + size - 1)
                        continue;
                found = rreg;
                break;
        }

        if (!found)
                goto out;

        /* grab a free area slot and let the region map it */
        for (i = 0; i < MAX_AREAS; i++) {
                long offs;

                if (mapped_areas[i].ops)
                        continue;

                offs = found->ops->map(offset - found->res->start,
                                       size, &mapped_areas[i].ops,
                                       &mapped_areas[i].priv);
                if (offs < 0) {
                        mapped_areas[i].ops = NULL;
                        break;
                }

                if (WARN_ON(!mapped_areas[i].ops)) {
                        mapped_areas[i].ops = NULL;
                        break;
                }

                ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
                break;
        }
out:
        mutex_unlock(&regions_mtx);
        if (ret)
                return ret;
        return real_ioremap(offset, size);
}
EXPORT_SYMBOL(ioremap);

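/*
 * Look up the area slot encoded in a synthetic ioremap() address; returns
 * NULL if the address does not carry the IOREMAP_BIAS signature or the
 * slot is not currently mapped.
 */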
static inline struct logic_iomem_area *
get_area(const volatile void __iomem *addr)
{
        unsigned long a = (unsigned long)addr;
        unsigned int idx;

        if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
                return NULL;

        idx = (a & AREA_BITS) >> AREA_SHIFT;

        if (mapped_areas[idx].ops)
                return &mapped_areas[idx];

        return NULL;
}

void iounmap(volatile void __iomem *addr)
{
        struct logic_iomem_area *area = get_area(addr);

        if (!area) {
                real_iounmap(addr);
                return;
        }

        if (area->ops->unmap)
                area->ops->unmap(area->priv);

        mutex_lock(&regions_mtx);
        area->ops = NULL;
        area->priv = NULL;
        mutex_unlock(&regions_mtx);
}
EXPORT_SYMBOL(iounmap);

#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
#define MAKE_FALLBACK(op, sz)                                           \
static u##sz real_raw_read ## op(const volatile void __iomem *addr)     \
{                                                                       \
        WARN(1, "Invalid read" #op " at address %llx\n",                \
             (unsigned long long)(uintptr_t __force)addr);              \
        return (u ## sz)~0ULL;                                          \
}                                                                       \
                                                                        \
static void real_raw_write ## op(u ## sz val,                           \
                                 volatile void __iomem *addr)           \
{                                                                       \
        WARN(1, "Invalid write" #op " of 0x%llx at address %llx\n",     \
             (unsigned long long)val,                                   \
             (unsigned long long)(uintptr_t __force)addr);              \
}

MAKE_FALLBACK(b, 8);
MAKE_FALLBACK(w, 16);
MAKE_FALLBACK(l, 32);
#ifdef CONFIG_64BIT
MAKE_FALLBACK(q, 64);
#endif

static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
{
        WARN(1, "Invalid memset_io at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
}

static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
                               size_t size)
{
        WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);

        memset(buffer, 0xff, size);
}

static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
                             size_t size)
{
        WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
             (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */

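/*
 * Generate __raw_read{b,w,l,q}() and __raw_write{b,w,l,q}(): addresses
 * that decode to a mapped area are dispatched to that area's read/write
 * ops with a width of sz / 8 bytes, everything else goes to the
 * real_raw_*() fallbacks.
 */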
#define MAKE_OP(op, sz)                                                 \
u##sz __raw_read ## op(const volatile void __iomem *addr)               \
{                                                                       \
        struct logic_iomem_area *area = get_area(addr);                 \
                                                                        \
        if (!area)                                                      \
                return real_raw_read ## op(addr);                       \
                                                                        \
        return (u ## sz) area->ops->read(area->priv,                    \
                                         (unsigned long)addr & AREA_MASK,\
                                         sz / 8);                       \
}                                                                       \
EXPORT_SYMBOL(__raw_read ## op);                                        \
                                                                        \
void __raw_write ## op(u ## sz val, volatile void __iomem *addr)        \
{                                                                       \
        struct logic_iomem_area *area = get_area(addr);                 \
                                                                        \
        if (!area) {                                                    \
                real_raw_write ## op(val, addr);                        \
                return;                                                 \
        }                                                               \
                                                                        \
        area->ops->write(area->priv,                                    \
                         (unsigned long)addr & AREA_MASK,               \
                         sz / 8, val);                                  \
}                                                                       \
EXPORT_SYMBOL(__raw_write ## op)

MAKE_OP(b, 8);
MAKE_OP(w, 16);
MAKE_OP(l, 32);
#ifdef CONFIG_64BIT
MAKE_OP(q, 64);
#endif

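/*
 * The string MMIO helpers below use the region's bulk set/copy_from/copy_to
 * ops when the region provides them, and fall back to byte-wise read/write
 * otherwise.
 */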
void memset_io(volatile void __iomem *addr, int value, size_t size)
{
        struct logic_iomem_area *area = get_area(addr);
        unsigned long offs, start;

        if (!area) {
                real_memset_io(addr, value, size);
                return;
        }

        start = (unsigned long)addr & AREA_MASK;

        if (area->ops->set) {
                area->ops->set(area->priv, start, value, size);
                return;
        }

        for (offs = 0; offs < size; offs++)
                area->ops->write(area->priv, start + offs, 1, value);
}
EXPORT_SYMBOL(memset_io);

void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
                   size_t size)
{
        struct logic_iomem_area *area = get_area(addr);
        u8 *buf = buffer;
        unsigned long offs, start;

        if (!area) {
                real_memcpy_fromio(buffer, addr, size);
                return;
        }

        start = (unsigned long)addr & AREA_MASK;

        if (area->ops->copy_from) {
                area->ops->copy_from(area->priv, buffer, start, size);
                return;
        }

        for (offs = 0; offs < size; offs++)
                buf[offs] = area->ops->read(area->priv, start + offs, 1);
}
EXPORT_SYMBOL(memcpy_fromio);

void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
{
        struct logic_iomem_area *area = get_area(addr);
        const u8 *buf = buffer;
        unsigned long offs, start;

        if (!area) {
                real_memcpy_toio(addr, buffer, size);
                return;
        }

        start = (unsigned long)addr & AREA_MASK;

        if (area->ops->copy_to) {
                area->ops->copy_to(area->priv, start, buffer, size);
                return;
        }

        for (offs = 0; offs < size; offs++)
                area->ops->write(area->priv, start + offs, 1, buf[offs]);
}
EXPORT_SYMBOL(memcpy_toio);