// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Intel Corporation
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
6 #include <linux/types.h>
7 #include <linux/slab.h>
8 #include <linux/logic_iomem.h>
10 struct logic_iomem_region {
11 const struct resource *res;
12 const struct logic_iomem_region_ops *ops;
13 struct list_head list;
/*
 * One active mapping slot handed out by ioremap(): the per-mapping
 * read/write ops and the opaque private pointer the region's map()
 * callback returned.  A slot with ops == NULL is free.
 */
struct logic_iomem_area {
	const struct logic_iomem_ops *ops;
	void *priv;
};
/*
 * Fake addresses are carved up as: [IOREMAP_BIAS | area index | offset].
 * Each area covers at most MAX_AREA_SIZE bytes; the index selects a
 * slot in mapped_areas[].
 */
#define AREA_SHIFT	24
#define MAX_AREA_SIZE	(1 << AREA_SHIFT)
#define MAX_AREAS	((1ULL << 32) / MAX_AREA_SIZE)
#define AREA_BITS	((MAX_AREAS - 1) << AREA_SHIFT)
#define AREA_MASK	(MAX_AREA_SIZE - 1)
#ifdef CONFIG_64BIT
/* poison-style bias so stray dereferences of fake pointers fault */
#define IOREMAP_BIAS	0xDEAD000000000000UL
#define IOREMAP_MASK	0xFFFFFFFF00000000UL
#else
#define IOREMAP_BIAS	0UL
#define IOREMAP_MASK	0UL
#endif
34 static DEFINE_MUTEX(regions_mtx);
35 static LIST_HEAD(regions_list);
36 static struct logic_iomem_area mapped_areas[MAX_AREAS];
38 int logic_iomem_add_region(struct resource *resource,
39 const struct logic_iomem_region_ops *ops)
41 struct logic_iomem_region *rreg;
44 if (WARN_ON(!resource || !ops))
47 if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
50 rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
54 err = request_resource(&iomem_resource, resource);
60 mutex_lock(®ions_mtx);
63 list_add_tail(&rreg->list, ®ions_list);
64 mutex_unlock(®ions_mtx);
68 EXPORT_SYMBOL(logic_iomem_add_region);
70 #ifndef CONFIG_LOGIC_IOMEM_FALLBACK
71 static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
73 WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
74 (unsigned long long)offset, size);
78 static void real_iounmap(void __iomem *addr)
80 WARN(1, "invalid iounmap for addr 0x%llx\n",
81 (unsigned long long)addr);
83 #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
85 void __iomem *ioremap(phys_addr_t offset, size_t size)
87 void __iomem *ret = NULL;
88 struct logic_iomem_region *rreg, *found = NULL;
91 mutex_lock(®ions_mtx);
92 list_for_each_entry(rreg, ®ions_list, list) {
93 if (rreg->res->start > offset)
95 if (rreg->res->end < offset + size - 1)
104 for (i = 0; i < MAX_AREAS; i++) {
107 if (mapped_areas[i].ops)
110 offs = rreg->ops->map(offset - found->res->start,
111 size, &mapped_areas[i].ops,
112 &mapped_areas[i].priv);
114 mapped_areas[i].ops = NULL;
118 if (WARN_ON(!mapped_areas[i].ops)) {
119 mapped_areas[i].ops = NULL;
123 ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
127 mutex_unlock(®ions_mtx);
130 return real_ioremap(offset, size);
132 EXPORT_SYMBOL(ioremap);
134 static inline struct logic_iomem_area *
135 get_area(const volatile void __iomem *addr)
137 unsigned long a = (unsigned long)addr;
140 if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
143 idx = (a & AREA_BITS) >> AREA_SHIFT;
145 if (mapped_areas[idx].ops)
146 return &mapped_areas[idx];
151 void iounmap(void __iomem *addr)
153 struct logic_iomem_area *area = get_area(addr);
160 if (area->ops->unmap)
161 area->ops->unmap(area->priv);
163 mutex_lock(®ions_mtx);
166 mutex_unlock(®ions_mtx);
168 EXPORT_SYMBOL(iounmap);
170 #ifndef CONFIG_LOGIC_IOMEM_FALLBACK
171 #define MAKE_FALLBACK(op, sz) \
172 static u##sz real_raw_read ## op(const volatile void __iomem *addr) \
174 WARN(1, "Invalid read" #op " at address %llx\n", \
175 (unsigned long long)addr); \
176 return (u ## sz)~0ULL; \
179 void real_raw_write ## op(u ## sz val, volatile void __iomem *addr) \
181 WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \
182 (unsigned long long)val, (unsigned long long)addr); \
186 MAKE_FALLBACK(w, 16);
187 MAKE_FALLBACK(l, 32);
189 MAKE_FALLBACK(q, 64);
192 static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
194 WARN(1, "Invalid memset_io at address 0x%llx\n",
195 (unsigned long long)addr);
198 static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
201 WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
202 (unsigned long long)addr);
204 memset(buffer, 0xff, size);
207 static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
210 WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
211 (unsigned long long)addr);
213 #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */
215 #define MAKE_OP(op, sz) \
216 u##sz __raw_read ## op(const volatile void __iomem *addr) \
218 struct logic_iomem_area *area = get_area(addr); \
221 return real_raw_read ## op(addr); \
223 return (u ## sz) area->ops->read(area->priv, \
224 (unsigned long)addr & AREA_MASK,\
227 EXPORT_SYMBOL(__raw_read ## op); \
229 void __raw_write ## op(u ## sz val, volatile void __iomem *addr) \
231 struct logic_iomem_area *area = get_area(addr); \
234 real_raw_write ## op(val, addr); \
238 area->ops->write(area->priv, \
239 (unsigned long)addr & AREA_MASK, \
242 EXPORT_SYMBOL(__raw_write ## op)
251 void memset_io(volatile void __iomem *addr, int value, size_t size)
253 struct logic_iomem_area *area = get_area(addr);
254 unsigned long offs, start;
257 real_memset_io(addr, value, size);
261 start = (unsigned long)addr & AREA_MASK;
263 if (area->ops->set) {
264 area->ops->set(area->priv, start, value, size);
268 for (offs = 0; offs < size; offs++)
269 area->ops->write(area->priv, start + offs, 1, value);
271 EXPORT_SYMBOL(memset_io);
273 void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
276 struct logic_iomem_area *area = get_area(addr);
278 unsigned long offs, start;
281 real_memcpy_fromio(buffer, addr, size);
285 start = (unsigned long)addr & AREA_MASK;
287 if (area->ops->copy_from) {
288 area->ops->copy_from(area->priv, buffer, start, size);
292 for (offs = 0; offs < size; offs++)
293 buf[offs] = area->ops->read(area->priv, start + offs, 1);
295 EXPORT_SYMBOL(memcpy_fromio);
297 void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
299 struct logic_iomem_area *area = get_area(addr);
300 const u8 *buf = buffer;
301 unsigned long offs, start;
304 real_memcpy_toio(addr, buffer, size);
308 start = (unsigned long)addr & AREA_MASK;
310 if (area->ops->copy_to) {
311 area->ops->copy_to(area->priv, start, buffer, size);
315 for (offs = 0; offs < size; offs++)
316 area->ops->write(area->priv, start + offs, 1, buf[offs]);
318 EXPORT_SYMBOL(memcpy_toio);