/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
 */

/* Overhauled routines for dealing with different mmap regions of flash */

#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__

#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/io.h>

#include <asm/unaligned.h>
#include <asm/barrier.h>
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (1)
#define MAX_MAP_BANKWIDTH 1
#else
#define map_bankwidth_is_1(map) (0)
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 2
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 2
#else
#define map_bankwidth_is_2(map) (0)
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 4
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 4
#else
#define map_bankwidth_is_4(map) (0)
#endif
/* ensure we never evaluate anything shorter than an unsigned long
 * to zero, and ensure we'll never miss the end of a comparison (bjd) */

#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
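/*
 * For example, a 16-byte bankwidth gives map_calc_words() == (16 + 3) / 4
 * = 4 words on a 32-bit kernel and (16 + 7) / 8 = 2 words on a 64-bit one,
 * so a map_word always covers a whole number of longs.
 */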
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  if BITS_PER_LONG < 64
#   undef map_bankwidth_is_large
#   define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#   undef map_words
#   define map_words(map) map_calc_words(map)
#  endif
# else
#  define map_bankwidth(map) 8
#  define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 8
#else
#define map_bankwidth_is_8(map) (0)
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  undef map_bankwidth_is_large
#  define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#  undef map_words
#  define map_words(map) map_calc_words(map)
# else
#  define map_bankwidth(map) 16
#  define map_bankwidth_is_large(map) (1)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 16
#else
#define map_bankwidth_is_16(map) (0)
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
/* always use indirect access for 256-bit to preserve kernel stack */
# undef map_bankwidth
# define map_bankwidth(map) ((map)->bankwidth)
# undef map_bankwidth_is_large
# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
# undef map_words
# define map_words(map) map_calc_words(map)

#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 32
#else
#define map_bankwidth_is_32(map) (0)
#endif
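/*
 * Net effect of the selections above: with exactly one
 * CONFIG_MTD_MAP_BANK_WIDTH_x enabled, map_bankwidth(map) collapses to a
 * compile-time constant and the accessors below reduce to a single
 * fixed-width access; with several enabled, map_bankwidth(map) reads
 * map->bankwidth at run time, and MAX_MAP_BANKWIDTH tracks the largest
 * width configured.
 */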
#ifndef map_bankwidth
#ifdef CONFIG_MTD
#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
#endif
static inline int map_bankwidth(void *map)
{
	BUG();
	return 0;
}
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (0)
#define MAX_MAP_BANKWIDTH 1
#endif
static inline int map_bankwidth_supported(int w)
{
	switch (w) {
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
	case 1:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
	case 2:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
	case 4:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
	case 8:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
	case 16:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
	case 32:
#endif
		return 1;

	default:
		return 0;
	}
}
#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef union {
	unsigned long x[MAX_MAP_LONGS];
} map_word;
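/*
 * MAX_MAP_LONGS is the same longs-per-bankwidth calculation applied to the
 * widest configured bankwidth, e.g. MAX_MAP_BANKWIDTH == 32 gives
 * (256 + 31) / 32 = 8 longs on a 32-bit kernel and 4 on a 64-bit one.
 */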
/* The map stuff is very simple. You fill in your struct map_info with
   a handful of routines for accessing the device, making sure they handle
   paging etc. correctly if your device needs it. Then you pass it off
   to a chip probe routine -- either JEDEC or CFI probe or both -- via
   do_map_probe(). If a chip is recognised, the probe code will invoke the
   appropriate chip driver (if present) and return a struct mtd_info.
   At which point, you fill in the mtd->module with your own module
   address, and register it with the MTD core code. Or you could partition
   it and register the partitions instead, or keep it for your own private
   use; whatever.

   The mtd->priv field will point to the struct map_info, and any further
   private data required by the chip driver is linked from the
   mtd->priv->fldrv_priv field. This allows the map driver to get at
   the destructor function map->fldrv_destroy() when it's tired
   of living.
*/
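/*
 * A rough sketch of that probe path (illustration only, not a real driver:
 * the "physmap_example" name, addresses and error handling are made up for
 * this example):
 *
 *	static struct map_info example_map = {
 *		.name      = "physmap_example",
 *		.phys      = 0x10000000,
 *		.size      = 0x800000,
 *		.bankwidth = 2,
 *	};
 *
 *	example_map.virt = ioremap(example_map.phys, example_map.size);
 *	simple_map_init(&example_map);		// default accessors below
 *	mtd = do_map_probe("cfi_probe", &example_map);
 *	if (mtd) {
 *		mtd->owner = THIS_MODULE;
 *		mtd_device_register(mtd, NULL, 0);	// or register partitions
 *	}
 */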
struct map_info {
	const char *name;
	unsigned long size;
	resource_size_t phys;
#define NO_XIP (-1UL)

	void __iomem *virt;
	void *cached;

	int swap; /* this mapping's byte-swapping requirement */
	int bankwidth; /* in octets. This isn't necessarily the width
		       of actual bus cycles -- it's the repeat interval
		       in bytes, before you are talking to the first chip again.
		       */

#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
	map_word (*read)(struct map_info *, unsigned long);
	void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);

	void (*write)(struct map_info *, const map_word, unsigned long);
	void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);

	/* We can perhaps put in 'point' and 'unpoint' methods, if we really
	   want to enable XIP for non-linear mappings. Not yet though. */
#endif
	/* It's possible for the map driver to use cached memory in its
	   copy_from implementation (and _only_ with copy_from). However,
	   when the chip driver knows some flash area has changed contents,
	   it will signal it to the map driver through this routine to let
	   the map driver invalidate the corresponding cache as needed.
	   If there is no cache to care about this can be set to NULL. */
	void (*inval_cache)(struct map_info *, unsigned long, ssize_t);

	/* This will be called with 1 as parameter when the first map user
	 * needs VPP, and called with 0 when the last user exits. The map
	 * core maintains a reference counter, and assumes that VPP is a
	 * global resource applying to all mapped flash chips on the system.
	 */
	void (*set_vpp)(struct map_info *, int);

	unsigned long pfow_base;
	unsigned long map_priv_1;
	unsigned long map_priv_2;
	struct device_node *device_node;
	void *fldrv_priv;
	struct mtd_chip_driver *fldrv;
};
struct mtd_chip_driver {
	struct mtd_info *(*probe)(struct map_info *map);
	void (*destroy)(struct mtd_info *);
	struct module *module;
	char *name;
	struct list_head list;
};
void register_mtd_chip_driver(struct mtd_chip_driver *);
void unregister_mtd_chip_driver(struct mtd_chip_driver *);

struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);
#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
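/*
 * Typical (illustrative) use in a chip driver: bracket an operation that
 * needs programming voltage, relying on the reference counting described
 * at set_vpp() above.
 *
 *	ENABLE_VPP(map);
 *	... issue the program/erase command and wait for completion ...
 *	DISABLE_VPP(map);
 *
 * Both macros are no-ops when the map driver did not supply set_vpp().
 */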
#define INVALIDATE_CACHED_RANGE(map, from, size) \
	do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
#define map_word_equal(map, val1, val2) \
({ \
	int i, ret = 1; \
	for (i = 0; i < map_words(map); i++) \
		if ((val1).x[i] != (val2).x[i]) { \
			ret = 0; \
			break; \
		} \
	ret; \
})

#define map_word_and(map, val1, val2) \
({ \
	map_word r; \
	int i; \
	for (i = 0; i < map_words(map); i++) \
		r.x[i] = (val1).x[i] & (val2).x[i]; \
	r; \
})

#define map_word_clr(map, val1, val2) \
({ \
	map_word r; \
	int i; \
	for (i = 0; i < map_words(map); i++) \
		r.x[i] = (val1).x[i] & ~(val2).x[i]; \
	r; \
})

#define map_word_or(map, val1, val2) \
({ \
	map_word r; \
	int i; \
	for (i = 0; i < map_words(map); i++) \
		r.x[i] = (val1).x[i] | (val2).x[i]; \
	r; \
})

#define map_word_andequal(map, val1, val2, val3) \
({ \
	int i, ret = 1; \
	for (i = 0; i < map_words(map); i++) { \
		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
			ret = 0; \
			break; \
		} \
	} \
	ret; \
})

#define map_word_bitsset(map, val1, val2) \
({ \
	int i, ret = 0; \
	for (i = 0; i < map_words(map); i++) { \
		if ((val1).x[i] & (val2).x[i]) { \
			ret = 1; \
			break; \
		} \
	} \
	ret; \
})
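/*
 * Illustrative use of the helpers above (a sketch, not taken from a real
 * driver; "ofs" is an arbitrary word-aligned offset): test whether a word
 * read from the chip is still in the erased, all-ones state.
 *
 *	map_word datum = map_read(map, ofs);
 *
 *	if (map_word_equal(map, datum, map_word_ff(map)))
 *		... every byte across the bank reads 0xff ...
 */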
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
	map_word r;

	if (map_bankwidth_is_1(map))
		r.x[0] = *(unsigned char *)ptr;
	else if (map_bankwidth_is_2(map))
		r.x[0] = get_unaligned((uint16_t *)ptr);
	else if (map_bankwidth_is_4(map))
		r.x[0] = get_unaligned((uint32_t *)ptr);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		r.x[0] = get_unaligned((uint64_t *)ptr);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy(r.x, ptr, map->bankwidth);
	else
		BUG();

	return r;
}
static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
{
	int i;

	if (map_bankwidth_is_large(map)) {
		char *dest = (char *)&orig;

		memcpy(dest+start, buf, len);
	} else {
		for (i = start; i < start+len; i++) {
			int bitpos;

#ifdef __LITTLE_ENDIAN
			bitpos = i * 8;
#else /* __BIG_ENDIAN */
			bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
			orig.x[0] &= ~(0xffUL << bitpos);
			orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
		}
	}
	return orig;
}
#if BITS_PER_LONG < 64
#define MAP_FF_LIMIT 4
#else
#define MAP_FF_LIMIT 8
#endif

static inline map_word map_word_ff(struct map_info *map)
{
	map_word r;
	int i;

	if (map_bankwidth(map) < MAP_FF_LIMIT) {
		int bw = 8 * map_bankwidth(map);

		r.x[0] = (1UL << bw) - 1;
	} else {
		for (i = 0; i < map_words(map); i++)
			r.x[i] = ~0UL;
	}
	return r;
}
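/*
 * For example, map_word_ff() on a 2-byte bankwidth yields x[0] == 0xffff;
 * only bankwidths of a full long or more need every word of the map_word
 * filled in.
 */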
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
{
	map_word r;

	if (map_bankwidth_is_1(map))
		r.x[0] = __raw_readb(map->virt + ofs);
	else if (map_bankwidth_is_2(map))
		r.x[0] = __raw_readw(map->virt + ofs);
	else if (map_bankwidth_is_4(map))
		r.x[0] = __raw_readl(map->virt + ofs);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		r.x[0] = __raw_readq(map->virt + ofs);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
	else
		BUG();

	return r;
}
static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
{
	if (map_bankwidth_is_1(map))
		__raw_writeb(datum.x[0], map->virt + ofs);
	else if (map_bankwidth_is_2(map))
		__raw_writew(datum.x[0], map->virt + ofs);
	else if (map_bankwidth_is_4(map))
		__raw_writel(datum.x[0], map->virt + ofs);
#if BITS_PER_LONG >= 64
	else if (map_bankwidth_is_8(map))
		__raw_writeq(datum.x[0], map->virt + ofs);
#endif
	else if (map_bankwidth_is_large(map))
		memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
	else
		BUG();
	mb();
}
static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	if (map->cached)
		memcpy(to, (char *)map->cached + from, len);
	else
		memcpy_fromio(to, map->virt + from, len);
}

static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	memcpy_toio(map->virt + to, from, len);
}
#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
#define map_read(map, ofs) (map)->read(map, ofs)
#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)

extern void simple_map_init(struct map_info *);
#define map_is_linear(map) (map->phys != NO_XIP)

#else
#define map_read(map, ofs) inline_map_read(map, ofs)
#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)

#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
#define map_is_linear(map) ({ (void)(map); 1; })

#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
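/*
 * Whichever branch was taken above, chip drivers and map users go through
 * the map_read()/map_write()/map_copy_*() wrappers rather than touching
 * map->virt directly.  A minimal, purely illustrative sequence (cmd_ofs,
 * data_ofs, cmdbuf, buf and len are made-up names for this example):
 *
 *	map_word datum = map_word_load(map, cmdbuf);
 *
 *	map_write(map, datum, cmd_ofs);		// one bank-wide write
 *	map_copy_from(map, buf, data_ofs, len);	// bulk read into RAM
 */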
#endif /* __LINUX_MTD_MAP_H__ */