// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed it to the same interface
 *           used by other architectures                /Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix them with normal page descriptors, so we have to copy the code
 * from mm/vmalloc.c and return appropriately aligned addresses.
 */
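
/*
 * Illustrative sketch, not a code path of its own: on 020/030 a single
 * early termination descriptor written straight into the pmd maps a
 * whole PMD_SIZE chunk,
 *
 *	pmd_val(*pmd_dir) = physaddr | cache_and_table_flags;
 *
 * while on 040/060 every PAGE_SIZE page gets its own pte, just as a
 * vmalloc() mapping would. The mapping loop in __ioremap() below does
 * exactly this in its two branches ("cache_and_table_flags" stands in
 * for the _PAGE_* bits assembled there).
 */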

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		PMD_SIZE

static struct vm_struct *iolist;
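
/*
 * iolist is kept sorted by ascending virtual address; get_io_area() below
 * does a first-fit scan for a free gap between KMAP_START and KMAP_END.
 */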

/*
 * __free_io_area unmaps nearly everything, so be careful.
 * Currently it no longer frees pointer/page tables, but that capability
 * wasn't used anyway and might be added again later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				/*
				 * Early termination descriptor: one pmd
				 * entry covers a whole PMD_SIZE chunk, and
				 * we must not fall through to the pte
				 * handling below after clearing it.
				 */
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			} else if (pmd_type == 0) {
				/* nothing mapped here; skip the whole chunk
				   instead of spinning on the same pmd */
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
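	/* reserve an extra IO_SIZE guard gap; free_io_area() strips it again */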
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 * Rewritten by Andreas Schwab to remove all races.
 */
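
/*
 * A hypothetical usage sketch (the physical address and cache flag are
 * made up for illustration; they are not taken from this file):
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0x00f00000, PAGE_SIZE, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	writeb(0x01, regs);
 *	iounmap(regs);
 */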

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
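	/*
	 * On Amiga, physical addresses in this window are usable as-is for
	 * serialized non-cached access, so hand the address straight back;
	 * iounmap() below knows to skip such identity "mappings".
	 */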
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
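	/*
	 * ColdFire on-chip peripheral addresses likewise need no page-table
	 * mapping and are returned unchanged.
	 */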
	if (__cf_internalio(physaddr))
		return (void __iomem *)physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
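	/*
	 * Worked example with illustrative numbers (IO_SIZE taken as
	 * 0x1000): physaddr = 0x00f00a00 and size = 0x100 become
	 * offset = 0xa00, physaddr = 0x00f00000, size = 0x1000, and the
	 * caller later gets back the mapped base plus that offset.
	 */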

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

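	/*
	 * Walk the page tables, allocating pointer/page tables as needed,
	 * and install the mapping chunk by chunk.
	 */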
	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again.
 */
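/*
 * Note that free_io_area() masks off the sub-IO_SIZE offset itself, so
 * the cookie returned by __ioremap() can be passed back unchanged.
 */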
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
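/*
 * Hypothetical usage sketch (fb_base and fb_size are made-up names for an
 * existing mapping):
 *
 *	kernel_set_cachemode(fb_base, fb_size, IOMAP_WRITETHROUGH);
 */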
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
			break;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);