// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#include "pgalloc-track.h"

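/*
 * With CONFIG_HAVE_ARCH_HUGE_VMAP, ioremap may install PMD/PUD/P4D-sized
 * huge entries instead of individual PTEs.  The flags below record, per
 * level, whether the architecture supports this; they are filled in at
 * boot by ioremap_huge_init() and forced off by the "nohugeiomap"
 * kernel command-line parameter.
 */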
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

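/*
 * Runs once during early boot: unless "nohugeiomap" was given on the
 * command line, ask the architecture which page-table levels it can
 * map with a single huge entry.
 */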
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_p4d_supported())
			ioremap_p4d_capable = 1;
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

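/*
 * Leaf level: map [addr, end) to phys_addr with one PTE per PAGE_SIZE.
 * The range must be currently unmapped (BUG_ON otherwise); *mask
 * accumulates which table levels were modified so the top-level caller
 * knows whether arch_sync_kernel_mappings() is required.
 */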
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

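/*
 * Try to cover [addr, end) with a single huge PMD.  This requires huge
 * PMD mappings to be enabled, the chunk to span exactly PMD_SIZE with
 * both virtual and physical addresses PMD-aligned, and any page-table
 * page left behind by a previous mapping to be freeable.  A nonzero
 * return means the huge entry was installed and the PTE level is
 * skipped.
 */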
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pmd_enabled())
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

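/*
 * Walk (allocating as needed) the PMD level for [addr, end): take the
 * huge-PMD fast path per chunk when possible, otherwise fill in PTEs.
 */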
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

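/*
 * The PUD and P4D levels below repeat the same pattern one level up:
 * attempt one huge entry for an exactly-aligned chunk, else descend to
 * the next-lower table level.
 */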
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_pud_enabled())
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
				unsigned long end, phys_addr_t phys_addr,
				pgprot_t prot)
{
	if (!ioremap_p4d_enabled())
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

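/*
 * Top-level walker: map [addr, end) to phys_addr in the kernel page
 * tables, then flush the cache over the new virtual range and, where
 * the architecture demands it (ARCH_PAGE_TABLE_SYNC_MASK), synchronize
 * the kernel mappings.
 */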
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,
					&mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	flush_cache_vmap(start, end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

#ifdef CONFIG_GENERIC_IOREMAP
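/**
 * ioremap_prot - map bus memory into CPU space with given protection bits
 * @addr: physical address to map
 * @size: size of the range, in bytes
 * @prot: page protection bits for the mapping
 *
 * A minimal usage sketch (base, len, prot and REG_OFF are placeholder
 * names; real callers typically take them from a device resource):
 *
 *	void __iomem *regs = ioremap_prot(base, len, prot);
 *	if (!regs)
 *		return -ENOMEM;
 *	... readl(regs + REG_OFF) / writel(val, regs + REG_OFF) ...
 *	iounmap(regs);
 */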
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	unsigned long offset, vaddr;
	phys_addr_t last_addr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr -= offset;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
			__builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

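/*
 * Undo ioremap_prot(): mask the sub-page offset back off and release
 * the underlying vmalloc area.
 */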
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
#endif	/* CONFIG_GENERIC_IOREMAP */