// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>

#include "pgalloc-track.h"
18 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
19 static unsigned int __ro_after_init iomap_max_page_shift = BITS_PER_LONG - 1;
21 static int __init set_nohugeiomap(char *str)
23 iomap_max_page_shift = PAGE_SHIFT;
26 early_param("nohugeiomap", set_nohugeiomap);
27 #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
28 static const unsigned int iomap_max_page_shift = PAGE_SHIFT;
29 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
31 int ioremap_page_range(unsigned long addr,
32 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
34 return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
37 #ifdef CONFIG_GENERIC_IOREMAP
38 void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
40 unsigned long offset, vaddr;
41 phys_addr_t last_addr;
42 struct vm_struct *area;
44 /* Disallow wrap-around or zero size */
45 last_addr = addr + size - 1;
46 if (!size || last_addr < addr)
49 /* Page-align mappings */
50 offset = addr & (~PAGE_MASK);
52 size = PAGE_ALIGN(size + offset);
54 area = get_vm_area_caller(size, VM_IOREMAP,
55 __builtin_return_address(0));
58 vaddr = (unsigned long)area->addr;
60 if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
65 return (void __iomem *)(vaddr + offset);
67 EXPORT_SYMBOL(ioremap_prot);
69 void iounmap(volatile void __iomem *addr)
71 vunmap((void *)((unsigned long)addr & PAGE_MASK));
73 EXPORT_SYMBOL(iounmap);
74 #endif /* CONFIG_GENERIC_IOREMAP */