// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <asm/kfence.h>
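
/*
 * Bits to set and clear in each PTE visited by change_page_range().
 */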
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
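
/*
 * When true, the linear map is mapped at page granularity, so linear
 * aliases of vmalloc'd ranges can have their permissions changed as well.
 */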
bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

bool can_set_direct_map(void)
{
	/*
	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
	 * mapped at page granularity, so that it is possible to
	 * protect/unprotect single pages.
	 *
	 * KFENCE pool requires page-granular mapping if initialized late.
	 */
	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
		arm64_kfence_can_set_direct_map();
}
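
/*
 * Callback for apply_to_page_range(): clear and then set the requested
 * permission bits in a single PTE.
 */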
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);

	return ret;
}
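
/*
 * Change permissions on a page-aligned range of kernel VAs. The range must
 * be entirely covered by a single VM_ALLOC area; with rodata_full, read-only
 * permission changes are also applied to the linear map alias of the backing
 * pages.
 */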
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;
	int i;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)kasan_reset_tag(area->addr) + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	/*
	 * If we are manipulating read-only permissions, apply the same
	 * change to the linear mapping of the pages that back this VM area.
	 */
	if (rodata_enabled &&
	    rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
			    pgprot_val(clear_mask) == PTE_RDONLY)) {
		for (i = 0; i < area->nr_pages; i++) {
			__change_memory_common((u64)page_address(area->pages[i]),
					       PAGE_SIZE, set_mask, clear_mask);
		}
	}

	/*
	 * Get rid of potentially aliasing lazily unmapped vm areas that may
	 * have permissions set that deviate from the ones we are setting here.
	 */
	vm_unmap_aliases();

	return __change_memory_common(start, size, set_mask, clear_mask);
}
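
/*
 * The set_memory_* helpers below change permissions on vmalloc'd kernel
 * memory; the address range must sit inside a single VM_ALLOC area (see
 * change_memory_common()). A hypothetical caller might make a vmalloc'd
 * buffer read-only with set_memory_ro((unsigned long)buf, nr_pages) and
 * restore write access later with set_memory_rw().
 */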
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(PTE_MAYBE_GP));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_MAYBE_GP),
					__pgprot(PTE_PXN));
}
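
/*
 * Set or clear the valid bit on a page-granular range, e.g. to temporarily
 * remove pages from the linear map and restore them later.
 */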
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
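
/*
 * Invalidate the linear map entry for @page without flushing the TLB, so
 * the page can no longer be reached through the direct map.
 */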
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}
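
/*
 * Restore the default (valid, writable) linear map entry for @page, again
 * without a TLB flush.
 */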
int set_direct_map_default_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(PTE_VALID | PTE_WRITE),
		.clear_mask = __pgprot(PTE_RDONLY),
	};

	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}
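
/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the linear map as they are
 * allocated and freed, so stray accesses to freed pages fault.
 */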
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!can_set_direct_map())
		return;

	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	if (!can_set_direct_map())
		return true;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return false;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}