// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
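
/*
 * Apply the requested changes to a raw page table entry value: clear the
 * bits in clear_mask, then set the bits in set_mask (both carried in
 * walk->private).
 */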
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}
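
/*
 * Callbacks for walk_page_range_novma(): a leaf entry at any level gets the
 * masks applied in place; holes in the range need no work.
 */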
static const struct mm_walk_ops pageattr_ops = {
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};
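
/*
 * On 64-bit kernels the linear mapping may be built from huge leaf entries
 * (PMD/PUD/P4D). Before changing the protections of only part of such a
 * mapping, the huge leaf must be split into a full next-level table; the
 * helpers below do that one level at a time.
 */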
#ifdef CONFIG_64BIT
static int __split_linear_mapping_pmd(pud_t *pudp,
				      unsigned long vaddr, unsigned long end)
{
	pmd_t *pmdp;
	unsigned long next;

	pmdp = pmd_offset(pudp, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (next - vaddr >= PMD_SIZE &&
		    vaddr <= (vaddr & PMD_MASK) && end >= next)
			continue;

		if (pmd_leaf(*pmdp)) {
			struct page *pte_page;
			unsigned long pfn = _pmd_pfn(*pmdp);
			pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
			pte_t *ptep_new;
			int i;

			pte_page = alloc_page(GFP_KERNEL);
			if (!pte_page)
				return -ENOMEM;

			ptep_new = (pte_t *)page_address(pte_page);
			for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
				set_pte(ptep_new, pfn_pte(pfn + i, prot));

			smp_wmb();

			set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
		}
	} while (pmdp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pud(p4d_t *p4dp,
				      unsigned long vaddr, unsigned long end)
{
	pud_t *pudp;
	unsigned long next;
	int ret;

	pudp = pud_offset(p4dp, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (next - vaddr >= PUD_SIZE &&
		    vaddr <= (vaddr & PUD_MASK) && end >= next)
			continue;

		if (pud_leaf(*pudp)) {
			struct page *pmd_page;
			unsigned long pfn = _pud_pfn(*pudp);
			pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
			pmd_t *pmdp_new;
			int i;

			pmd_page = alloc_page(GFP_KERNEL);
			if (!pmd_page)
				return -ENOMEM;

			pmdp_new = (pmd_t *)page_address(pmd_page);
			for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
				set_pmd(pmdp_new,
					pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));

			smp_wmb();

			set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pmd(pudp, vaddr, next);
		if (ret)
			return ret;
	} while (pudp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_p4d(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	p4d_t *p4dp;
	unsigned long next;
	int ret;

	p4dp = p4d_offset(pgdp, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		/*
		 * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
		 * need to split, we'll change the protections on the whole P4D.
		 */
		if (next - vaddr >= P4D_SIZE &&
		    vaddr <= (vaddr & P4D_MASK) && end >= next)
			continue;

		if (p4d_leaf(*p4dp)) {
			struct page *pud_page;
			unsigned long pfn = _p4d_pfn(*p4dp);
			pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
			pud_t *pudp_new;
			int i;

			pud_page = alloc_page(GFP_KERNEL);
			if (!pud_page)
				return -ENOMEM;

			/*
			 * Fill the pud level with leaf puds that have the same
			 * protections as the leaf p4d.
			 */
			pudp_new = (pud_t *)page_address(pud_page);
			for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
				set_pud(pudp_new,
					pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));

			/*
			 * Make sure the pud filling is not reordered with the
			 * p4d store which could result in seeing a partially
			 * filled pud level.
			 */
			smp_wmb();

			set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
		}

		ret = __split_linear_mapping_pud(p4dp, vaddr, next);
		if (ret)
			return ret;
	} while (p4dp++, vaddr = next, vaddr != end);

	return 0;
}

static int __split_linear_mapping_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	int ret;

	do {
		next = pgd_addr_end(vaddr, end);
		/* We never use PGD mappings for the linear mapping */
		ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
		if (ret)
			return ret;
	} while (pgdp++, vaddr = next, vaddr != end);

	return 0;
}

static int split_linear_mapping(unsigned long start, unsigned long end)
{
	return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
}
#endif	/* CONFIG_64BIT */
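
/*
 * Common helper for the set_memory_*() functions below: apply set_mask and
 * clear_mask to every entry mapping [addr, addr + numpages * PAGE_SIZE) and,
 * on 64-bit, mirror the change onto the linear mapping alias of those pages.
 */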
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	unsigned long __maybe_unused lm_start;
	unsigned long __maybe_unused lm_end;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);

#ifdef CONFIG_64BIT
	/*
	 * We are about to change the permissions of a kernel mapping, we must
	 * apply the same changes to its linear mapping alias, which may imply
	 * splitting a huge mapping.
	 */

	if (is_vmalloc_or_module_addr((void *)start)) {
		struct vm_struct *area = NULL;
		int i, page_start;

		area = find_vm_area((void *)start);
		page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;

		for (i = page_start; i < page_start + numpages; ++i) {
			lm_start = (unsigned long)page_address(area->pages[i]);
			lm_end = lm_start + PAGE_SIZE;

			ret = split_linear_mapping(lm_start, lm_end);
			if (ret)
				goto unlock;

			ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
						    &pageattr_ops, NULL, &masks);
			if (ret)
				goto unlock;
		}
	} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
		lm_start = (unsigned long)lm_alias(start);
		lm_end = (unsigned long)lm_alias(end);

		ret = split_linear_mapping(lm_start, lm_end);
		if (ret)
			goto unlock;

		ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
					    &pageattr_ops, NULL, &masks);
		if (ret)
			goto unlock;
	}

	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

unlock:
	mmap_write_unlock(&init_mm);

	/*
	 * We can't use flush_tlb_kernel_range() here as we may have split a
	 * hugepage that is larger than that, so let's flush everything.
	 */
	flush_tlb_all();
#else
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);

	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);
#endif

	return ret;
}
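
/*
 * Callers of the set_memory_*() wrappers below pass a page-aligned kernel
 * virtual address and a page count. For example, making one page of a
 * hypothetical buffer read-only and non-executable:
 *
 *	set_memory_ro((unsigned long)buf, 1);
 *	set_memory_nx((unsigned long)buf, 1);
 */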
int set_memory_rw_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}
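
/*
 * These implement the generic set_direct_map_*() interface for a single page:
 * make its linear mapping entry non-present, or restore the default kernel
 * protections.
 */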
int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    __pgprot(0), __pgprot(_PAGE_PRESENT));
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_memory((unsigned long)page_address(page), 1,
			    PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}
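
/*
 * With CONFIG_DEBUG_PAGEALLOC, pages freed to the allocator are unmapped from
 * the linear mapping (by clearing _PAGE_PRESENT) so that stray accesses fault,
 * and are mapped back when they are reallocated.
 */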
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif
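
/*
 * Walk the kernel page table by hand and report whether the page is mapped.
 * A present leaf entry at any level counts as mapped.
 */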
bool kernel_page_present(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (!pgd_present(*pgd))
		return false;
	if (pgd_leaf(*pgd))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return false;
	if (p4d_leaf(*p4d))
		return true;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return false;
	if (pmd_leaf(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(*pte);
}