// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 *
 * On the s390 platform, the lower 4 bits are used to identify a given page
 * table entry type. But these bits might affect the ability to clear entries
 * with pxx_clear() because of how dynamic page table folding works on s390.
 * So while loading up the entries do not change the lower 4 bits. This does
 * not affect any other platform. Also avoid the 62nd bit on ppc64, which is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK		(S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE		(GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE		GENMASK(7, 0)
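
/*
 * RANDOM_ORVALUE is ORed into page table entries before the pxx_clear()
 * tests below, so that clearing is verified against a non-zero garbage
 * value rather than an already empty entry. RANDOM_NZVALUE similarly
 * seeds the P4D/PGD same-ness checks with a non-zero byte pattern.
 */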
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};
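
/*
 * The pud/pmd/pte_pfn fields refer to pages actually allocated in
 * init_args() and may be ULONG_MAX when allocation fails, while the
 * fixed_*_pfn fields are derived from a kernel text symbol and are
 * always usable for constructing entries that are never dereferenced.
 */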
static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}
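
/*
 * The pattern used by pte_basic_tests() above (and by the pmd/pud
 * variants below) is to compose a modifier with its inverse, e.g.
 * pte_mkdirty() after pte_mkclean(), and then assert that the
 * corresponding query helper observes the expected final state.
 */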
static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding the TLB flush.
	 * This requires that set_pte_at() is not used to update an
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}
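
/*
 * pte_advanced_tests() above operates on a live pte slot, which is
 * why debug_vm_pgtable() below calls it with the pte lock held via
 * pte_offset_map_lock().
 */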
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}
static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pte entries */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}
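
/*
 * The deposit/withdraw pair above mirrors how THP installation
 * stashes a preallocated pte page table for a later split; some
 * architectures (powerpc, for instance) rely on that deposit being
 * present while a huge pmd is installed at the slot.
 */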
static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to the next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}
static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->mm, vaddr, args->pudp, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}
static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}
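
/*
 * The P4D and PGD level tests stay this minimal on purpose: these
 * levels are folded away on many configurations, so only the
 * pxx_same() helpers can be validated generically (see the note in
 * debug_vm_pgtable() below).
 */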
#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = READ_ONCE(*args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pudp, pud);
	pud_clear(args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */
#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = READ_ONCE(*args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->p4dp, p4d);
	p4d_clear(args->p4dp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = READ_ONCE(*(args->pgdp));

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pgdp, pgd);
	pgd_clear(args->pgdp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */
static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when it's released and the page allocation check will fail when
	 * the page is allocated again. For architectures other than ARM64,
	 * the unexpected overhead of cache flushing is acceptable.
	 */
	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}
static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pmdp, pmd);
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}
static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
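
/*
 * PROT_NONE entries are what automatic NUMA balancing installs to
 * trigger hinting faults, so a protnone entry must still read as
 * present to the rest of the MM code while being inaccessible.
 */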
#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
	    !IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
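
/*
 * Soft dirty bits let userspace (via /proc/pid/clear_refs and
 * pagemap) track which pages were written since the last reset; both
 * the regular and the swap variants of the bit must round-trip
 * through their set/clear helpers above.
 */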
static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
#ifdef __HAVE_ARCH_PTE_SWP_EXCLUSIVE
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE swap exclusive\n");
	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
#endif /* __HAVE_ARCH_PTE_SWP_EXCLUSIVE */
}
static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}
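
/*
 * The conversion above checks that a pte survives the round trip
 * through __pte_to_swp_entry()/__swp_entry_to_pte() with its pfn
 * intact, i.e. the arch swap entry encoding does not clobber the
 * fields the generic code relies on.
 */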
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be
	 * really problematic. Let's use the allocated page explicitly for
	 * this purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}
#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Enable back
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
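
/*
 * Returns a random, page-aligned address within the user address
 * range. The tests never touch the memory behind it; the address
 * only serves as a plausible vaddr argument for the pxx helpers
 * exercised against args->mm.
 */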
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}
static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_pages(page, 0);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}
static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}
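
/*
 * Orders at or above MAX_ORDER can only come from the contiguous
 * allocator; smaller orders fall back to the page allocator. A NULL
 * return simply makes the callers skip the corresponding huge page
 * tests.
 */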
static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	phys_addr_t phys;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr              = get_random_vaddr();
	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none     = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn            = ULONG_MAX;
	args->pmd_pfn            = ULONG_MAX;
	args->pte_pfn            = ULONG_MAX;
	args->fixed_pgd_pfn      = ULONG_MAX;
	args->fixed_p4d_pfn      = ULONG_MAX;
	args->fixed_pud_pfn      = ULONG_MAX;
	args->fixed_pmd_pfn      = ULONG_MAX;
	args->fixed_pte_pfn      = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	/*
	 * The PFN for mapping at PTE level is determined from a standard
	 * kernel text symbol. But pfns for higher page table levels are
	 * derived by masking lower bits of this real pfn. These derived
	 * pfns might not exist on the platform but that does not really
	 * matter as pfn_pxx() helpers will still create appropriate entries
	 * for the test. This helps avoid requiring large memory block
	 * allocations for the mappings at higher page table levels in some
	 * of the tests.
	 */
	phys = __pa_symbol(&start_kernel);
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_pages(GFP_KERNEL, 0);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}
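
/*
 * Note the allocation fallback order above: a PUD sized page is
 * preferred, then a PMD sized one, and finally a single base page,
 * so the advanced tests run with the largest mapping the system can
 * actually back with memory.
 */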
static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * the proper page table lock.
	 */

	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);
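
/*
 * The test is registered as a late_initcall() and therefore runs once
 * during boot; it is built only when CONFIG_DEBUG_VM_PGTABLE is
 * enabled, and each failed expectation above surfaces as a WARN_ON()
 * backtrace in the kernel log.
 */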