#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
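
/*
 * For reference, the callback set consumed below. This is only a sketch of
 * the definition that lives in include/linux/mm.h; the exact field list may
 * differ between kernel versions:
 *
 *	struct mm_walk {
 *		int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
 *		int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
 *		int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 *		int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 *		int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
 *		int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
 *				     unsigned long, struct mm_walk *);
 *		struct mm_struct *mm;
 *		void *private;
 *	};
 */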

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;

	/* Map the PTE page (possibly from highmem) and visit every entry. */
	pte = pte_offset_map(pmd, addr);
	for (;;) {
		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		addr += PAGE_SIZE;
		if (addr == end)
			break;
		pte++;
	}

	pte_unmap(pte);
	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	int err = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			/* Report the unpopulated range as a hole. */
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pmd_entry)
			err = walk->pmd_entry(pmd, addr, next, walk);
		/* Descend to the PTE level only if a callback wants it. */
		if (!err && walk->pte_entry)
			err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	int err = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			continue;
		}
		if (walk->pud_entry)
			err = walk->pud_entry(pud, addr, next, walk);
		if (!err && (walk->pmd_entry || walk->pte_entry))
			err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* End of the huge page containing @addr, clamped to @end. */
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(struct vm_area_struct *vma,
			      unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	pte_t *pte;
	int err = 0;

	do {
		/* Step one huge page at a time, not one base page. */
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask);
		if (pte && walk->hugetlb_entry)
			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
		if (err)
			return err;
	} while (addr = next, addr != end);

	return 0;
}
#endif

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm must point to the mm_struct whose tables are walked
 *
 * Recursively walk the page table for the memory area in a VMA,
 * calling the supplied callbacks. Callbacks are called in-order (first
 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken, but the bottom-level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * that value is propagated back to the caller. Otherwise 0 is returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
		    struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	struct vm_area_struct *vma;

	if (addr >= end)
		return err;

	if (!walk->mm)
		return -EINVAL;

	pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);

		/*
		 * Handle a hugetlb vma individually, because the page table
		 * walk for a hugetlb page depends on the architecture and
		 * we can't handle it in the same manner as non-huge pages.
		 */
		vma = find_vma(walk->mm, addr);
#ifdef CONFIG_HUGETLB_PAGE
		if (vma && is_vm_hugetlb_page(vma)) {
			if (vma->vm_end < next)
				next = vma->vm_end;
			/*
			 * A hugepage is very tightly coupled with its vma,
			 * so walk the hugetlb entries within this vma only.
			 */
			err = walk_hugetlb_range(vma, addr, next, walk);
			if (err)
				break;
			pgd = pgd_offset(walk->mm, next);
			continue;
		}
#endif
		if (pgd_none_or_clear_bad(pgd)) {
			if (walk->pte_hole)
				err = walk->pte_hole(addr, next, walk);
			if (err)
				break;
			pgd++;
			continue;
		}
		if (walk->pgd_entry)
			err = walk->pgd_entry(pgd, addr, next, walk);
		if (!err &&
		    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
			err = walk_pud_range(pgd, addr, next, walk);
		if (err)
			break;
		pgd++;
	} while (addr = next, addr != end);

	return err;
}
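
/*
 * Example (an illustrative sketch only, not part of this file): a minimal
 * client that counts present pages in a range. The count_pte() and
 * count_present() names are hypothetical; only walk_page_range(),
 * struct mm_walk and the pte helpers are real. A caller would typically
 * hold mm->mmap_sem for reading around the walk.
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long end, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;	// a non-zero return would abort the walk
 *	}
 *
 *	static unsigned long count_present(struct mm_struct *mm,
 *					   unsigned long start,
 *					   unsigned long end)
 *	{
 *		unsigned long count = 0;
 *		struct mm_walk walk = {
 *			.pte_entry	= count_pte,
 *			.mm		= mm,
 *			.private	= &count,
 *		};
 *
 *		walk_page_range(start, end, &walk);
 *		return count;
 *	}
 */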