// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still does
			 * count as a valid regular mapping for the page (and
			 * is accounted as such in the page maps count).
			 *
			 * So handle this special case as if it was a normal
			 * page mapping, i.e., lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
	unsigned long page_pfn = page_to_pfn(page);

	/* normal page and hugetlbfs page */
	if (!PageTransCompound(page) || PageHuge(page))
		return page_pfn == pfn;

	/* THP can be referenced by any subpage */
	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
}
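/*
 * Illustrative note (editorial addition, not part of the original source):
 * for a PMD-sized THP whose head page sits at pfn 0x1000, thp_nr_pages()
 * is 512 with 4KiB base pages, so pfn_is_match() accepts any pfn in the
 * range [0x1000, 0x11ff], i.e. every subpage of the compound page, while a
 * normal or hugetlbfs page only matches its own pfn.
 */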
/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_migration_entry(entry))
			return false;
		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;
		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;
		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_is_match(pvmw->page, pfn);
}
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}
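/*
 * Illustrative note (editorial addition, not part of the original source):
 * step_forward() rounds the walk address up to the next @size boundary,
 * e.g. with PMD_SIZE = 0x200000 an address of 0x1234567 steps to 0x1400000.
 * If the addition wraps past the top of the address space the result would
 * be 0, so it is replaced with ULONG_MAX to make the caller's
 * "while (pvmw->address < end)" loop terminate.
 */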
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
	/*
	 * Seeking to the next pte only makes sense for THP.
	 * But more important than that optimization is to filter out
	 * any PageKsm page, whose page->index misleads vma_address()
	 * and vma_address_end() to disaster.
	 */
	end = PageTransCompound(page) ?
		vma_address_end(page, pvmw->vma) :
		pvmw->address + PAGE_SIZE;
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);
		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    migration_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
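/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): rmap walkers such as try_to_unmap_one() drive this iterator in a
 * loop so that every PTE of a PTE-mapped THP is visited; handle_pte() and
 * handle_pmd() below are hypothetical helpers.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			handle_pte(pvmw.pte);	// PTE-level mapping
 *		else
 *			handle_pmd(pvmw.pmd);	// PMD-mapped THP
 *	}
 *
 * Each successful iteration returns with pvmw.ptl held; breaking out of the
 * loop early requires page_vma_mapped_walk_done() to drop it.
 */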
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
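/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): callers that only need a yes/no answer, such as the memory-failure
 * code collecting the tasks that map a poisoned page, use this as a simple
 * filter; note_mapping() is a hypothetical helper.
 *
 *	if (page_mapped_in_vma(page, vma))
 *		note_mapping(vma);
 *
 * PVMW_SYNC is set internally, so the walk serializes against the page
 * table locks instead of reporting "not mapped" prematurely while, e.g.,
 * a THP pmd is being zapped.
 */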