// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
	pte_t ptent;

	if (pvmw->flags & PVMW_SYNC) {
		/* Use the stricter lookup */
		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
						pvmw->address, &pvmw->ptl);
		*ptlp = pvmw->ptl;
		return !!pvmw->pte;
	}

	/*
	 * It is important to return the ptl corresponding to pte,
	 * in case *pvmw->pmd changes underneath us; so we need to
	 * return it even when choosing not to lock, in case caller
	 * proceeds to loop over next ptes, and finds a match later.
	 * Though, in most cases, page lock already protects this.
	 */
	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
					  pvmw->address, ptlp);
	if (!pvmw->pte)
		return false;

	ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		if (!is_swap_pte(ptent))
			return false;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;
		/*
		 * Handle un-addressable ZONE_DEVICE memory.
		 *
		 * We get here when we are trying to unmap a private
		 * device page from the process address space. Such
		 * page is not CPU accessible and thus is mapped as
		 * a special swap entry, nonetheless it still does
		 * count as a valid regular mapping for the page
		 * (and is accounted as such in page maps count).
		 *
		 * So handle this special case as if it was a normal
		 * page mapping ie lock CPU page table and return true.
		 *
		 * For more details on device private memory see HMM
		 * (include/linux/hmm.h or mm/hmm.c).
		 */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;
	} else if (!pte_present(ptent)) {
		return false;
	}
	pvmw->ptl = *ptlp;
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
 * for checking
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into the pfn range (any subpage of a THP).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * into the pfn range (any subpage of a THP).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;
	pte_t ptent = ptep_get(pvmw->pte);

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(ptent))
			return false;
		entry = pte_to_swp_entry(ptent);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(ptent);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(ptent))
			return false;

		pfn = pte_pfn(ptent);
	}

	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}
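
/*
 * Worked example with illustrative numbers only: with pvmw->pfn == 0x1000 and
 * pvmw->nr_pages == 512 (e.g. a PMD-sized THP on x86-64), check_pte() above
 * accepts a PTE mapping pfn 0x11ff, since (0x11ff - 0x1000) == 511 < 512, and
 * rejects pfn 0x1200, since 512 is not < 512. The unsigned subtraction also
 * rejects any pfn below pvmw->pfn, because the difference wraps to a huge
 * value.
 */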

/* Returns true if the two ranges overlap.  Careful to not overflow. */
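/*
 * The PMD covers pfns [pfn, pfn + HPAGE_PMD_NR); the walk targets
 * [pvmw->pfn, pvmw->pfn + pvmw->nr_pages). The first test rejects a PMD
 * range that ends before the target begins, the second one a PMD range that
 * begins after the target ends; both use "- 1" so that only the last valid
 * pfn of each range is computed, never a one-past-the-end value that could
 * wrap.
 */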
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}
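
/*
 * Advance the walk to the next @size-aligned boundary; if the addition wraps
 * around the top of the address space, park the walk at ULONG_MAX so the
 * "address < end" loop condition terminates.
 */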
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 * and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP; see the
 * illustrative sketch after the function body below.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);
		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);
		/*
		 * All callers that get here will already hold the
		 * i_mmap_rwsem. Therefore, no additional locks need to be
		 * taken before calling hugetlb_walk().
		 */
		pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = pmdp_get_lockless(pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transhuge_vma_suitable(vma, pvmw->address) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw, &ptl)) {
			if (!pvmw->pte)
				goto restart;
			goto next_pte;
		}
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
		} while (pte_none(ptep_get(pvmw->pte)));

		if (!pvmw->ptl) {
			pvmw->ptl = ptl;
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
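
/*
 * A minimal usage sketch: a typical rmap-style caller drives
 * page_vma_mapped_walk() in a loop so that every PTE (or the single PMD
 * entry) mapping a folio inside one VMA is visited exactly once, with
 * pvmw.ptl held for each returned mapping. The folio, vma and address values
 * are assumed to come from the caller (e.g. an rmap walk), the initializer
 * helper DEFINE_FOLIO_VMA_WALK is assumed available via <linux/rmap.h>, and
 * the function name exists only for this sketch.
 */
static void __maybe_unused page_vma_mapped_walk_usage_sketch(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		/* pvmw.ptl is held here for the current mapping */
		if (!pvmw.pte) {
			/* PMD-mapped THP: the entry to inspect is *pvmw.pmd */
			continue;
		}
		/* One PTE mapping of the folio at pvmw.address: *pvmw.pte */
	}
	/* A false return means the walk already dropped its locks. */
}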

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
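
/*
 * A minimal caller sketch, assuming an rmap-style loop over candidate VMAs
 * (the pattern used by the hwpoison code in mm/memory-failure.c): VMAs that
 * do not actually map the page are simply skipped.
 *
 *	if (!page_mapped_in_vma(page, vma))
 *		continue;
 */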