mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

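/* Finish the walk (dropping any pte map/lock held) and report "not mapped". */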
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

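/*
 * map_pte - map the pte for @pvmw->address and take its lock.
 *
 * Unless PVMW_SYNC is set, peek at the unlocked pte first and return false
 * (without taking the lock) when it clearly cannot map @pvmw->page:
 * PVMW_MIGRATION walks only accept swap ptes, other walks only accept
 * present ptes or device-private swap entries.
 */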
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and is therefore mapped
			 * as a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
{
	unsigned long hpage_pfn = page_to_pfn(hpage);

	/* THP can be referenced by any subpage */
	return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
}

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return pfn_in_hpage(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

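	/* A pte is already mapped: this is a continued walk, resume from it */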
	if (pvmw->pte)
		goto next_pte;

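	/*
	 * hugetlb pages are mapped by a single huge page table entry: check
	 * it under its lock and finish the walk here, there is nothing to
	 * iterate over.
	 */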
	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
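	/*
	 * Walk the page tables from the top (pgd -> p4d -> pud -> pmd) for
	 * the current pvmw->address; we come back here whenever the walk
	 * crosses into a new pmd.
	 */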
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
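	/*
	 * A huge pmd (or a pmd migration entry) maps the whole compound page:
	 * handle it at pmd level under pmd_lock() and do not descend to ptes
	 * unless the THP got split under us.
	 */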
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
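	/*
	 * Scan the ptes that can map the (possibly THP) page: check the
	 * current one, then advance within the page's range in this vma,
	 * restarting the walk from the top whenever a pmd boundary is
	 * crossed.
	 */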
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seeking to the next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
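
/*
 * Example (illustrative sketch, not used by this file): a typical rmap-style
 * caller sets up the walk on the stack and loops until page_vma_mapped_walk()
 * returns false; "page", "vma" and "address" stand for whatever the caller
 * already holds.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (pvmw.pte)
 *			handle_pte_mapping(pvmw.pte);	(pte-level mapping)
 *		else
 *			handle_pmd_mapping(pvmw.pmd);	(pmd-mapped THP)
 *	}
 *
 * handle_pte_mapping()/handle_pmd_mapping() are hypothetical helpers standing
 * in for the caller's own per-mapping work.
 */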

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
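	/*
	 * PVMW_SYNC makes map_pte() take the pte lock unconditionally instead
	 * of filtering on the unlocked pte value first.
	 */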
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}