// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

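/*
 * not_found - terminate the walk: release any PTE map and lock still held
 * via page_vma_mapped_walk_done() and report that no mapping was found.
 */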
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

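/*
 * map_pte - map the PTE for pvmw->address and decide whether it may match.
 *
 * Unless PVMW_SYNC is set, entries that cannot possibly be interesting
 * (e.g. a non-swap PTE while looking for a migration entry, or a swap PTE
 * that is not a device-private entry) are rejected without taking the PTE
 * lock; otherwise the lock is taken and true is returned so that the caller
 * can run check_pte() under it.
 */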
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        /*
                         * We get here when we are trying to unmap a private
                         * device page from the process address space. Such a
                         * page is not CPU accessible and is therefore mapped
                         * as a special swap entry; nonetheless it still
                         * counts as a valid regular mapping for the page
                         * (and is accounted as such in the page's map count).
                         *
                         * So handle this special case as if it were a normal
                         * page mapping, i.e. lock the CPU page table and
                         * return true.
                         *
                         * For more details on device private memory see HMM
                         * (include/linux/hmm.h or mm/hmm.c).
                         */
                        if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /* Handle un-addressable ZONE_DEVICE memory */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

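/*
 * pfn_is_match - does @pfn fall within @page?  An exact compare for a
 * normal or hugetlbfs page; for a THP, any of its subpage pfns matches.
 */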
static inline bool pfn_is_match(struct page *page, unsigned long pfn)
{
        unsigned long page_pfn = page_to_pfn(page);

        /* normal page and hugetlbfs page */
        if (!PageTransCompound(page) || PageHuge(page))
                return page_pfn == pfn;

        /* THP can be referenced by any subpage */
        return pfn >= page_pfn && pfn - page_pfn < hpage_nr_pages(page);
}

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any of its subpages in
 * the case of a THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page, or to any of its subpages in the case of a THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry))
                        return false;

                pfn = migration_entry_to_pfn(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry))
                        return false;

                pfn = device_private_entry_to_pfn(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

        return pfn_is_match(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP; a
 * typical caller loop is sketched in the comment after this function.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on the last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
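        /*
         * A huge pmd (or a pmd-level migration entry) maps the whole compound
         * page: take the pmd lock and handle the mapping at pmd level; if the
         * THP has been split under us, fall through to the pte level below.
         */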
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else if (!pmd_present(*pvmw->pmd)) {
                        if (thp_migration_supported()) {
                                if (!(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
                                        swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

                                        if (migration_entry_to_page(entry) != page)
                                                return not_found(pvmw);
                                        return true;
                                }
                        }
                        return not_found(pvmw);
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seeking to the next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}
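
/*
 * Typical usage of page_vma_mapped_walk(), a minimal sketch (the rmap
 * walkers in mm/rmap.c follow this pattern; "page", "vma" and "address"
 * below are the caller's):
 *
 *        struct page_vma_mapped_walk pvmw = {
 *                .page = page,
 *                .vma = vma,
 *                .address = address,
 *        };
 *
 *        while (page_vma_mapped_walk(&pvmw)) {
 *                ... pvmw.pte (or pvmw.pmd for a PMD-mapped THP) is valid and
 *                ... pvmw.ptl is held here: inspect or modify the entry, then
 *                ... continue, or call page_vma_mapped_walk_done() to stop early.
 *        }
 */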

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

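        /*
         * Compute the range of addresses that the (possibly compound) page
         * would occupy in this vma; bail out if it does not overlap the vma
         * at all, and start the walk at the first address that does.
         */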
        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}