thp, khugepaged: skip retracting page table if a 64KB hugepage mapping is already...
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex       (while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     page->flags PG_locked (lock_page)   * (see hugetlbfs below)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     i_pages lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * * hugetlbfs PageHuge() pages take locks in this order:
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             page->flags PG_locked (lock_page)
 */
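
/*
 * Illustrative sketch (not part of this file): a path that needs both the
 * file rmap lock and an anon_vma lock must take them in the order documented
 * above -- mapping->i_mmap_rwsem before anon_vma->rwsem. The helper below is
 * hypothetical and exists only to show the nesting.
 */
#if 0	/* example only */
static void example_lock_both(struct address_space *mapping,
			      struct anon_vma *anon_vma)
{
	i_mmap_lock_write(mapping);	/* mapping->i_mmap_rwsem first */
	anon_vma_lock_write(anon_vma);	/* then anon_vma->rwsem */

	/* ... operate on both rmap interval trees ... */

	anon_vma_unlock_write(anon_vma);
	i_mmap_unlock_write(mapping);
}
#endif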

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
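
/*
 * Illustrative sketch (not part of this file): the anonymous fault path
 * calls anon_vma_prepare() before installing a new anonymous page; the
 * inline fast path only falls back to __anon_vma_prepare() above when
 * vma->anon_vma is still NULL. Hypothetical function, shown only for the
 * calling convention.
 */
#if 0	/* example only */
static int example_prepare_for_anon_fault(struct vm_area_struct *vma)
{
	/* Fast path is inline; slow path allocates and links an anon_vma. */
	if (anon_vma_prepare(vma))
		return -ENOMEM;

	/*
	 * vma->anon_vma is now valid; a new page can be added via
	 * page_add_new_anon_rmap() once its PTE is set up.
	 */
	return 0;
}
#endif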

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __vma_split(), __split_vma(), copy_vma() and
 * anon_vma_fork(). The first three want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
 * This prevents degradation of anon_vma hierarchy to endless linear chain in
 * case of constantly forking task. On the other hand, an anon_vma with more
 * than one child isn't reused even if there was no alive vma, thus rmap
 * walker has a good chance of avoiding scanning the whole hierarchy when it
 * searches where page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first
		 * child will always reuse it. Root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma != src->anon_vma && anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
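
/*
 * Illustrative sketch (not part of this file): fork duplicates each VMA and
 * calls anon_vma_fork() on the copy, roughly as dup_mmap() does. Hypothetical
 * helper, shown only for the call pattern and error handling.
 */
#if 0	/* example only */
static int example_dup_vma_rmap(struct vm_area_struct *new_vma,
				struct vm_area_struct *parent_vma)
{
	/*
	 * Links new_vma to the parent's anon_vmas and gives it its own;
	 * on failure the partially built chain is already unlinked.
	 */
	return anon_vma_fork(new_vma, parent_vma);
}
#endif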

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
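
/*
 * Illustrative sketch (not part of this file): a typical caller pattern for
 * page_get_anon_vma() -- take the reference, remember the result may be
 * stale, and drop the pin when done. Hypothetical function.
 */
#if 0	/* example only */
static void example_inspect_anon_page(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;		/* not anon, or no longer mapped */

	/*
	 * anon_vma is pinned here, but may no longer be the page's current
	 * anon_vma; verify that the page is actually mapped in any vma found
	 * before trusting it.
	 */

	put_anon_vma(anon_vma);	/* drop the pin */
}
#endif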

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we need to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}
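
/*
 * Illustrative sketch (not part of this file): rmap walkers pair
 * page_lock_anon_vma_read() with page_unlock_anon_vma_read() around the
 * interval-tree lookup, much as rmap_walk_anon() does. Hypothetical
 * function, shown only for the locking pattern.
 */
#if 0	/* example only */
static void example_walk_anon_rmap(struct page *page)
{
	struct anon_vma *anon_vma = page_lock_anon_vma_read(page);

	if (!anon_vma)
		return;

	/* ... iterate anon_vma->rb_root for vmas mapping this page ... */

	page_unlock_anon_vma_read(anon_vma);
}
#endif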

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush by batching */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (data_race(mm->tlb_flush_batched)) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
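
/*
 * Illustrative sketch (not part of this file): how batched TLB flushing is
 * meant to be used by a reclaim-style caller. Under the PTL each cleared PTE
 * is queued via set_tlb_ubc_flush_pending(); after the batch of unmaps, and
 * before any IO is started on the pages, try_to_unmap_flush_dirty() (or
 * try_to_unmap_flush()) runs the deferred flush. Hypothetical function; the
 * real flow lives in try_to_unmap_one() and shrink_page_list().
 */
#if 0	/* example only */
static void example_reclaim_batch(struct mm_struct *mm, pte_t pteval)
{
	/* called with the page table lock held, after ptep_get_and_clear() */
	if (should_defer_flush(mm, TTU_BATCH_FLUSH))
		set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));

	/* ... later, outside the PTL, before submitting writeback ... */
	try_to_unmap_flush_dirty();
}
#endif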

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}
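
/*
 * Illustrative sketch (not part of this file): callers treat the return
 * value of page_address_in_vma() as a user address on success and -EFAULT
 * when the page is not mapped in that vma. Hypothetical function.
 */
#if 0	/* example only */
static bool example_page_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = page_address_in_vma(page, vma);

	return address != -EFAULT;	/* -EFAULT means "not in this vma" */
}
#endif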

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}
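
/*
 * Illustrative sketch (not part of this file): mm_find_pmd() is usually
 * followed by pte_offset_map_lock() to reach and lock the PTE, as khugepaged
 * and the migration code do. Hypothetical function.
 */
#if 0	/* example only */
static pte_t *example_map_locked_pte(struct mm_struct *mm,
				     unsigned long address, spinlock_t **ptlp)
{
	pmd_t *pmd = mm_find_pmd(mm, address);

	if (!pmd)
		return NULL;	/* no pmd, or it is huge/non-present */

	return pte_offset_map_lock(mm, pmd, address, ptlp);
}
#endif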

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags that actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
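
/*
 * Illustrative sketch (not part of this file): page reclaim calls
 * page_referenced() to decide whether to reactivate a page, roughly as
 * page_check_references() in mm/vmscan.c does. Hypothetical function.
 */
#if 0	/* example only */
static bool example_recently_used(struct page *page, struct mem_cgroup *memcg)
{
	unsigned long vm_flags;
	int referenced;

	referenced = page_referenced(page, PageLocked(page), memcg,
				     &vm_flags);

	/* An mlocked mapping means the page cannot be reclaimed at all. */
	if (vm_flags & VM_LOCKED)
		return true;

	return referenced > 0;
}
#endif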

static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation.
	 * Note that the page cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				min(vma->vm_end, address + page_size(page)));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
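
/*
 * Illustrative sketch (not part of this file): writeback uses page_mkclean()
 * when transitioning a page to clean so that any later write through a
 * shared mapping re-faults and re-dirties the page -- compare
 * clear_page_dirty_for_io(). Hypothetical function.
 */
#if 0	/* example only */
static void example_prepare_for_writeback(struct page *page)
{
	lock_page(page);

	/* Write-protect and clean every PTE mapping the page. */
	if (page_mkclean(page))
		set_page_dirty(page);	/* account the latest dirtiers */

	unlock_page(page);
}
#endif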

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
		       page);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (unlikely(PageKsm(page)))
		lock_page_memcg(page);
	else
		VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? thp_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
#ifdef CONFIG_FINEGRAINED_THP
		{
			if (nr == HPAGE_PMD_NR)
				__inc_lruvec_page_state(page, NR_ANON_THPS);
			else
				__inc_lruvec_page_state(page, NR_ANON_64KB_THPS);
		}
#else /* CONFIG_FINEGRAINED_THP */
			__inc_lruvec_page_state(page, NR_ANON_THPS);
#endif /* CONFIG_FINEGRAINED_THP */
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	}

	if (unlikely(PageKsm(page))) {
		unlock_page_memcg(page);
		return;
	}

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}
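
/*
 * Illustrative sketch (not part of this file): do_swap_page() uses
 * do_page_add_anon_rmap() with RMAP_EXCLUSIVE when the swapped-in page is
 * owned solely by the faulting process. Hypothetical function, shown only
 * for the flag usage.
 */
#if 0	/* example only */
static void example_swapin_rmap(struct page *page,
				struct vm_area_struct *vma,
				unsigned long address, bool exclusive)
{
	/* called with the PTE lock held and the page locked */
	do_page_add_anon_rmap(page, vma, address,
			      exclusive ? RMAP_EXCLUSIVE : 0);
}
#endif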

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? thp_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		if (hpage_pincount_available(page))
			atomic_set(compound_pincount_ptr(page), 0);
#ifdef CONFIG_FINEGRAINED_THP
		if (nr == HPAGE_PMD_NR)
			__inc_lruvec_page_state(page, NR_ANON_THPS);
		else
			__inc_lruvec_page_state(page, NR_ANON_64KB_THPS);
#else /* CONFIG_FINEGRAINED_THP */
		__inc_lruvec_page_state(page, NR_ANON_THPS);
#endif /* CONFIG_FINEGRAINED_THP */
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		if (PageSwapBacked(page))
#ifdef CONFIG_FINEGRAINED_THP
			__inc_node_page_state(page, nr == HPAGE_PMD_NR ?
					NR_SHMEM_PMDMAPPED : NR_SHMEM_PTEMAPPED);
#else
			__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
#endif
		else
#ifdef CONFIG_FINEGRAINED_THP
			__inc_node_page_state(page, nr == HPAGE_PMD_NR ?
					NR_FILE_PMDMAPPED : NR_FILE_PTEMAPPED);
#else
			__inc_node_page_state(page, NR_FILE_PMDMAPPED);
#endif
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		return;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			return;
		if (PageSwapBacked(page))
#ifdef CONFIG_FINEGRAINED_THP
			__dec_node_page_state(page, nr == HPAGE_PMD_NR ?
					NR_SHMEM_PMDMAPPED : NR_SHMEM_PTEMAPPED);
#else
			__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
#endif
		else
#ifdef CONFIG_FINEGRAINED_THP
			__dec_node_page_state(page, nr == HPAGE_PMD_NR ?
					NR_FILE_PMDMAPPED : NR_FILE_PTEMAPPED);
#else
			__dec_node_page_state(page, NR_FILE_PMDMAPPED);
#endif
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			return;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

#ifdef CONFIG_FINEGRAINED_THP
	if (thp_nr_pages(page) == HPAGE_PMD_NR)
		__dec_lruvec_page_state(page, NR_ANON_THPS);
	else
		__dec_lruvec_page_state(page, NR_ANON_64KB_THPS);
#else /* CONFIG_FINEGRAINED_THP */
	__dec_lruvec_page_state(page, NR_ANON_THPS);
#endif /* CONFIG_FINEGRAINED_THP */

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}

		/*
		 * Queue the page for deferred split if at least one small
		 * page of the compound page is unmapped, but at least one
		 * small page is still mapped.
		 */
		if (nr && nr < thp_nr_pages(page))
			deferred_split_huge_page(page);
	} else {
		nr = thp_nr_pages(page);
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr)
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	lock_page_memcg(page);

	if (!PageAnon(page)) {
		page_remove_file_rmap(page, compound);
		goto out;
	}

	if (compound) {
		page_remove_anon_compound_rmap(page);
		goto out;
	}

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__dec_lruvec_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page))) {
		if (unlikely(PageTransCompound(page)))
			clear_page_mlock(compound_head(page));
		else
			clear_page_mlock(page);
	}

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
out:
	unlock_page_memcg(page);
}
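
/*
 * Illustrative sketch (not part of this file): zap_pte_range()-style
 * teardown clears the PTE, then drops the rmap and the page reference while
 * still holding the PTE lock. Hypothetical function.
 */
#if 0	/* example only */
static void example_zap_one(struct vm_area_struct *vma, struct page *page,
			    unsigned long address, pte_t *pte)
{
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, address, pte);

	if (pte_dirty(pteval))
		set_page_dirty(page);

	page_remove_rmap(page, false);	/* small-page (pte) mapping */
	put_page(page);
}
#endif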
1415
1416 /*
1417  * @arg: enum ttu_flags will be passed to this argument
1418  */
1419 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1420                      unsigned long address, void *arg)
1421 {
1422         struct mm_struct *mm = vma->vm_mm;
1423         struct page_vma_mapped_walk pvmw = {
1424                 .page = page,
1425                 .vma = vma,
1426                 .address = address,
1427         };
1428         pte_t pteval;
1429         struct page *subpage;
1430         bool ret = true;
1431         struct mmu_notifier_range range;
1432         enum ttu_flags flags = (enum ttu_flags)(long)arg;
1433
1434         /* munlock has nothing to gain from examining un-locked vmas */
1435         if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1436                 return true;
1437
1438         if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
1439             is_zone_device_page(page) && !is_device_private_page(page))
1440                 return true;
1441
1442         if (flags & TTU_SPLIT_HUGE_PMD) {
1443                 split_huge_pmd_address(vma, address,
1444                                 flags & TTU_SPLIT_FREEZE, page);
1445         }
1446
1447 #ifdef CONFIG_FINEGRAINED_THP
1448         if (flags & TTU_SPLIT_HUGE_PTE)
1449                 split_huge_pte_address(vma, address,
1450                                 flags & TTU_SPLIT_FREEZE, page);
1451 #endif
1452
1453         /*
1454          * For THP, we have to assume the worse case ie pmd for invalidation.
1455          * For hugetlb, it could be much worse if we need to do pud
1456          * invalidation in the case of pmd sharing.
1457          *
1458          * Note that the page cannot be freed in this function, as the caller
1459          * of try_to_unmap() must hold a reference on the page.
1460          */
1461         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1462                                 address,
1463                                 min(vma->vm_end, address + page_size(page)));
1464         if (PageHuge(page)) {
1465                 /*
1466                  * If sharing is possible, start and end will be adjusted
1467                  * accordingly.
1468                  */
1469                 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1470                                                      &range.end);
1471         }
1472         mmu_notifier_invalidate_range_start(&range);
1473
1474         while (page_vma_mapped_walk(&pvmw)) {
1475 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1476                 /* PMD-mapped THP migration entry */
1477                 if (!pvmw.pte && (flags & TTU_MIGRATION)) {
1478                         VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1479
1480                         set_pmd_migration_entry(&pvmw, page);
1481                         continue;
1482                 }
1483 #ifdef CONFIG_FINEGRAINED_THP
1484                 if (pvmw.pte && pte_cont(*pvmw.pte) && (flags & TTU_MIGRATION)) {
1485                         VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1486
1487                         set_huge_pte_migration_entry(&pvmw, page);
1488                         continue;
1489                 }
1490 #endif /* CONFIG_FINEGRAINED_THP */
1491 #endif
1492
1493                 /*
1494                  * If the page is mlock()d, we cannot swap it out.
1495                  * If it's recently referenced (perhaps page_referenced
1496                  * skipped over this mm) then we should reactivate it.
1497                  */
1498                 if (!(flags & TTU_IGNORE_MLOCK)) {
1499                         if (vma->vm_flags & VM_LOCKED) {
1500                                 /* PTE-mapped THP are never mlocked */
1501                                 if (!PageTransCompound(page)) {
1502                                         /*
1503                                          * Holding pte lock, we do *not* need
1504                                          * mmap_lock here
1505                                          */
1506                                         mlock_vma_page(page);
1507                                 }
1508                                 ret = false;
1509                                 page_vma_mapped_walk_done(&pvmw);
1510                                 break;
1511                         }
1512                         if (flags & TTU_MUNLOCK)
1513                                 continue;
1514                 }
1515
1516                 /* Unexpected PMD-mapped THP? */
1517                 VM_BUG_ON_PAGE(!pvmw.pte, page);
1518
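                /*
                 * For a compound page the walk can match any constituent
                 * pte, so recompute the exact subpage this pte maps from
                 * the pfn offset.
                 */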
1519                 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1520                 address = pvmw.address;
1521
1522                 if (PageHuge(page) && !PageAnon(page)) {
1523                         /*
1524                          * To call huge_pmd_unshare, i_mmap_rwsem must be
1525                          * held in write mode.  Caller needs to explicitly
1526                          * do this outside rmap routines.
1527                          */
1528                         VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1529 #ifdef CONFIG_FINEGRAINED_THP
1530                         if (thp_nr_pages(page) == HPAGE_PMD_NR &&
1531                                         huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
1532                                 /*
1533                                  * huge_pmd_unshare unmapped an entire PMD
1534                                  * page.  There is no way of knowing exactly
1535                                  * which PMDs may be cached for this mm, so
1536                                  * we must flush them all.  start/end were
1537                                  * already adjusted above to cover this range.
1538                                  */
1539                                 flush_cache_range(vma, range.start, range.end);
1540                                 flush_tlb_range(vma, range.start, range.end);
1541                                 mmu_notifier_invalidate_range(mm, range.start,
1542                                                               range.end);
1543                                 /*
1544                                  * The ref count of the PMD page was dropped
1545                                  * which is part of the way map counting
1546                                  * is done for shared PMDs.  Return 'true'
1547                                  * here.  When there is no other sharing,
1548                                  * huge_pmd_unshare returns false and we will
1549                                  * unmap the actual page and drop map count
1550                                  * to zero.
1551                                  */
1552                                 page_vma_mapped_walk_done(&pvmw);
1553                                 break;
1554                         }
1555 #else /* CONFIG_FINEGRAINED_THP */
1556                         if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
1557                                 /*
1558                                  * huge_pmd_unshare unmapped an entire PMD
1559                                  * page.  There is no way of knowing exactly
1560                                  * which PMDs may be cached for this mm, so
1561                                  * we must flush them all.  start/end were
1562                                  * already adjusted above to cover this range.
1563                                  */
1564                                 flush_cache_range(vma, range.start, range.end);
1565                                 flush_tlb_range(vma, range.start, range.end);
1566                                 mmu_notifier_invalidate_range(mm, range.start,
1567                                                               range.end);
1568
1569                                 /*
1570                                  * The ref count of the PMD page was dropped
1571                                  * which is part of the way map counting
1572                                  * is done for shared PMDs.  Return 'true'
1573                                  * here.  When there is no other sharing,
1574                                  * huge_pmd_unshare returns false and we will
1575                                  * unmap the actual page and drop map count
1576                                  * to zero.
1577                                  */
1578                                 page_vma_mapped_walk_done(&pvmw);
1579                                 break;
1580                         }
1581 #endif /* CONFIG_FINEGRAINED_THP */
1582                 }
1583
1584                 if (IS_ENABLED(CONFIG_MIGRATION) &&
1585                     (flags & TTU_MIGRATION) &&
1586                     is_zone_device_page(page)) {
1587                         swp_entry_t entry;
1588                         pte_t swp_pte;
1589
1590                         pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
1591
1592                         /*
1593                          * Store the pfn of the page in a special migration
1594                          * pte. do_swap_page() will wait until the migration
1595                          * pte is removed and then restart fault handling.
1596                          */
1597                         entry = make_migration_entry(page, 0);
1598                         swp_pte = swp_entry_to_pte(entry);
1599
1600                         /*
1601                          * pteval maps a zone device page and is therefore
1602                          * a swap pte.
1603                          */
1604                         if (pte_swp_soft_dirty(pteval))
1605                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1606                         if (pte_swp_uffd_wp(pteval))
1607                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
1608                         set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
1609                         /*
1610                          * No need to invalidate here: it will synchronize
1611                          * against the special swap migration pte.
1612                          *
1613                          * The assignment to subpage above was computed from a
1614                          * swap PTE which results in an invalid pointer.
1615                          * Since only PAGE_SIZE pages can currently be
1616                          * migrated, just set it to page. This will need to be
1617                          * changed when hugepage migrations to device private
1618                          * memory are supported.
1619                          */
1620                         subpage = page;
1621                         goto discard;
1622                 }
1623
1624                 /* Nuke the page table entry. */
1625                 flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1626                 if (should_defer_flush(mm, flags)) {
1627                         /*
1628                          * We clear the PTE but do not flush so potentially
1629                          * a remote CPU could still be writing to the page.
1630                          * If the entry was previously clean then the
1631                          * architecture must guarantee that a clear->dirty
1632                          * transition on a cached TLB entry is written through
1633                          * and traps if the PTE is unmapped.
1634                          */
1635                         pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1636
1637                         set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1638                 } else {
1639                         pteval = ptep_clear_flush(vma, address, pvmw.pte);
1640                 }
1641
1642                 /* Move the dirty bit to the page. Now the pte is gone. */
1643                 if (pte_dirty(pteval))
1644                         set_page_dirty(page);
1645
1646                 /* Update high watermark before we lower rss */
1647                 update_hiwater_rss(mm);
1648
1649                 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1650                         pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1651                         if (PageHuge(page)) {
1652                                 hugetlb_count_sub(compound_nr(page), mm);
1653                                 set_huge_swap_pte_at(mm, address,
1654                                                      pvmw.pte, pteval,
1655                                                      vma_mmu_pagesize(vma));
1656                         } else {
1657                                 dec_mm_counter(mm, mm_counter(page));
1658                                 set_pte_at(mm, address, pvmw.pte, pteval);
1659                         }
1660
1661                 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
1662                         /*
1663                          * The guest indicated that the page content is of no
1664                          * interest anymore. Simply discard the pte, vmscan
1665                          * will take care of the rest.
1666                          * A future reference will then fault in a new zero
1667                          * page. When userfaultfd is active, we must not drop
1668                          * this page though, as its main user (postcopy
1669                          * migration) will not expect userfaults on already
1670                          * copied pages.
1671                          */
1672                         dec_mm_counter(mm, mm_counter(page));
1673                         /* We have to invalidate as we cleared the pte */
1674                         mmu_notifier_invalidate_range(mm, address,
1675                                                       address + PAGE_SIZE);
1676                 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1677                                 (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
1678                         swp_entry_t entry;
1679                         pte_t swp_pte;
1680
1681                         if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1682                                 set_pte_at(mm, address, pvmw.pte, pteval);
1683                                 ret = false;
1684                                 page_vma_mapped_walk_done(&pvmw);
1685                                 break;
1686                         }
1687
1688                         /*
1689                          * Store the pfn of the page in a special migration
1690                          * pte. do_swap_page() will wait until the migration
1691                          * pte is removed and then restart fault handling.
1692                          */
1693                         entry = make_migration_entry(subpage,
1694                                         pte_write(pteval));
1695                         swp_pte = swp_entry_to_pte(entry);
1696                         if (pte_soft_dirty(pteval))
1697                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1698                         if (pte_uffd_wp(pteval))
1699                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
1700                         set_pte_at(mm, address, pvmw.pte, swp_pte);
1701                         /*
1702                          * No need to invalidate here: it will synchronize
1703                          * against the special swap migration pte.
1704                          */
1705                 } else if (PageAnon(page)) {
1706                         swp_entry_t entry = { .val = page_private(subpage) };
1707                         pte_t swp_pte;
1708                         /*
1709                          * Store the swap location in the pte.
1710                          * See handle_pte_fault() ...
1711                          */
1712                         if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
1713                                 WARN_ON_ONCE(1);
1714                                 ret = false;
1715                                 /* We have to invalidate as we cleared the pte */
1716                                 mmu_notifier_invalidate_range(mm, address,
1717                                                         address + PAGE_SIZE);
1718                                 page_vma_mapped_walk_done(&pvmw);
1719                                 break;
1720                         }
1721
1722                         /* MADV_FREE page check */
1723                         if (!PageSwapBacked(page)) {
1724                                 if (!PageDirty(page)) {
1725                                         /* Invalidate as we cleared the pte */
1726                                         mmu_notifier_invalidate_range(mm,
1727                                                 address, address + PAGE_SIZE);
1728                                         dec_mm_counter(mm, MM_ANONPAGES);
1729                                         goto discard;
1730                                 }
1731
1732                                 /*
1733                                  * If the page was redirtied, it cannot be
1734                                  * discarded. Remap the page to page table.
1735                                  */
1736                                 set_pte_at(mm, address, pvmw.pte, pteval);
1737                                 SetPageSwapBacked(page);
1738                                 ret = false;
1739                                 page_vma_mapped_walk_done(&pvmw);
1740                                 break;
1741                         }
1742
1743                         if (swap_duplicate(entry) < 0) {
1744                                 set_pte_at(mm, address, pvmw.pte, pteval);
1745                                 ret = false;
1746                                 page_vma_mapped_walk_done(&pvmw);
1747                                 break;
1748                         }
1749                         if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1750                                 set_pte_at(mm, address, pvmw.pte, pteval);
1751                                 ret = false;
1752                                 page_vma_mapped_walk_done(&pvmw);
1753                                 break;
1754                         }
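                        /*
                         * Make sure this mm is on init_mm's mmlist so that
                         * swapoff can later find and restore its swap ptes;
                         * list_empty() is re-checked under mmlist_lock to
                         * avoid a double add from a racing thread.
                         */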
1755                         if (list_empty(&mm->mmlist)) {
1756                                 spin_lock(&mmlist_lock);
1757                                 if (list_empty(&mm->mmlist))
1758                                         list_add(&mm->mmlist, &init_mm.mmlist);
1759                                 spin_unlock(&mmlist_lock);
1760                         }
1761                         dec_mm_counter(mm, MM_ANONPAGES);
1762                         inc_mm_counter(mm, MM_SWAPENTS);
1763                         swp_pte = swp_entry_to_pte(entry);
1764                         if (pte_soft_dirty(pteval))
1765                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1766                         if (pte_uffd_wp(pteval))
1767                                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
1768                         set_pte_at(mm, address, pvmw.pte, swp_pte);
1769                         /* Invalidate as we cleared the pte */
1770                         mmu_notifier_invalidate_range(mm, address,
1771                                                       address + PAGE_SIZE);
1772                 } else {
1773                         /*
1774                          * This is a locked file-backed page, thus it cannot
1775                          * be removed from the page cache and replaced by a new
1776                          * page before mmu_notifier_invalidate_range_end, so no
1777                          * concurrent thread can update its page table to
1778                          * point at the new page while a device is still
1779                          * using this page.
1780                          *
1781                          * See Documentation/vm/mmu_notifier.rst
1782                          */
1783                         dec_mm_counter(mm, mm_counter_file(page));
1784                 }
1785 discard:
1786                 /*
1787                  * No need to call mmu_notifier_invalidate_range(); it has been
1788                  * done above for all cases requiring it to happen under the
1789                  * page table lock, before mmu_notifier_invalidate_range_end().
1790                  *
1791                  * See Documentation/vm/mmu_notifier.rst
1792                  */
1793                 page_remove_rmap(subpage, PageHuge(page));
1794                 put_page(page);
1795         }
1796
1797         mmu_notifier_invalidate_range_end(&range);
1798
1799         return ret;
1800 }
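/*
 * A minimal sketch of the migration pte round trip performed above: the pfn
 * and write permission are encoded into a swap-format pte, which the fault
 * path can later decode to find the page under migration. The function name
 * is hypothetical and error handling is omitted.
 */
static void example_migration_pte_roundtrip(struct page *page, bool writable)
{
        swp_entry_t entry;
        pte_t swp_pte;

        /* encode: page + write bit -> migration entry -> swap pte */
        entry = make_migration_entry(page, writable);
        swp_pte = swp_entry_to_pte(entry);

        /* decode, as the fault path effectively does in do_swap_page() */
        entry = pte_to_swp_entry(swp_pte);
        if (is_migration_entry(entry))
                VM_BUG_ON_PAGE(migration_entry_to_page(entry) != page, page);
}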
1801
1802 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1803 {
1804         return vma_is_temporary_stack(vma);
1805 }
1806
1807 static int page_mapcount_is_zero(struct page *page)
1808 {
1809         return !total_mapcount(page);
1810 }
1811
1812 /**
1813  * try_to_unmap - try to remove all page table mappings to a page
1814  * @page: the page to get unmapped
1815  * @flags: action and flags
1816  *
1817  * Tries to remove all the page table entries which are mapping this
1818  * page, used in the pageout path.  Caller must hold the page lock.
1819  *
1820  * If unmap is successful, return true. Otherwise, false.
1821  */
1822 bool try_to_unmap(struct page *page, enum ttu_flags flags)
1823 {
1824         struct rmap_walk_control rwc = {
1825                 .rmap_one = try_to_unmap_one,
1826                 .arg = (void *)flags,
1827                 .done = page_mapcount_is_zero,
1828                 .anon_lock = page_lock_anon_vma_read,
1829         };
1830
1831         /*
1832          * During exec, a temporary VMA is set up and later moved.
1833          * The VMA is moved under the anon_vma lock but not the
1834          * page tables leading to a race where migration cannot
1835          * find the migration ptes. Rather than increasing the
1836          * locking requirements of exec(), migration skips
1837          * temporary VMAs until after exec() completes.
1838          */
1839         if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
1840             && !PageKsm(page) && PageAnon(page))
1841                 rwc.invalid_vma = invalid_migration_vma;
1842
1843         if (flags & TTU_RMAP_LOCKED)
1844                 rmap_walk_locked(page, &rwc);
1845         else
1846                 rmap_walk(page, &rwc);
1847
1848         return !page_mapcount(page);
1849 }
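/*
 * A minimal sketch of a pageout-style caller, assuming the page was taken
 * off the LRU with a reference held; example_unmap_for_reclaim() is a
 * hypothetical name. try_to_unmap() itself requires the page lock.
 */
static bool example_unmap_for_reclaim(struct page *page)
{
        bool unmapped = true;

        lock_page(page);
        if (page_mapped(page))
                unmapped = try_to_unmap(page, TTU_BATCH_FLUSH);
        unlock_page(page);

        return unmapped;
}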
1850
1851 static int page_not_mapped(struct page *page)
1852 {
1853         return !page_mapped(page);
1854 }
1855
1856 /**
1857  * try_to_munlock - try to munlock a page
1858  * @page: the page to be munlocked
1859  *
1860  * Called from munlock code.  Checks all of the VMAs mapping the page
1861  * to make sure nobody else has this page mlocked. The page will be
1862  * returned with PG_mlocked cleared if no other vmas have it mlocked.
1863  */
1865 void try_to_munlock(struct page *page)
1866 {
1867         struct rmap_walk_control rwc = {
1868                 .rmap_one = try_to_unmap_one,
1869                 .arg = (void *)TTU_MUNLOCK,
1870                 .done = page_not_mapped,
1871                 .anon_lock = page_lock_anon_vma_read,
1873         };
1874
1875         VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1876         VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
1877
1878         rmap_walk(page, &rwc);
1879 }
1880
1881 void __put_anon_vma(struct anon_vma *anon_vma)
1882 {
1883         struct anon_vma *root = anon_vma->root;
1884
1885         anon_vma_free(anon_vma);
1886         if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1887                 anon_vma_free(root);
1888 }
1889
1890 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1891                                         struct rmap_walk_control *rwc)
1892 {
1893         struct anon_vma *anon_vma;
1894
1895         if (rwc->anon_lock)
1896                 return rwc->anon_lock(page);
1897
1898         /*
1899          * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
1900          * because that depends on page_mapped(); but not all its usages
1901          * are holding mmap_lock. Users without mmap_lock are required to
1902          * take a reference count to prevent the anon_vma disappearing
1903          */
1904         anon_vma = page_anon_vma(page);
1905         if (!anon_vma)
1906                 return NULL;
1907
1908         anon_vma_lock_read(anon_vma);
1909         return anon_vma;
1910 }
1911
1912 /*
1913  * rmap_walk_anon - apply an rmap operation to an anonymous page using the
1914  * object-based rmap method
1915  * @page: the page to be handled
1916  * @rwc: control structure describing the walk
1917  *
1918  * Find all the mappings of a page using the mapping pointer and the vma chains
1919  * contained in the anon_vma struct it points to.
1920  *
1921  * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
1922  * where the page was found will be held for write.  So, we won't recheck
1923  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1924  * LOCKED.
1925  */
1926 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1927                 bool locked)
1928 {
1929         struct anon_vma *anon_vma;
1930         pgoff_t pgoff_start, pgoff_end;
1931         struct anon_vma_chain *avc;
1932
1933         if (locked) {
1934                 anon_vma = page_anon_vma(page);
1935                 /* anon_vma disappear under us? */
1936                 VM_BUG_ON_PAGE(!anon_vma, page);
1937         } else {
1938                 anon_vma = rmap_walk_anon_lock(page, rwc);
1939         }
1940         if (!anon_vma)
1941                 return;
1942
1943         pgoff_start = page_to_pgoff(page);
1944         pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
1945         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1946                         pgoff_start, pgoff_end) {
1947                 struct vm_area_struct *vma = avc->vma;
1948                 unsigned long address = vma_address(page, vma);
1949
1950                 cond_resched();
1951
1952                 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1953                         continue;
1954
1955                 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1956                         break;
1957                 if (rwc->done && rwc->done(page))
1958                         break;
1959         }
1960
1961         if (!locked)
1962                 anon_vma_unlock_read(anon_vma);
1963 }
1964
1965 /*
1966  * rmap_walk_file - apply an rmap operation to a file page using the object-based rmap method
1967  * @page: the page to be handled
1968  * @rwc: control structure describing the walk
1969  *
1970  * Find all the mappings of a page using the mapping pointer and the vma chains
1971  * contained in the address_space struct it points to.
1972  *
1973  * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
1974  * where the page was found will be held for write.  So, we won't recheck
1975  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1976  * LOCKED.
1977  */
1978 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1979                 bool locked)
1980 {
1981         struct address_space *mapping = page_mapping(page);
1982         pgoff_t pgoff_start, pgoff_end;
1983         struct vm_area_struct *vma;
1984
1985         /*
1986          * The page lock not only makes sure that page->mapping cannot
1987          * suddenly be NULLified by truncation, it makes sure that the
1988          * structure at mapping cannot be freed and reused yet,
1989          * so we can safely take mapping->i_mmap_rwsem.
1990          */
1991         VM_BUG_ON_PAGE(!PageLocked(page), page);
1992
1993         if (!mapping)
1994                 return;
1995
1996         pgoff_start = page_to_pgoff(page);
1997         pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
1998         if (!locked)
1999                 i_mmap_lock_read(mapping);
2000         vma_interval_tree_foreach(vma, &mapping->i_mmap,
2001                         pgoff_start, pgoff_end) {
2002                 unsigned long address = vma_address(page, vma);
2003
2004                 cond_resched();
2005
2006                 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2007                         continue;
2008
2009                 if (!rwc->rmap_one(page, vma, address, rwc->arg))
2010                         goto done;
2011                 if (rwc->done && rwc->done(page))
2012                         goto done;
2013         }
2014
2015 done:
2016         if (!locked)
2017                 i_mmap_unlock_read(mapping);
2018 }
2019
2020 void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
2021 {
2022         if (unlikely(PageKsm(page)))
2023                 rmap_walk_ksm(page, rwc);
2024         else if (PageAnon(page))
2025                 rmap_walk_anon(page, rwc, false);
2026         else
2027                 rmap_walk_file(page, rwc, false);
2028 }
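/*
 * A minimal sketch of an rmap_walk() client: count the VMAs currently
 * mapping @page. The example_* names are hypothetical; the caller must
 * hold the page lock, as the walks above require.
 */
struct example_rmap_counter {
        int nr;
};

static bool example_count_one(struct page *page, struct vm_area_struct *vma,
                unsigned long address, void *arg)
{
        struct example_rmap_counter *erc = arg;

        erc->nr++;
        return true;            /* keep walking the remaining mappings */
}

static int example_count_mappings(struct page *page)
{
        struct example_rmap_counter erc = { .nr = 0 };
        struct rmap_walk_control rwc = {
                .rmap_one = example_count_one,
                .arg = &erc,
        };

        rmap_walk(page, &rwc);
        return erc.nr;
}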
2029
2030 /* Like rmap_walk, but caller holds relevant rmap lock */
2031 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
2032 {
2033         /* no ksm support for now */
2034         VM_BUG_ON_PAGE(PageKsm(page), page);
2035         if (PageAnon(page))
2036                 rmap_walk_anon(page, rwc, true);
2037         else
2038                 rmap_walk_file(page, rwc, true);
2039 }
2040
2041 #ifdef CONFIG_HUGETLB_PAGE
2042 /*
2043  * The following two functions are for anonymous (privately mapped) hugepages.
2044  * Unlike common anonymous pages, anonymous hugepages have no accounting code
2045  * and no lru code, because we handle hugepages differently from common pages.
2046  */
2047 void hugepage_add_anon_rmap(struct page *page,
2048                             struct vm_area_struct *vma, unsigned long address)
2049 {
2050         struct anon_vma *anon_vma = vma->anon_vma;
2051         int first;
2052
2053         BUG_ON(!PageLocked(page));
2054         BUG_ON(!anon_vma);
2055         /* address might be in next vma when migration races vma_adjust */
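        /*
         * compound_mapcount is biased by -1, so atomic_inc_and_test()
         * returns true (result 0) only for the very first mapping.
         */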
2056         first = atomic_inc_and_test(compound_mapcount_ptr(page));
2057         if (first)
2058                 __page_set_anon_rmap(page, vma, address, 0);
2059 }
2060
2061 void hugepage_add_new_anon_rmap(struct page *page,
2062                         struct vm_area_struct *vma, unsigned long address)
2063 {
2064         BUG_ON(address < vma->vm_start || address >= vma->vm_end);
2065         atomic_set(compound_mapcount_ptr(page), 0);
2066         if (hpage_pincount_available(page))
2067                 atomic_set(compound_pincount_ptr(page), 0);
2068
2069         __page_set_anon_rmap(page, vma, address, 1);
2070 }
2071 #endif /* CONFIG_HUGETLB_PAGE */