index c570f82..b874c47 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -63,6 +63,7 @@
 #include <linux/hugetlb.h>
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
+#include <linux/memremap.h>
 
 #include <asm/tlbflush.h>
 
@@ -390,7 +391,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
                 * Leave empty anon_vmas on the list - we'll need
                 * to free them outside the lock.
                 */
-               if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+               if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
                        anon_vma->parent->degree--;
                        continue;
                }
@@ -424,7 +425,7 @@ static void anon_vma_ctor(void *data)
 
        init_rwsem(&anon_vma->rwsem);
        atomic_set(&anon_vma->refcount, 0);
-       anon_vma->rb_root = RB_ROOT;
+       anon_vma->rb_root = RB_ROOT_CACHED;
 }
 
 void __init anon_vma_init(void)
@@ -1346,9 +1347,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
                return true;
 
+       if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
+           is_zone_device_page(page) && !is_device_private_page(page))
+               return true;
+
        if (flags & TTU_SPLIT_HUGE_PMD) {
                split_huge_pmd_address(vma, address,
-                               flags & TTU_MIGRATION, page);
+                               flags & TTU_SPLIT_FREEZE, page);
        }
 
        /*
@@ -1360,6 +1365,19 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+               /* PMD-mapped THP migration entry */
+               if (!pvmw.pte && (flags & TTU_MIGRATION)) {
+                       VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
+
+                       if (!PageAnon(page))
+                               continue;
+
+                       set_pmd_migration_entry(&pvmw, page);
+                       continue;
+               }
+#endif
+
                /*
                 * If the page is mlock()d, we cannot swap it out.
                 * If it's recently referenced (perhaps page_referenced
@@ -1390,6 +1408,27 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                address = pvmw.address;
 
 
+               if (IS_ENABLED(CONFIG_MIGRATION) &&
+                   (flags & TTU_MIGRATION) &&
+                   is_zone_device_page(page)) {
+                       swp_entry_t entry;
+                       pte_t swp_pte;
+
+                       pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
+
+                       /*
+                        * Store the pfn of the page in a special migration
+                        * pte. do_swap_page() will wait until the migration
+                        * pte is removed and then restart fault handling.
+                        */
+                       entry = make_migration_entry(page, 0);
+                       swp_pte = swp_entry_to_pte(entry);
+                       if (pte_soft_dirty(pteval))
+                               swp_pte = pte_swp_mksoft_dirty(swp_pte);
+                       set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                       goto discard;
+               }
+
                if (!(flags & TTU_IGNORE_ACCESS)) {
                        if (ptep_clear_flush_young_notify(vma, address,
                                                pvmw.pte)) {
@@ -1445,7 +1484,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         */
                        dec_mm_counter(mm, mm_counter(page));
                } else if (IS_ENABLED(CONFIG_MIGRATION) &&
-                               (flags & TTU_MIGRATION)) {
+                               (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
                        swp_entry_t entry;
                        pte_t swp_pte;
                        /*
@@ -1575,7 +1614,8 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
         * locking requirements of exec(), migration skips
         * temporary VMAs until after exec() completes.
         */
-       if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
+       if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
+           && !PageKsm(page) && PageAnon(page))
                rwc.invalid_vma = invalid_migration_vma;
 
        if (flags & TTU_RMAP_LOCKED)
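
A hedged usage sketch (not part of the patch above): the hunks introduce TTU_SPLIT_FREEZE as a way to ask try_to_unmap_one() to install migration entries without setting TTU_MIGRATION, which is how a THP split path can "freeze" a compound page's mappings. The caller below is illustrative only; the helper name freeze_page_sketch and the exact flag mix are assumptions, while try_to_unmap(), the TTU_* flags, PageHead()/PageAnon() and VM_BUG_ON_PAGE() come from the kernel tree this diff targets.

/*
 * Illustrative sketch, not taken from this diff: a THP-split style
 * caller requesting "freeze" semantics.  With this patch, setting
 * TTU_SPLIT_FREEZE (rather than TTU_MIGRATION) makes try_to_unmap_one()
 * replace the PTEs of an anonymous THP with migration entries while the
 * split is in progress.
 */
static void freeze_page_sketch(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
				   TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
	bool unmap_success;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Only anonymous pages are frozen into migration entries. */
	if (PageAnon(page))
		ttu_flags |= TTU_SPLIT_FREEZE;

	unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
}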