[platform/kernel/linux-starfive.git] mm/migrate.c
index 2053b54..b4d972d 100644
@@ -405,6 +405,7 @@ int folio_migrate_mapping(struct address_space *mapping,
        int dirty;
        int expected_count = folio_expected_refs(mapping, folio) + extra_count;
        long nr = folio_nr_pages(folio);
+       long entries, i;
 
        if (!mapping) {
                /* Anonymous page without mapping */
@@ -442,8 +443,10 @@ int folio_migrate_mapping(struct address_space *mapping,
                        folio_set_swapcache(newfolio);
                        newfolio->private = folio_get_private(folio);
                }
+               entries = nr;
        } else {
                VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+               entries = 1;
        }
 
        /* Move dirty while page refs frozen and newpage not yet exposed */
@@ -453,7 +456,11 @@ int folio_migrate_mapping(struct address_space *mapping,
                folio_set_dirty(newfolio);
        }
 
-       xas_store(&xas, newfolio);
+       /* Swap cache still stores N entries instead of a high-order entry */
+       for (i = 0; i < entries; i++) {
+               xas_store(&xas, newfolio);
+               xas_next(&xas);
+       }
 
        /*
         * Drop cache reference from old page by unfreezing
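The loop replaces what used to be a single xas_store(): when the folio is
in the swap cache, the xarray holds nr separate order-0 entries (one per
subpage) rather than one multi-index entry, so every slot has to be
rewritten or the tail entries would go on pointing at the old folio. For
the non-swapcache case a single multi-index entry covers the whole folio,
hence entries = 1. A minimal userspace sketch of that invariant, with
made-up names (slots, toy_migrate), modelling the swap cache as a flat
array of per-subpage slots:

	#include <stdio.h>

	#define NR_SLOTS 8

	static void *slots[NR_SLOTS];		/* toy swap-cache model */

	/* Mirror the fixed loop: store the new folio into every slot the
	 * old one occupied (xas_store() + xas_next() in the kernel). */
	static void toy_migrate(int first, long entries, void *newfolio)
	{
		for (long i = 0; i < entries; i++)
			slots[first + i] = newfolio;
	}

	int main(void)
	{
		int old_folio, new_folio;

		/* An order-2 folio owns 4 consecutive swap-cache slots. */
		for (int i = 0; i < 4; i++)
			slots[i] = &old_folio;

		toy_migrate(0, 4, &new_folio);	/* entries == nr */

		for (int i = 0; i < 4; i++)
			printf("slot %d -> %s\n", i,
			       slots[i] == &new_folio ? "new" : "STALE");
		return 0;
	}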
@@ -1019,32 +1026,31 @@ out:
 }
 
 /*
- * To record some information during migration, we use some unused
- * fields (mapping and private) of struct folio of the newly allocated
- * destination folio.  This is safe because nobody is using them
- * except us.
+ * To record some information during migration, we use the unused
+ * private field of struct folio of the newly allocated destination
+ * folio.  This is safe because nobody is using it except us.
  */
-union migration_ptr {
-       struct anon_vma *anon_vma;
-       struct address_space *mapping;
+enum {
+       PAGE_WAS_MAPPED = BIT(0),
+       PAGE_WAS_MLOCKED = BIT(1),
+       PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
 };
+
 static void __migrate_folio_record(struct folio *dst,
-                                  unsigned long page_was_mapped,
+                                  int old_page_state,
                                   struct anon_vma *anon_vma)
 {
-       union migration_ptr ptr = { .anon_vma = anon_vma };
-       dst->mapping = ptr.mapping;
-       dst->private = (void *)page_was_mapped;
+       dst->private = (void *)anon_vma + old_page_state;
 }
 
 static void __migrate_folio_extract(struct folio *dst,
-                                  int *page_was_mappedp,
+                                  int *old_page_state,
                                   struct anon_vma **anon_vmap)
 {
-       union migration_ptr ptr = { .mapping = dst->mapping };
-       *anon_vmap = ptr.anon_vma;
-       *page_was_mappedp = (unsigned long)dst->private;
-       dst->mapping = NULL;
+       unsigned long private = (unsigned long)dst->private;
+
+       *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
+       *old_page_state = private & PAGE_OLD_STATES;
        dst->private = NULL;
 }
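The old union overlaid the anon_vma pointer on folio->mapping and kept the
mapped flag in folio->private; the new scheme packs the pointer and both
state bits into private alone. That works because struct anon_vma is
allocated with at least 4-byte alignment, so the low two bits of its
address are always zero and are free to carry flags. A standalone sketch
of the same low-bit tagging trick (userspace, with a dummy struct
anon_vma; the alignas(4) stands in for the alignment the slab allocator
guarantees):

	#include <assert.h>
	#include <stdalign.h>
	#include <stdint.h>
	#include <stdio.h>

	enum {
		PAGE_WAS_MAPPED  = 1 << 0,
		PAGE_WAS_MLOCKED = 1 << 1,
		PAGE_OLD_STATES  = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
	};

	struct anon_vma { alignas(4) int dummy; };

	/* __migrate_folio_record(): stash flags in the pointer's low bits. */
	static void *record(struct anon_vma *av, int state)
	{
		assert(((uintptr_t)av & PAGE_OLD_STATES) == 0);
		return (void *)((uintptr_t)av | state);
	}

	/* __migrate_folio_extract(): split them apart again. */
	static struct anon_vma *extract(void *private, int *state)
	{
		uintptr_t p = (uintptr_t)private;

		*state = p & PAGE_OLD_STATES;
		return (struct anon_vma *)(p & ~(uintptr_t)PAGE_OLD_STATES);
	}

	int main(void)
	{
		struct anon_vma av;
		int state;
		void *private = record(&av, PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED);
		struct anon_vma *got = extract(private, &state);

		printf("pointer intact: %d, state: %#x\n", got == &av, state);
		return 0;
	}

(The kernel version adds the flags with pointer arithmetic instead of an
explicit OR; with the low bits known to be zero the two are equivalent.)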
 
@@ -1104,7 +1110,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 {
        struct folio *dst;
        int rc = -EAGAIN;
-       int page_was_mapped = 0;
+       int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__PageMovable(&src->page);
        bool locked = false;
@@ -1158,6 +1164,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
                folio_lock(src);
        }
        locked = true;
+       if (folio_test_mlocked(src))
+               old_page_state |= PAGE_WAS_MLOCKED;
 
        if (folio_test_writeback(src)) {
                /*
@@ -1207,7 +1215,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
        dst_locked = true;
 
        if (unlikely(!is_lru)) {
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }
 
@@ -1233,11 +1241,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                               !folio_test_ksm(src) && !anon_vma, src);
                try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
-               page_was_mapped = 1;
+               old_page_state |= PAGE_WAS_MAPPED;
        }
 
        if (!folio_mapped(src)) {
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }
 
@@ -1249,7 +1257,8 @@ out:
        if (rc == -EAGAIN)
                ret = NULL;
 
-       migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
+       migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+                              anon_vma, locked, ret);
        migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
 
        return rc;
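PAGE_WAS_MLOCKED has to be sampled where it is above, immediately after
the folio lock is taken, because unmapping the folio (try_to_migrate())
also clears its mlock state; by the time migrate_folio_move() wants to
decide whether to drain the per-CPU LRU batches, the flag on the source
folio is already gone. A toy model of that ordering constraint (invented
names toy_page/unmap; the flag clearing stands in for what the rmap code
does on unmap):

	#include <stdio.h>

	#define FLAG_MLOCKED 0x1u

	struct toy_page { unsigned int flags; };

	/* Unmapping drops the mlock state, as the rmap code does. */
	static void unmap(struct toy_page *p)
	{
		p->flags &= ~FLAG_MLOCKED;
	}

	int main(void)
	{
		struct toy_page src = { .flags = FLAG_MLOCKED };

		/* Record first, then unmap: the recorded bit survives. */
		int old_page_state = src.flags & FLAG_MLOCKED;

		unmap(&src);
		printf("recorded mlocked=%d, live flag now=%u\n",
		       !!old_page_state, src.flags & FLAG_MLOCKED);
		return 0;
	}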
@@ -1262,12 +1271,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
                              struct list_head *ret)
 {
        int rc;
-       int page_was_mapped = 0;
+       int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__PageMovable(&src->page);
        struct list_head *prev;
 
-       __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+       __migrate_folio_extract(dst, &old_page_state, &anon_vma);
        prev = dst->lru.prev;
        list_del(&dst->lru);
 
@@ -1288,10 +1297,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
         * isolated from the unevictable LRU: but this case is the easiest.
         */
        folio_add_lru(dst);
-       if (page_was_mapped)
+       if (old_page_state & PAGE_WAS_MLOCKED)
                lru_add_drain();
 
-       if (page_was_mapped)
+       if (old_page_state & PAGE_WAS_MAPPED)
                remove_migration_ptes(src, dst, false);
 
 out_unlock_both:
@@ -1323,11 +1332,12 @@ out:
         */
        if (rc == -EAGAIN) {
                list_add(&dst->lru, prev);
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return rc;
        }
 
-       migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
+       migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+                              anon_vma, true, ret);
        migrate_folio_undo_dst(dst, true, put_new_folio, private);
 
        return rc;
@@ -1795,12 +1805,12 @@ out:
        dst = list_first_entry(&dst_folios, struct folio, lru);
        dst2 = list_next_entry(dst, lru);
        list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-               int page_was_mapped = 0;
+               int old_page_state = 0;
                struct anon_vma *anon_vma = NULL;
 
-               __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
-               migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
-                                      true, ret_folios);
+               __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+               migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+                                      anon_vma, true, ret_folios);
                list_del(&dst->lru);
                migrate_folio_undo_dst(dst, true, put_new_folio, private);
                dst = dst2;
@@ -2162,6 +2172,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                         const int __user *nodes,
                         int __user *status, int flags)
 {
+       compat_uptr_t __user *compat_pages = (void __user *)pages;
        int current_node = NUMA_NO_NODE;
        LIST_HEAD(pagelist);
        int start, i;
@@ -2174,8 +2185,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                int node;
 
                err = -EFAULT;
-               if (get_user(p, pages + i))
-                       goto out_flush;
+               if (in_compat_syscall()) {
+                       compat_uptr_t cp;
+
+                       if (get_user(cp, compat_pages + i))
+                               goto out_flush;
+
+                       p = compat_ptr(cp);
+               } else {
+                       if (get_user(p, pages + i))
+                               goto out_flush;
+               }
                if (get_user(node, nodes + i))
                        goto out_flush;
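Without the compat branch, a 32-bit task calling move_pages() on a 64-bit
kernel would have its array of 32-bit user pointers walked with a 64-bit
stride: entry i would land on the bytes of entries 2i and 2i+1. A
userspace sketch of the stride bug (read_entry() is a made-up helper
standing in for the get_user() pair above; output assumes a little-endian
machine):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Read entry i from an array of user pointers whose element
	 * width depends on the caller's ABI. */
	static uint64_t read_entry(const void *array, size_t i, int is_compat)
	{
		if (is_compat) {		/* 32-bit caller: 4-byte stride */
			uint32_t cp;

			memcpy(&cp, (const uint32_t *)array + i, sizeof(cp));
			return cp;		/* compat_ptr(): zero-extend */
		} else {			/* native caller: 8-byte stride */
			uint64_t p;

			memcpy(&p, (const uint64_t *)array + i, sizeof(p));
			return p;
		}
	}

	int main(void)
	{
		/* Four 32-bit pointers, packed as a 32-bit task passes them. */
		uint32_t compat_array[] = { 0x1000, 0x2000, 0x3000, 0x4000 };

		/* Fixed behaviour: compat-aware read of entry 1. */
		printf("compat read: %#llx\n",
		       (unsigned long long)read_entry(compat_array, 1, 1));

		/* The bug being fixed: a 64-bit stride fuses entries 2+3. */
		printf("native read: %#llx\n",
		       (unsigned long long)read_entry(compat_array, 1, 0));
		return 0;
	}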