mm/gup: check for isolation errors
author Pavel Tatashin <pasha.tatashin@soleen.com>
Wed, 5 May 2021 01:38:49 +0000 (18:38 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 May 2021 08:13:08 +0000 (10:13 +0200)
[ Upstream commit 6e7f34ebb8d25d71ce7f4580ba3cbfc10b895580 ]

It is still possible to pin movable CMA pages: when isolating a page
fails, it is never added to cma_page_list, so the list can be empty on
the next check even though pinned CMA pages remain.

Check for isolation errors, and return success only when there are no
isolation errors and cma_page_list is empty after checking.

Because isolation errors are transient, we retry indefinitely.
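
To make the new control flow concrete, below is a minimal user-space
sketch of the fixed loop. It compiles standalone; struct page,
isolate_page() and the in_cma/on_cma_list fields here are illustrative
stand-ins for is_migrate_cma_page(), isolate_lru_page()/
isolate_huge_page() and cma_page_list, not the kernel API, and the
unpin/re-pin around migration via __get_user_pages_locked() is omitted
for brevity:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of a page; the real structure and helpers live in the kernel. */
struct page {
	bool in_cma;		/* stands in for is_migrate_cma_page() */
	bool on_cma_list;	/* stands in for being on cma_page_list */
	int isolate_fails;	/* transient failures left before isolation works */
};

/* Isolation fails a few times, then succeeds: errors are transient. */
static bool isolate_page(struct page *p)
{
	if (p->isolate_fails > 0) {
		p->isolate_fails--;
		return false;
	}
	return true;
}

static long check_and_migrate(struct page *pages, long nr_pages)
{
	long ret = nr_pages;
	long isolated, isolation_error_count, i;

check_again:
	isolated = 0;
	isolation_error_count = 0;	/* reset on every pass, like the patch */

	for (i = 0; i < nr_pages; i++) {
		if (!pages[i].in_cma)
			continue;
		if (!isolate_page(&pages[i])) {
			isolation_error_count++;	/* page stays pinned in CMA */
			continue;
		}
		pages[i].on_cma_list = true;
		isolated++;
	}

	/*
	 * The fix: an empty list alone is not success.  Return only when
	 * there were no isolation errors either.
	 */
	if (isolated == 0 && isolation_error_count == 0)
		return ret;

	/* "Migrate" every isolated page out of CMA, then check again. */
	for (i = 0; i < nr_pages; i++) {
		if (pages[i].on_cma_list) {
			pages[i].in_cma = false;
			pages[i].on_cma_list = false;
		}
	}

	goto check_again;	/* isolation errors are transient: retry */
}

int main(void)
{
	struct page pages[] = {
		{ .in_cma = true, .isolate_fails = 2 },
		{ .in_cma = false },
		{ .in_cma = true },
	};

	long ret = check_and_migrate(pages, 3);
	printf("pinned %ld pages, none left in CMA\n", ret);
	return 0;
}

On the pass where isolation fails for the first page, the list is empty
but isolation_error_count is non-zero, so the loop retries instead of
returning success; that is exactly the case the old code got wrong.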

Link: https://lkml.kernel.org/r/20210215161349.246722-5-pasha.tatashin@soleen.com
Fixes: 9a4e9f3b2d73 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region")
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
mm/gup.c

index 0fa8d88..c2826f3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1561,8 +1561,8 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
                                        struct vm_area_struct **vmas,
                                        unsigned int gup_flags)
 {
-       unsigned long i;
-       bool drain_allow = true;
+       unsigned long i, isolation_error_count;
+       bool drain_allow;
        LIST_HEAD(cma_page_list);
        long ret = nr_pages;
        struct page *prev_head, *head;
@@ -1573,6 +1573,8 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 
 check_again:
        prev_head = NULL;
+       isolation_error_count = 0;
+       drain_allow = true;
        for (i = 0; i < nr_pages; i++) {
                head = compound_head(pages[i]);
                if (head == prev_head)
@@ -1584,25 +1586,35 @@ check_again:
                 * of the CMA zone if possible.
                 */
                if (is_migrate_cma_page(head)) {
-                       if (PageHuge(head))
-                               isolate_huge_page(head, &cma_page_list);
-                       else {
+                       if (PageHuge(head)) {
+                               if (!isolate_huge_page(head, &cma_page_list))
+                                       isolation_error_count++;
+                       } else {
                                if (!PageLRU(head) && drain_allow) {
                                        lru_add_drain_all();
                                        drain_allow = false;
                                }
 
-                               if (!isolate_lru_page(head)) {
-                                       list_add_tail(&head->lru, &cma_page_list);
-                                       mod_node_page_state(page_pgdat(head),
-                                                           NR_ISOLATED_ANON +
-                                                           page_is_file_lru(head),
-                                                           thp_nr_pages(head));
+                               if (isolate_lru_page(head)) {
+                                       isolation_error_count++;
+                                       continue;
                                }
+                               list_add_tail(&head->lru, &cma_page_list);
+                               mod_node_page_state(page_pgdat(head),
+                                                   NR_ISOLATED_ANON +
+                                                   page_is_file_lru(head),
+                                                   thp_nr_pages(head));
                        }
                }
        }
 
+       /*
+        * If list is empty, and no isolation errors, means that all pages are
+        * in the correct zone.
+        */
+       if (list_empty(&cma_page_list) && !isolation_error_count)
+               return ret;
+
        if (!list_empty(&cma_page_list)) {
                /*
                 * drop the above get_user_pages reference.
@@ -1622,23 +1634,19 @@ check_again:
                        return ret > 0 ? -ENOMEM : ret;
                }
 
-               /*
-                * We did migrate all the pages, Try to get the page references
-                * again migrating any new CMA pages which we failed to isolate
-                * earlier.
-                */
-               ret = __get_user_pages_locked(mm, start, nr_pages,
-                                                  pages, vmas, NULL,
-                                                  gup_flags);
-
-               if (ret > 0) {
-                       nr_pages = ret;
-                       drain_allow = true;
-                       goto check_again;
-               }
+               /* We unpinned pages before migration, pin them again */
+               ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+                                             NULL, gup_flags);
+               if (ret <= 0)
+                       return ret;
+               nr_pages = ret;
        }
 
-       return ret;
+       /*
+        * check again because pages were unpinned, and we also might have
+        * had isolation errors and need more pages to migrate.
+        */
+       goto check_again;
 }
 #else
 static long check_and_migrate_cma_pages(struct mm_struct *mm,