mm/page_alloc: simplify locking during free_unref_page_list
Author:     Mel Gorman <mgorman@techsingularity.net>
AuthorDate: Tue, 22 Nov 2022 13:12:29 +0000 (13:12 +0000)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Wed, 30 Nov 2022 23:59:01 +0000 (15:59 -0800)
While freeing a large list, the zone lock will be released and reacquired
to avoid long hold times since commit c24ad77d962c ("mm/page_alloc.c:
avoid excessive IRQ disabled times in free_unref_page_list()").  As
suggested by Vlastimil Babka, the lock release/reacquire logic can be
simplified by reusing the logic that acquires a different lock when
changing zones.
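
As a standalone illustration of the resulting loop structure, here is a
minimal userspace sketch.  All names in it (BATCH_LIMIT, free_item_list,
the simplified struct zone) are stand-ins invented for the example, and a
pthread mutex stands in for the pcp spinlock; it is not the kernel code
itself:

	#include <pthread.h>
	#include <stdio.h>

	#define BATCH_LIMIT 32		/* stands in for SWAP_CLUSTER_MAX */
	#define NZONES 2

	struct zone {
		pthread_mutex_t lock;
		long freed;
	};

	static struct zone zones[NZONES] = {
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 0 },
	};

	static void free_item_list(const int item_zone[], int n)
	{
		struct zone *locked_zone = NULL;
		int batch_count = 0;

		for (int i = 0; i < n; i++) {
			struct zone *zone = &zones[item_zone[i]];

			/*
			 * One condition covers both cases: a different
			 * zone needing a different lock, and an excessive
			 * hold time on the current lock.
			 */
			if (zone != locked_zone || batch_count == BATCH_LIMIT) {
				if (locked_zone)
					pthread_mutex_unlock(&locked_zone->lock);
				batch_count = 0;
				pthread_mutex_lock(&zone->lock);
				locked_zone = zone;
			}

			zone->freed++;	/* the real loop frees a page here */
			batch_count++;
		}

		if (locked_zone)
			pthread_mutex_unlock(&locked_zone->lock);
	}

	int main(void)
	{
		int item_zone[100];

		for (int i = 0; i < 100; i++)
			item_zone[i] = (i / 10) % NZONES; /* runs of same-zone items */
		free_item_list(item_zone, 100);

		for (int i = 0; i < NZONES; i++)
			printf("zone %d: freed %ld items\n", i, zones[i].freed);
		return 0;
	}

Unlike the plain pthread lock above, which always succeeds, the kernel
must use a trylock because pages may be freed from IRQ or SoftIRQ
context (see the comment in the diff below) and takes a fallback path
when the trylock fails.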

Link: https://lkml.kernel.org/r/20221122131229.5263-3-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d9d8325..5ab9dd2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3525,13 +3525,19 @@ void free_unref_page_list(struct list_head *list)
                list_del(&page->lru);
                migratetype = get_pcppage_migratetype(page);
 
-               /* Different zone, different pcp lock. */
-               if (zone != locked_zone) {
+               /*
+                * Either different zone requiring a different pcp lock or
+                * excessive lock hold times when freeing a large list of
+                * pages.
+                */
+               if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
                        if (pcp) {
                                pcp_spin_unlock(pcp);
                                pcp_trylock_finish(UP_flags);
                        }
 
+                       batch_count = 0;
+
                        /*
                         * trylock is necessary as pages may be getting freed
                         * from IRQ or SoftIRQ context after an IO completion.
@@ -3546,7 +3552,6 @@ void free_unref_page_list(struct list_head *list)
                                continue;
                        }
                        locked_zone = zone;
-                       batch_count = 0;
                }
 
                /*
@@ -3558,19 +3563,7 @@ void free_unref_page_list(struct list_head *list)
 
                trace_mm_page_free_batched(page);
                free_unref_page_commit(zone, pcp, page, migratetype, 0);
-
-               /*
-                * Guard against excessive lock hold times when freeing
-                * a large list of pages. Lock will be reacquired if
-                * necessary on the next iteration.
-                */
-               if (++batch_count == SWAP_CLUSTER_MAX) {
-                       pcp_spin_unlock(pcp);
-                       pcp_trylock_finish(UP_flags);
-                       batch_count = 0;
-                       pcp = NULL;
-                       locked_zone = NULL;
-               }
+               batch_count++;
        }
 
        if (pcp) {