btrfs: raid56: convert kmaps to kmap_local_page
author: Ira Weiny <ira.weiny@intel.com>
Wed, 17 Feb 2021 02:48:24 +0000 (18:48 -0800)
committer: David Sterba <dsterba@suse.com>
Mon, 19 Apr 2021 15:25:16 +0000 (17:25 +0200)
These kmaps are thread-local and don't need to be atomic, so they can use
the more efficient kmap_local_page().  However, the mapping of pages in
the stripes and the additional parity and qstripe pages is a bit
trickier because the unmapping must occur in the opposite order from the
mapping.  Furthermore, the pointer array in __raid_recover_end_io() may
get reordered during reconstruction.

Convert these calls to kmap_local_page() taking care to reverse the
unmappings of any page arrays as well as being careful with the mappings
of any special pages such as the parity and qstripe pages.

Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/raid56.c

index 633b4b1..bb768c3 100644 (file)
@@ -1232,13 +1232,13 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
                /* first collect one page from each data stripe */
                for (stripe = 0; stripe < nr_data; stripe++) {
                        p = page_in_rbio(rbio, stripe, pagenr, 0);
-                       pointers[stripe] = kmap(p);
+                       pointers[stripe] = kmap_local_page(p);
                }
 
                /* then add the parity stripe */
                p = rbio_pstripe_page(rbio, pagenr);
                SetPageUptodate(p);
-               pointers[stripe++] = kmap(p);
+               pointers[stripe++] = kmap_local_page(p);
 
                if (has_qstripe) {
 
@@ -1248,7 +1248,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
                         */
                        p = rbio_qstripe_page(rbio, pagenr);
                        SetPageUptodate(p);
-                       pointers[stripe++] = kmap(p);
+                       pointers[stripe++] = kmap_local_page(p);
 
                        raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
                                                pointers);
@@ -1257,10 +1257,8 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
                        copy_page(pointers[nr_data], pointers[0]);
                        run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
                }
-
-
-               for (stripe = 0; stripe < rbio->real_stripes; stripe++)
-                       kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+               for (stripe = stripe - 1; stripe >= 0; stripe--)
+                       kunmap_local(pointers[stripe]);
        }
 
        /*
@@ -1777,6 +1775,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 {
        int pagenr, stripe;
        void **pointers;
+       void **unmap_array;
        int faila = -1, failb = -1;
        struct page *page;
        blk_status_t err;
@@ -1788,6 +1787,16 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                goto cleanup_io;
        }
 
+       /*
+        * Store copy of pointers that does not get reordered during
+        * reconstruction so that kunmap_local works.
+        */
+       unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
+       if (!unmap_array) {
+               err = BLK_STS_RESOURCE;
+               goto cleanup_pointers;
+       }
+
        faila = rbio->faila;
        failb = rbio->failb;
 
@@ -1809,8 +1818,11 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                    !test_bit(pagenr, rbio->dbitmap))
                        continue;
 
-               /* setup our array of pointers with pages
-                * from each stripe
+               /*
+                * Setup our array of pointers with pages from each stripe
+                *
+                * NOTE: store a duplicate array of pointers to preserve the
+                * pointer order
                 */
                for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
                        /*
@@ -1824,7 +1836,8 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                        } else {
                                page = rbio_stripe_page(rbio, stripe, pagenr);
                        }
-                       pointers[stripe] = kmap(page);
+                       pointers[stripe] = kmap_local_page(page);
+                       unmap_array[stripe] = pointers[stripe];
                }
 
                /* all raid6 handling here */
@@ -1917,24 +1930,14 @@ pstripe:
                                }
                        }
                }
-               for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
-                       /*
-                        * if we're rebuilding a read, we have to use
-                        * pages from the bio list
-                        */
-                       if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
-                            rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
-                           (stripe == faila || stripe == failb)) {
-                               page = page_in_rbio(rbio, stripe, pagenr, 0);
-                       } else {
-                               page = rbio_stripe_page(rbio, stripe, pagenr);
-                       }
-                       kunmap(page);
-               }
+               for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--)
+                       kunmap_local(unmap_array[stripe]);
        }
 
        err = BLK_STS_OK;
 cleanup:
+       kfree(unmap_array);
+cleanup_pointers:
        kfree(pointers);
 
 cleanup_io:
@@ -2359,13 +2362,13 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                        goto cleanup;
                }
                SetPageUptodate(q_page);
-               pointers[rbio->real_stripes - 1] = kmap(q_page);
+               pointers[rbio->real_stripes - 1] = kmap_local_page(q_page);
        }
 
        atomic_set(&rbio->error, 0);
 
        /* Map the parity stripe just once */
-       pointers[nr_data] = kmap(p_page);
+       pointers[nr_data] = kmap_local_page(p_page);
 
        for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
                struct page *p;
@@ -2373,7 +2376,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                /* first collect one page from each data stripe */
                for (stripe = 0; stripe < nr_data; stripe++) {
                        p = page_in_rbio(rbio, stripe, pagenr, 0);
-                       pointers[stripe] = kmap(p);
+                       pointers[stripe] = kmap_local_page(p);
                }
 
                if (has_qstripe) {
@@ -2396,14 +2399,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                        bitmap_clear(rbio->dbitmap, pagenr, 1);
                kunmap_local(parity);
 
-               for (stripe = 0; stripe < nr_data; stripe++)
-                       kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+               for (stripe = nr_data - 1; stripe >= 0; stripe--)
+                       kunmap_local(pointers[stripe]);
        }
 
-       kunmap(p_page);
+       kunmap_local(pointers[nr_data]);
        __free_page(p_page);
        if (q_page) {
-               kunmap(q_page);
+               kunmap_local(pointers[rbio->real_stripes - 1]);
                __free_page(q_page);
        }