raid5: remove gfp flags from scribble_alloc()
authorColy Li <colyli@suse.de>
Thu, 9 Apr 2020 14:17:21 +0000 (22:17 +0800)
committerSong Liu <songliubraving@fb.com>
Wed, 13 May 2020 18:22:31 +0000 (11:22 -0700)
Using the GFP_NOIO flag to call scribble_alloc() from resize_chunks() does
not have the expected behavior: kvmalloc_array() inside scribble_alloc(),
which receives the GFP_NOIO flag, will eventually call kmalloc_node() to
allocate physically contiguous pages.

Now that we have the memalloc scope APIs in mddev_suspend()/mddev_resume()
to prevent memory-reclaim I/O during the raid array suspend context,
calling kvmalloc_array() with the GFP_KERNEL flag avoids the recursive-I/O
deadlock as expected.

This patch removes the now-useless gfp flag from the parameter list of
scribble_alloc() and calls kvmalloc_array() with the GFP_KERNEL flag. The
incorrect GFP_NOIO flag no longer appears.

Fixes: b330e6a49dc3 ("md: convert to kvmalloc")
Suggested-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Song Liu <songliubraving@fb.com>
drivers/md/raid5.c

index ba00e98..190dd70 100644 (file)
@@ -2228,14 +2228,19 @@ static int grow_stripes(struct r5conf *conf, int num)
  * of the P and Q blocks.
  */
 static int scribble_alloc(struct raid5_percpu *percpu,
-                         int num, int cnt, gfp_t flags)
+                         int num, int cnt)
 {
        size_t obj_size =
                sizeof(struct page *) * (num+2) +
                sizeof(addr_conv_t) * (num+2);
        void *scribble;
 
-       scribble = kvmalloc_array(cnt, obj_size, flags);
+       /*
+        * If we are in the raid array suspend context, we are also in a
+        * memalloc noio scope, so GFP_KERNEL here cannot trigger recursive
+        * memory-reclaim I/O.
+        */
+       scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
        if (!scribble)
                return -ENOMEM;
 
@@ -2267,8 +2272,7 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
 
                percpu = per_cpu_ptr(conf->percpu, cpu);
                err = scribble_alloc(percpu, new_disks,
-                                    new_sectors / STRIPE_SECTORS,
-                                    GFP_NOIO);
+                                    new_sectors / STRIPE_SECTORS);
                if (err)
                        break;
        }
@@ -6759,8 +6763,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
                               conf->previous_raid_disks),
                           max(conf->chunk_sectors,
                               conf->prev_chunk_sectors)
-                          / STRIPE_SECTORS,
-                          GFP_KERNEL)) {
+                          / STRIPE_SECTORS)) {
                free_scratch_buffer(conf, percpu);
                return -ENOMEM;
        }