mm/vmalloc: eliminate an extra orig_gfp_mask
author: Uladzislau Rezki (Sony) <urezki@gmail.com>
Tue, 22 Mar 2022 21:42:56 +0000 (14:42 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Mar 2022 22:57:05 +0000 (15:57 -0700)
That extra variable was introduced solely to keep the original gfp_mask
that was passed in, because gfp_mask gets updated with __GFP_NOWARN on
entry, which broke the error-handling messages.

Instead, we can keep the original gfp_mask unmodified and pass the extra
__GFP_NOWARN flag OR'ed together with gfp_mask as a parameter to the
vm_area_alloc_pages() function.  This makes the code less confusing.

Link: https://lkml.kernel.org/r/20220119143540.601149-3-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Vasily Averin <vvs@virtuozzo.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmalloc.c

index 6755c14..4c44d40 100644 (file)
@@ -2946,7 +2946,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 int node)
 {
        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-       const gfp_t orig_gfp_mask = gfp_mask;
        bool nofail = gfp_mask & __GFP_NOFAIL;
        unsigned long addr = (unsigned long)area->addr;
        unsigned long size = get_vm_area_size(area);
@@ -2970,7 +2969,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        }
 
        if (!area->pages) {
-               warn_alloc(orig_gfp_mask, NULL,
+               warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, failed to allocated page array size %lu",
                        nr_small_pages * PAGE_SIZE, array_size);
                free_vm_area(area);
@@ -2980,8 +2979,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
        page_order = vm_area_page_order(area);
 
-       area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
-               page_order, nr_small_pages, area->pages);
+       area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
+               node, page_order, nr_small_pages, area->pages);
 
        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
        if (gfp_mask & __GFP_ACCOUNT) {
@@ -2997,7 +2996,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
         * allocation request, free them via __vfree() if any.
         */
        if (area->nr_pages != nr_small_pages) {
-               warn_alloc(orig_gfp_mask, NULL,
+               warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, page order %u, failed to allocate pages",
                        area->nr_pages * PAGE_SIZE, page_order);
                goto fail;
@@ -3025,7 +3024,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                memalloc_noio_restore(flags);
 
        if (ret < 0) {
-               warn_alloc(orig_gfp_mask, NULL,
+               warn_alloc(gfp_mask, NULL,
                        "vmalloc error: size %lu, failed to map pages",
                        area->nr_pages * PAGE_SIZE);
                goto fail;