mm, sparse: do not swamp log with huge vmemmap allocation failures
author     Michal Hocko <mhocko@suse.com>
           Thu, 16 Nov 2017 01:38:56 +0000 (17:38 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 16 Nov 2017 02:21:07 +0000 (18:21 -0800)
While doing memory hotplug tests under heavy memory pressure we have
noticed too many page allocation failures when allocating the vmemmap
memmap backed by huge pages:

  kworker/u3072:1: page allocation failure: order:9, mode:0x24084c0(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO)
  [...]
  Call Trace:
    dump_trace+0x59/0x310
    show_stack_log_lvl+0xea/0x170
    show_stack+0x21/0x40
    dump_stack+0x5c/0x7c
    warn_alloc_failed+0xe2/0x150
    __alloc_pages_nodemask+0x3ed/0xb20
    alloc_pages_current+0x7f/0x100
    vmemmap_alloc_block+0x79/0xb6
    __vmemmap_alloc_block_buf+0x136/0x145
    vmemmap_populate+0xd2/0x2b9
    sparse_mem_map_populate+0x23/0x30
    sparse_add_one_section+0x68/0x18e
    __add_pages+0x10a/0x1d0
    arch_add_memory+0x4a/0xc0
    add_memory_resource+0x89/0x160
    add_memory+0x6d/0xd0
    acpi_memory_device_add+0x181/0x251
    acpi_bus_attach+0xfd/0x19b
    acpi_bus_scan+0x59/0x69
    acpi_device_hotplug+0xd2/0x41f
    acpi_hotplug_work_fn+0x1a/0x23
    process_one_work+0x14e/0x410
    worker_thread+0x116/0x490
    kthread+0xbd/0xe0
    ret_from_fork+0x3f/0x70

and we do see many of those because essentially every allocation fails
for each memory section.  This is an excessively noisy way to tell the
user that there is nothing to really worry about, because we have a
fallback mechanism that uses base pages instead.  The only downside is
a possible performance degradation due to increased TLB pressure.
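
For context, the fallback sits in the arch populate path: when a 2MB
allocation for a section's memmap cannot be satisfied, the same range
is simply backed by base pages.  A heavily simplified sketch of that
loop (illustrative only; for the real thing see
vmemmap_populate_hugepages() in arch/x86/mm/init_64.c, touched by the
first hunk below):

  /* Illustrative sketch, not the upstream code. */
  static int __meminit populate_hugepages_sketch(unsigned long start,
                                                 unsigned long end, int node)
  {
          unsigned long addr, next;
          void *p;

          for (addr = start; addr < end; addr = next) {
                  next = pmd_addr_end(addr, end);

                  /* Try to back the memmap for [addr, next) with one 2MB page. */
                  p = vmemmap_alloc_block(PMD_SIZE, node);
                  if (p) {
                          /* ... install the PMD mapping for this range ... */
                          continue;
                  }

                  /* No huge page available: fall back to 4kB base pages. */
                  if (vmemmap_populate_basepages(addr, next, node))
                          return -ENOMEM;
          }
          return 0;
  }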

This patch changes vmemmap_alloc_block() to use __GFP_NOWARN and to warn
explicitly only once, on the first allocation failure.  This reduces the
noise in the kernel log considerably, while still leaving an indication
that performance might be impacted.
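
With the change applied, the allocation path reads roughly as follows
(assembled from the mm/sparse-vmemmap.c hunk below; the unchanged early
boot path is elided and the comments are descriptive additions).  The
mask handed to warn_alloc() has __GFP_NOWARN cleared because
warn_alloc() itself stays silent for __GFP_NOWARN requests:

  void * __meminit vmemmap_alloc_block(unsigned long size, int node)
  {
          /* If the main allocator is up use that, fallback to bootmem. */
          if (slab_is_available()) {
                  gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
                  int order = get_order(size);
                  static bool warned;
                  struct page *page;

                  page = alloc_pages_node(node, gfp_mask, order);
                  if (page)
                          return page_address(page);

                  /* Complain once; every section would fail the same way. */
                  if (!warned) {
                          warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
                                     "vmemmap alloc failure: order:%u", order);
                          warned = true;
                  }
                  return NULL;
          }

          /* Early boot (slab not up): unchanged bootmem path, elided here. */
          return NULL;    /* placeholder for __earlyonly_bootmem_alloc(...) */
  }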

[mhocko@kernel.org: forgot to git add the follow up fix]
Link: http://lkml.kernel.org/r/20171107090635.c27thtse2lchjgvb@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/20171106092228.31098-1-mhocko@kernel.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Joe Perches <joe@perches.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/mm/init_64.c
mm/sparse-vmemmap.c

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index c3fc544b50d278b6fa64c8f3c9eb6afcb7d8bb0c..4a837289f2add8ac3548e68639f38f845ceee6a9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1405,7 +1405,6 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                        continue;
                }
-               pr_warn_once("vmemmap: falling back to regular page backing\n");
                if (vmemmap_populate_basepages(addr, next, node))
                        return -ENOMEM;
        }
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 4e49762599c87d1581823b50b7b667b150f06af7..17acf01791fa832e1c8414cecc98034f2d652662 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -53,12 +53,20 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
        /* If the main allocator is up use that, fallback to bootmem. */
        if (slab_is_available()) {
+               gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
+               int order = get_order(size);
+               static bool warned;
                struct page *page;
 
-               page = alloc_pages_node(node, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
-                                       get_order(size));
+               page = alloc_pages_node(node, gfp_mask, order);
                if (page)
                        return page_address(page);
+
+               if (!warned) {
+                       warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
+                                  "vmemmap alloc failure: order:%u", order);
+                       warned = true;
+               }
                return NULL;
        } else
                return __earlyonly_bootmem_alloc(node, size, size,