powerpc/perf: fix imc allocation failure handling
author: Nicholas Piggin <npiggin@gmail.com>
Wed, 24 Jul 2019 08:46:36 +0000 (18:46 +1000)
committer: Michael Ellerman <mpe@ellerman.id.au>
Tue, 20 Aug 2019 11:22:20 +0000 (21:22 +1000)
The alloc_pages_node return value should be tested for failure
before being passed to page_address.

Tested-by: Anju T Sudhakar <anju@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190724084638.24982-3-npiggin@gmail.com
arch/powerpc/perf/imc-pmu.c

index dea2431..cb50a9e 100644 (file)
@@ -577,6 +577,7 @@ static int core_imc_mem_init(int cpu, int size)
 {
        int nid, rc = 0, core_id = (cpu / threads_per_core);
        struct imc_mem_info *mem_info;
+       struct page *page;
 
        /*
         * alloc_pages_node() will allocate memory for core in the
@@ -587,11 +588,12 @@ static int core_imc_mem_init(int cpu, int size)
        mem_info->id = core_id;
 
        /* We need only vbase for core counters */
-       mem_info->vbase = page_address(alloc_pages_node(nid,
-                                         GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-                                         __GFP_NOWARN, get_order(size)));
-       if (!mem_info->vbase)
+       page = alloc_pages_node(nid,
+                               GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                               __GFP_NOWARN, get_order(size));
+       if (!page)
                return -ENOMEM;
+       mem_info->vbase = page_address(page);
 
        /* Init the mutex */
        core_imc_refc[core_id].id = core_id;
@@ -849,15 +851,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
        int nid = cpu_to_node(cpu_id);
 
        if (!local_mem) {
+               struct page *page;
                /*
                 * This case could happen only once at start, since we dont
                 * free the memory in cpu offline path.
                 */
-               local_mem = page_address(alloc_pages_node(nid,
+               page = alloc_pages_node(nid,
                                  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-                                 __GFP_NOWARN, get_order(size)));
-               if (!local_mem)
+                                 __GFP_NOWARN, get_order(size));
+               if (!page)
                        return -ENOMEM;
+               local_mem = page_address(page);
 
                per_cpu(thread_imc_mem, cpu_id) = local_mem;
        }
@@ -1095,11 +1099,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
        int core_id = (cpu_id / threads_per_core);
 
        if (!local_mem) {
-               local_mem = page_address(alloc_pages_node(phys_id,
-                                       GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
-                                       __GFP_NOWARN, get_order(size)));
-               if (!local_mem)
+               struct page *page;
+
+               page = alloc_pages_node(phys_id,
+                               GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+                               __GFP_NOWARN, get_order(size));
+               if (!page)
                        return -ENOMEM;
+               local_mem = page_address(page);
                per_cpu(trace_imc_mem, cpu_id) = local_mem;
 
                /* Initialise the counters for trace mode */