powerpc/perf: Fix IMC_MAX_PMUS macro
authorMadhavan Srinivasan <maddy@linux.vnet.ibm.com>
Wed, 22 Nov 2017 05:15:39 +0000 (10:45 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Wed, 22 Nov 2017 12:04:23 +0000 (23:04 +1100)
IMC_MAX_PMUS is used to size the static storage (per_nest_pmu_arr) which
holds nest pmu information. The current value of the macro is 32, based on
the initial number of nest pmu units supported by the nest microcode.
But going forward, the microcode could support more nest units. Instead
of static storage, fix the code to dynamically allocate the array based
on the number of nest imc units found in the device tree.

Fixes: 8f95faaac56c1 ("powerpc/powernv: Detect and create IMC device")
Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/imc-pmu.h
arch/powerpc/perf/imc-pmu.c
arch/powerpc/platforms/powernv/opal-imc.c

index 7f74c282710f4c232c873119ebd2118c2307eb29..fad0e6ff460f22398b8487083cb34cb7abcb1de9 100644 (file)
 #include <linux/io.h>
 #include <asm/opal.h>
 
-/*
- * For static allocation of some of the structures.
- */
-#define IMC_MAX_PMUS                   32
-
 /*
  * Compatibility macros for IMC devices
  */
@@ -125,4 +120,5 @@ enum {
 extern int init_imc_pmu(struct device_node *parent,
                                struct imc_pmu *pmu_ptr, int pmu_id);
 extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
 #endif /* __ASM_POWERPC_IMC_PMU_H */
index cf64e16f92c255cd1fe7e054922d90b408b6f69d..0ead3cd73caa2f8816e8c04f47cca691efba0560 100644 (file)
@@ -26,7 +26,7 @@
  */
 static DEFINE_MUTEX(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
-static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
 struct imc_pmu_ref *nest_imc_refc;
 static int nest_pmus;
@@ -286,13 +286,14 @@ static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
 static void nest_change_cpu_context(int old_cpu, int new_cpu)
 {
        struct imc_pmu **pn = per_nest_pmu_arr;
-       int i;
 
        if (old_cpu < 0 || new_cpu < 0)
                return;
 
-       for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+       while (*pn) {
                perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+               pn++;
+       }
 }
 
 static int ppc_nest_imc_cpu_offline(unsigned int cpu)
@@ -1194,6 +1195,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
                kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
        kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
        kfree(pmu_ptr);
+       kfree(per_nest_pmu_arr);
        return;
 }
 
@@ -1218,6 +1220,13 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
                        return -ENOMEM;
 
                /* Needed for hotplug/migration */
+               if (!per_nest_pmu_arr) {
+                       per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
+                                               sizeof(struct imc_pmu *),
+                                               GFP_KERNEL);
+                       if (!per_nest_pmu_arr)
+                               return -ENOMEM;
+               }
                per_nest_pmu_arr[pmu_index] = pmu_ptr;
                break;
        case IMC_DOMAIN_CORE:
index b150f4deaccfc5f153e6c3527579b9dc288cdab3..465ea105b7710ecf0ff7320dcec8fd2b9775cf2e 100644 (file)
@@ -153,6 +153,22 @@ static void disable_core_pmu_counters(void)
        put_online_cpus();
 }
 
+int get_max_nest_dev(void)
+{
+       struct device_node *node;
+       u32 pmu_units = 0, type;
+
+       for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
+               if (of_property_read_u32(node, "type", &type))
+                       continue;
+
+               if (type == IMC_TYPE_CHIP)
+                       pmu_units++;
+       }
+
+       return pmu_units;
+}
+
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
        struct device_node *imc_dev = pdev->dev.of_node;