perf: RISC-V: exclude invalid pmu counters from SBI calls
author: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Tue, 30 Aug 2022 15:53:05 +0000 (18:53 +0300)
committer: Palmer Dabbelt <palmer@rivosinc.com>
Thu, 8 Sep 2022 20:34:50 +0000 (13:34 -0700)
SBI firmware may not provide information for some counters in response
to SBI_EXT_PMU_COUNTER_GET_INFO call. Exclude such counters from the
subsequent SBI requests. For this purpose use global mask to keep track
of fully specified counters.

Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20220830155306.301714-3-geomatsi@gmail.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
drivers/perf/riscv_pmu_legacy.c
drivers/perf/riscv_pmu_sbi.c
include/linux/perf/riscv_pmu.h

index 3427787..7d7131c 100644 (file)
@@ -14,7 +14,6 @@
 
 #define RISCV_PMU_LEGACY_CYCLE         0
 #define RISCV_PMU_LEGACY_INSTRET       1
-#define RISCV_PMU_LEGACY_NUM_CTR       2
 
 static bool pmu_init_done;
 
@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
 {
        pr_info("Legacy PMU implementation is available\n");
 
-       pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
+       pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
+               BIT(RISCV_PMU_LEGACY_INSTRET);
        pmu->ctr_start = pmu_legacy_ctr_start;
        pmu->ctr_stop = NULL;
        pmu->event_map = pmu_legacy_event_map;
index 6f6681b..1d05aff 100644 (file)
@@ -271,7 +271,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
        struct sbiret ret;
        int idx;
        uint64_t cbase = 0;
-       uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
        unsigned long cflags = 0;
 
        if (event->attr.exclude_kernel)
@@ -281,11 +280,12 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
 
        /* retrieve the available counter index */
 #if defined(CONFIG_32BIT)
-       ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
-                       cflags, hwc->event_base, hwc->config, hwc->config >> 32);
+       ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+                       rvpmu->cmask, cflags, hwc->event_base, hwc->config,
+                       hwc->config >> 32);
 #else
-       ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
-                       cflags, hwc->event_base, hwc->config, 0);
+       ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
+                       rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
 #endif
        if (ret.error) {
                pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -294,7 +294,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
        }
 
        idx = ret.value;
-       if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
+       if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
                return -ENOENT;
 
        /* Additional sanity check for the counter id */
@@ -463,7 +463,7 @@ static int pmu_sbi_find_num_ctrs(void)
                return sbi_err_map_linux_errno(ret.error);
 }
 
-static int pmu_sbi_get_ctrinfo(int nctr)
+static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
 {
        struct sbiret ret;
        int i, num_hw_ctr = 0, num_fw_ctr = 0;
@@ -478,6 +478,9 @@ static int pmu_sbi_get_ctrinfo(int nctr)
                if (ret.error)
                        /* The logical counter ids are not expected to be contiguous */
                        continue;
+
+               *mask |= BIT(i);
+
                cinfo.value = ret.value;
                if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
                        num_fw_ctr++;
@@ -498,7 +501,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
         * which may include counters that are not enabled yet.
         */
        sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
-                 0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
+                 0, pmu->cmask, 0, 0, 0, 0);
 }
 
 static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -788,8 +791,9 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu)
 static int pmu_sbi_device_probe(struct platform_device *pdev)
 {
        struct riscv_pmu *pmu = NULL;
-       int num_counters;
+       unsigned long cmask = 0;
        int ret = -ENODEV;
+       int num_counters;
 
        pr_info("SBI PMU extension is available\n");
        pmu = riscv_pmu_alloc();
@@ -803,7 +807,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
        }
 
        /* cache all the information about counters now */
-       if (pmu_sbi_get_ctrinfo(num_counters))
+       if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
                goto out_free;
 
        ret = pmu_sbi_setup_irqs(pmu, pdev);
@@ -812,8 +816,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
        }
+
        pmu->pmu.attr_groups = riscv_pmu_attr_groups;
-       pmu->num_counters = num_counters;
+       pmu->cmask = cmask;
        pmu->ctr_start = pmu_sbi_ctr_start;
        pmu->ctr_stop = pmu_sbi_ctr_stop;
        pmu->event_map = pmu_sbi_event_map;
index bf66fe0..e17e86a 100644 (file)
@@ -45,7 +45,7 @@ struct riscv_pmu {
 
        irqreturn_t     (*handle_irq)(int irq_num, void *dev);
 
-       int             num_counters;
+       unsigned long   cmask;
        u64             (*ctr_read)(struct perf_event *event);
        int             (*ctr_get_idx)(struct perf_event *event);
        int             (*ctr_get_width)(int idx);