perf/x86/intel: Define bit macros for FixCntrCtl MSR
Author:     Dapeng Mi <dapeng1.mi@linux.intel.com>
AuthorDate: Thu, 4 May 2023 07:21:28 +0000 (15:21 +0800)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Mon, 8 May 2023 08:58:32 +0000 (10:58 +0200)
Define bit macros for the FixCntrCtl MSR and replace the hardcoded bit
values with these macros. This makes the code more human-readable.

The perf commands 'perf stat -e "instructions,cycles,ref-cycles"' and
'perf record -e "instructions,cycles,ref-cycles"' still pass with this change.
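
For illustration only (not part of the patch), below is a minimal
standalone sketch of the macro semantics introduced here: each fixed
counter owns a 4-bit control field in the fixed counter control MSR,
and intel_fixed_bits_by_idx() shifts a counter-0 bit pattern into the
field of a given counter, producing the same values as the old
hardcoded "0xfULL << (idx * 4)" arithmetic. The main() harness and its
assertions are purely illustrative.

  /* Standalone sketch of the macro semantics introduced by this patch. */
  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define INTEL_FIXED_BITS_MASK    0xFULL
  #define INTEL_FIXED_BITS_STRIDE  4
  #define INTEL_FIXED_0_KERNEL     (1ULL << 0)
  #define INTEL_FIXED_0_USER       (1ULL << 1)
  #define INTEL_FIXED_0_ANYTHREAD  (1ULL << 2)
  #define INTEL_FIXED_0_ENABLE_PMI (1ULL << 3)

  /* Shift a counter-0 bit pattern into the 4-bit field of counter _idx. */
  #define intel_fixed_bits_by_idx(_idx, _bits) \
          ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))

  int main(void)
  {
          /* Count in both kernel and user mode on fixed counter 1. */
          uint64_t bits = INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER;
          uint64_t val  = intel_fixed_bits_by_idx(1, bits);
          uint64_t mask = intel_fixed_bits_by_idx(1, INTEL_FIXED_BITS_MASK);

          /* Same values the old hardcoded shifts produced. */
          assert(val  == (0x3ULL << (1 * 4)));
          assert(mask == (0xFULL << (1 * 4)));

          printf("field value: 0x%llx, field mask: 0x%llx\n",
                 (unsigned long long)val, (unsigned long long)mask);
          return 0;
  }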

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230504072128.3653470-1-dapeng1.mi@linux.intel.com
arch/x86/events/intel/core.c
arch/x86/include/asm/perf_event.h

index 070cc4e..0d09245 100644
@@ -2451,7 +2451,7 @@ static void intel_pmu_disable_fixed(struct perf_event *event)
 
        intel_clear_masks(event, idx);
 
-       mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
+       mask = intel_fixed_bits_by_idx(idx - INTEL_PMC_IDX_FIXED, INTEL_FIXED_BITS_MASK);
        cpuc->fixed_ctrl_val &= ~mask;
 }
 
@@ -2750,25 +2750,25 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
         * if requested:
         */
        if (!event->attr.precise_ip)
-               bits |= 0x8;
+               bits |= INTEL_FIXED_0_ENABLE_PMI;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
-               bits |= 0x2;
+               bits |= INTEL_FIXED_0_USER;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
-               bits |= 0x1;
+               bits |= INTEL_FIXED_0_KERNEL;
 
        /*
         * ANY bit is supported in v3 and up
         */
        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
-               bits |= 0x4;
+               bits |= INTEL_FIXED_0_ANYTHREAD;
 
        idx -= INTEL_PMC_IDX_FIXED;
-       bits <<= (idx * 4);
-       mask = 0xfULL << (idx * 4);
+       bits = intel_fixed_bits_by_idx(idx, bits);
+       mask = intel_fixed_bits_by_idx(idx, INTEL_FIXED_BITS_MASK);
 
        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
-               bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
-               mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+               bits |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
+               mask |= intel_fixed_bits_by_idx(idx, ICL_FIXED_0_ADAPTIVE);
        }
 
        cpuc->fixed_ctrl_val &= ~mask;
index fc86248..7d16fcf 100644
 #define ARCH_PERFMON_EVENTSEL_INV                      (1ULL << 23)
 #define ARCH_PERFMON_EVENTSEL_CMASK                    0xFF000000ULL
 
+#define INTEL_FIXED_BITS_MASK                          0xFULL
+#define INTEL_FIXED_BITS_STRIDE                        4
+#define INTEL_FIXED_0_KERNEL                           (1ULL << 0)
+#define INTEL_FIXED_0_USER                             (1ULL << 1)
+#define INTEL_FIXED_0_ANYTHREAD                        (1ULL << 2)
+#define INTEL_FIXED_0_ENABLE_PMI                       (1ULL << 3)
+
 #define HSW_IN_TX                                      (1ULL << 32)
 #define HSW_IN_TX_CHECKPOINTED                         (1ULL << 33)
 #define ICL_EVENTSEL_ADAPTIVE                          (1ULL << 34)
 #define ICL_FIXED_0_ADAPTIVE                           (1ULL << 32)
 
+#define intel_fixed_bits_by_idx(_idx, _bits)                   \
+       ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))
+
 #define AMD64_EVENTSEL_INT_CORE_ENABLE                 (1ULL << 36)
 #define AMD64_EVENTSEL_GUESTONLY                       (1ULL << 40)
 #define AMD64_EVENTSEL_HOSTONLY                                (1ULL << 41)