perf/x86/intel: Add generic branch tracing check to intel_pmu_has_bts()
Author:     Jiri Olsa <jolsa@kernel.org>
AuthorDate: Wed, 21 Nov 2018 10:16:11 +0000 (11:16 +0100)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 5 Dec 2018 18:42:39 +0000 (19:42 +0100)
commit 67266c1080ad56c31af72b9c18355fde8ccc124a upstream.

Currently we check for branch tracing only by matching the
PERF_COUNT_HW_BRANCH_INSTRUCTIONS event of the PERF_TYPE_HARDWARE
type. But the same event can also be defined with the PERF_TYPE_RAW
type.
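
For illustration, a minimal user-space sketch of the two ways the same
branch-instructions event can be requested; the raw encoding 0x00c4
(the Intel architectural branch-instructions-retired event) is used
only as an example value, and the old check recognized only the first
form:

  #include <string.h>
  #include <linux/perf_event.h>

  /* Both attrs describe branch instructions sampled with period 1. */
  static void setup_branch_attrs(struct perf_event_attr *hw,
                                 struct perf_event_attr *raw)
  {
          memset(hw, 0, sizeof(*hw));
          hw->type          = PERF_TYPE_HARDWARE;
          hw->config        = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
          hw->sample_period = 1;          /* freq is left at 0 */

          memset(raw, 0, sizeof(*raw));
          raw->type          = PERF_TYPE_RAW;
          raw->config        = 0x00c4;    /* example raw encoding of the same event */
          raw->sample_period = 1;
  }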

Change the intel_pmu_has_bts() code to check the event's final hw
config value instead, so both event types are covered.
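
Checking the final hw config works because, by the time
intel_pmu_has_bts() runs, both event types have already been reduced
to a raw encoding in event->hw.config. A simplified sketch of that
reduction (not the exact kernel setup code, which lives in the x86
hw_config/setup_perfctr path):

  /* Simplified: how the final encoding is derived for either type. */
  static u64 final_hw_config(struct perf_event *event)
  {
          if (event->attr.type == PERF_TYPE_RAW)
                  /* the user-supplied raw encoding is copied in directly */
                  return event->attr.config & X86_RAW_EVENT_MASK;

          /* generic ids are translated through the PMU's event map */
          return x86_pmu.event_map(event->attr.config);
  }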

Add unlikely() to the intel_pmu_has_bts() condition calls, because it
was used that way in the original code in intel_bts_constraints().
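
For reference, unlikely() is the kernel's branch-prediction hint, so
keeping it preserves the assumption that a BTS event is the rare case
on these paths (simplified form of the definition in
include/linux/compiler.h):

  #define unlikely(x)     __builtin_expect(!!(x), 0)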

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: <stable@vger.kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/20181121101612.16272-2-jolsa@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/events/intel/core.c
arch/x86/events/perf_event.h

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 369006b..4f85607 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2198,16 +2198,7 @@ done:
 static struct event_constraint *
 intel_bts_constraints(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-       unsigned int hw_event, bts_event;
-
-       if (event->attr.freq)
-               return NULL;
-
-       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
-       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-
-       if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
+       if (unlikely(intel_pmu_has_bts(event)))
                return &bts_constraint;
 
        return NULL;
@@ -2825,10 +2816,8 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
 static int intel_pmu_bts_config(struct perf_event *event)
 {
        struct perf_event_attr *attr = &event->attr;
-       struct hw_perf_event *hwc = &event->hw;
 
-       if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-           !attr->freq && hwc->sample_period == 1) {
+       if (unlikely(intel_pmu_has_bts(event))) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;
@@ -2887,7 +2876,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                /*
                 * BTS is set up earlier in this path, so don't account twice
                 */
-               if (!intel_pmu_has_bts(event)) {
+               if (!unlikely(intel_pmu_has_bts(event))) {
                        /* disallow lbr if conflicting events are present */
                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                                return -EBUSY;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 1bfebbc..7ace39c 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -835,11 +835,16 @@ static inline int amd_pmu_init(void)
 
 static inline bool intel_pmu_has_bts(struct perf_event *event)
 {
-       if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-           !event->attr.freq && event->hw.sample_period == 1)
-               return true;
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int hw_event, bts_event;
+
+       if (event->attr.freq)
+               return false;
+
+       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-       return false;
+       return hw_event == bts_event && hwc->sample_period == 1;
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);