perf, x86: Use helper function in x86_pmu_enable_all()
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0a360d1..70d6d8f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
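Summary: move the __x86_pmu_enable_event() helper above x86_pmu_enable_all() so the enable-all loop can reuse it instead of open-coding the eventsel MSR write; the counter's MSR address is then derived from struct hw_perf_event rather than from x86_pmu.eventsel in the loop. The diff below also converts several this-CPU accesses from __get_cpu_var() to the __this_cpu_*() operations, drops the DIE_NMI_IPI case from the NMI notifier, replaces the notifier's magic priority with NMI_LOCAL_LOW_PRIOR, and gives init_hw_perf_events() and x86_pmu_event_init() internal linkage.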
@@ -642,21 +642,24 @@ static void x86_pmu_disable(struct pmu *pmu)
        x86_pmu.disable_all();
 }
 
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+                                         u64 enable_mask)
+{
+       wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
+}
+
 static void x86_pmu_enable_all(int added)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               struct perf_event *event = cpuc->events[idx];
-               u64 val;
+               struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
 
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
 
-               val = event->hw.config;
-               val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-               wrmsrl(x86_pmu.eventsel + idx, val);
+               __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
 }
 
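Note on the hunk above: the helper performs exactly the three steps it replaces. In this tree hwc->config_base is set to x86_pmu.eventsel for the general-purpose counters (an assumption from x86_assign_hw_event(), not visible in this diff), so hwc->config_base + hwc->idx names the same PERFEVTSELx MSR that x86_pmu.eventsel + idx did. A minimal sketch of the equivalence:

	u64 val = hwc->config;				/* event select / unit mask bits */
	val |= ARCH_PERFMON_EVENTSEL_ENABLE;		/* set the enable bit */
	wrmsrl(hwc->config_base + hwc->idx, val);	/* the same MSR write the helper does */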
@@ -915,12 +918,6 @@ static void x86_pmu_enable(struct pmu *pmu)
        x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
-                                         u64 enable_mask)
-{
-       wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
-}
-
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -997,8 +994,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       if (cpuc->enabled)
+       if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
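The __get_cpu_var() to __this_cpu_read() conversions in this and the following hunks share one pattern: instead of materializing a pointer to this CPU's cpu_hw_events and then loading a field through it, a single percpu operation reads the field directly, which on x86 compiles to one %gs-segment-relative instruction. The two shapes, side by side:

	/* old: compute this CPU's pointer, then load the field */
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);

	/* new: one segment-relative load, no intermediate pointer */
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw, ARCH_PERFMON_EVENTSEL_ENABLE);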
@@ -1268,11 +1264,10 @@ perf_event_nmi_handler(struct notifier_block *self,
 
        switch (cmd) {
        case DIE_NMI:
-       case DIE_NMI_IPI:
                break;
        case DIE_NMIUNKNOWN:
                this_nmi = percpu_read(irq_stat.__nmi_count);
-               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+               if (this_nmi != __this_cpu_read(pmu_nmi.marked))
                        /* let the kernel handle the unknown nmi */
                        return NOTIFY_DONE;
                /*
@@ -1296,8 +1291,8 @@ perf_event_nmi_handler(struct notifier_block *self,
        this_nmi = percpu_read(irq_stat.__nmi_count);
        if ((handled > 1) ||
                /* the next nmi could be a back-to-back nmi */
-           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-            (__get_cpu_var(pmu_nmi).handled > 1))) {
+           ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+            (__this_cpu_read(pmu_nmi.handled) > 1))) {
                /*
                 * We could have two subsequent back-to-back nmis: The
                 * first handles more than one counter, the 2nd
@@ -1308,8 +1303,8 @@ perf_event_nmi_handler(struct notifier_block *self,
                 * handling more than one counter. We will mark the
                 * next (3rd) and then drop it if unhandled.
                 */
-               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
-               __get_cpu_var(pmu_nmi).handled  = handled;
+               __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+               __this_cpu_write(pmu_nmi.handled, handled);
        }
 
        return NOTIFY_STOP;
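The per-CPU pmu_nmi record these accessors touch is declared earlier in this file; a sketch of its shape, with the field meanings implied by the logic above (marked is the __nmi_count value whose unknown NMI may be swallowed, handled is how many counters the preceding NMI serviced):

	struct pmu_nmi_state {
		unsigned int	marked;		/* nmi count at which to eat one unknown NMI */
		int		handled;	/* counters handled by the preceding NMI */
	};
	static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);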
@@ -1318,7 +1313,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
        .notifier_call          = perf_event_nmi_handler,
        .next                   = NULL,
-       .priority               = 1
+       .priority               = NMI_LOCAL_LOW_PRIOR,
 };
 
 static struct event_constraint unconstrained;
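NMI_LOCAL_LOW_PRIOR comes from <asm/nmi.h> and replaces the bare priority 1, expressing the perf notifier's ordering among local-APIC NMI users symbolically rather than as a magic number.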
@@ -1391,7 +1386,7 @@ static void __init pmu_check_apic(void)
        pr_info("no hardware sampling interrupt available.\n");
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
        struct event_constraint *c;
        int err;
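init_hw_perf_events() can become static because nothing calls it by name from another file; in this tree it is invoked through an initcall registered near the bottom of this file, along the lines of:

	early_initcall(init_hw_perf_events);	/* registration; the symbol needs no external linkage */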
@@ -1484,11 +1479,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
        perf_pmu_disable(pmu);
-       cpuc->group_flag |= PERF_EVENT_TXN;
-       cpuc->n_txn = 0;
+       __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+       __this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1498,14 +1491,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-       cpuc->group_flag &= ~PERF_EVENT_TXN;
+       __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
        /*
         * Truncate the collected events.
         */
-       cpuc->n_added -= cpuc->n_txn;
-       cpuc->n_events -= cpuc->n_txn;
+       __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+       __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
        perf_pmu_enable(pmu);
 }
 
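The two transaction hunks above use the read-modify-write percpu counterparts: group_flag |= PERF_EVENT_TXN becomes __this_cpu_or(), the flag clear becomes __this_cpu_and() with the inverted mask, and the n_added/n_events truncation becomes __this_cpu_sub() of the n_txn count, each a single %gs-relative instruction instead of a pointer-plus-store pair.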
@@ -1614,7 +1605,7 @@ out:
        return ret;
 }
 
-int x86_pmu_event_init(struct perf_event *event)
+static int x86_pmu_event_init(struct perf_event *event)
 {
        struct pmu *tmp;
        int err;
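x86_pmu_event_init() likewise has no external callers: it is referenced only through the pmu ops structure defined in this file (.event_init = x86_pmu_event_init), so internal linkage is safe and lets the compiler warn if the function ever becomes unused.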