Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6ccc079..f3e22ff 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
        return ret;
 }
 
-static inline u32
-armv6pmu_read_counter(int counter)
+static inline u32 armv6pmu_read_counter(struct perf_event *event)
 {
+       struct hw_perf_event *hwc = &event->hw;
+       int counter = hwc->idx;
        unsigned long value = 0;
 
        if (ARMV6_CYCLE_COUNTER == counter)
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
        return value;
 }
 
-static inline void
-armv6pmu_write_counter(int counter,
-                      u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
 {
+       struct hw_perf_event *hwc = &event->hw;
+       int counter = hwc->idx;
+
        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
        else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
 }
 
-static void
-armv6pmu_enable_event(struct hw_perf_event *hwc,
-                     int idx)
+static void armv6pmu_enable_event(struct perf_event *event)
 {
        unsigned long val, mask, evt, flags;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       int idx = hwc->idx;
 
        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = 0;
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
 {
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
-       struct pmu_hw_events *cpuc;
+       struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+       struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
        struct pt_regs *regs;
        int idx;
 
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num,
         */
        armv6_pmcr_write(pmcr);
 
-       cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx);
+               armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
-               if (!armpmu_event_set_period(event, hwc, idx))
+               if (!armpmu_event_set_period(event))
                        continue;
 
                if (perf_event_overflow(event, &data, regs))
-                       cpu_pmu->disable(hwc, idx);
+                       cpu_pmu->disable(event);
        }
 
        /*
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
        return IRQ_HANDLED;
 }
 
-static void
-armv6pmu_start(void)
+static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
        unsigned long flags, val;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6pmu_stop(void)
+static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
        unsigned long flags, val;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)
 
 static int
 armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
-                      struct hw_perf_event *event)
+                               struct perf_event *event)
 {
+       struct hw_perf_event *hwc = &event->hw;
        /* Always place a cycle counter into the cycle counter. */
-       if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+       if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
                if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;
 
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
        }
 }
 
-static void
-armv6pmu_disable_event(struct hw_perf_event *hwc,
-                      int idx)
+static void armv6pmu_disable_event(struct perf_event *event)
 {
        unsigned long val, mask, evt, flags;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       int idx = hwc->idx;
 
        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
-                             int idx)
+static void armv6mpcore_pmu_disable_event(struct perf_event *event)
 {
        unsigned long val, mask, flags, evt = 0;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       int idx = hwc->idx;
 
        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
@@ -649,24 +653,22 @@ static int armv6_map_event(struct perf_event *event)
                                &armv6_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6pmu = {
-       .name                   = "v6",
-       .handle_irq             = armv6pmu_handle_irq,
-       .enable                 = armv6pmu_enable_event,
-       .disable                = armv6pmu_disable_event,
-       .read_counter           = armv6pmu_read_counter,
-       .write_counter          = armv6pmu_write_counter,
-       .get_event_idx          = armv6pmu_get_event_idx,
-       .start                  = armv6pmu_start,
-       .stop                   = armv6pmu_stop,
-       .map_event              = armv6_map_event,
-       .num_events             = 3,
-       .max_period             = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6pmu_init(void)
+static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return &armv6pmu;
+       cpu_pmu->name           = "v6";
+       cpu_pmu->handle_irq     = armv6pmu_handle_irq;
+       cpu_pmu->enable         = armv6pmu_enable_event;
+       cpu_pmu->disable        = armv6pmu_disable_event;
+       cpu_pmu->read_counter   = armv6pmu_read_counter;
+       cpu_pmu->write_counter  = armv6pmu_write_counter;
+       cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
+       cpu_pmu->start          = armv6pmu_start;
+       cpu_pmu->stop           = armv6pmu_stop;
+       cpu_pmu->map_event      = armv6_map_event;
+       cpu_pmu->num_events     = 3;
+       cpu_pmu->max_period     = (1LLU << 32) - 1;
+
+       return 0;
 }
 
 /*
@@ -683,33 +685,31 @@ static int armv6mpcore_map_event(struct perf_event *event)
                                &armv6mpcore_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6mpcore_pmu = {
-       .name                   = "v6mpcore",
-       .handle_irq             = armv6pmu_handle_irq,
-       .enable                 = armv6pmu_enable_event,
-       .disable                = armv6mpcore_pmu_disable_event,
-       .read_counter           = armv6pmu_read_counter,
-       .write_counter          = armv6pmu_write_counter,
-       .get_event_idx          = armv6pmu_get_event_idx,
-       .start                  = armv6pmu_start,
-       .stop                   = armv6pmu_stop,
-       .map_event              = armv6mpcore_map_event,
-       .num_events             = 3,
-       .max_period             = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return &armv6mpcore_pmu;
+       cpu_pmu->name           = "v6mpcore";
+       cpu_pmu->handle_irq     = armv6pmu_handle_irq;
+       cpu_pmu->enable         = armv6pmu_enable_event;
+       cpu_pmu->disable        = armv6mpcore_pmu_disable_event;
+       cpu_pmu->read_counter   = armv6pmu_read_counter;
+       cpu_pmu->write_counter  = armv6pmu_write_counter;
+       cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
+       cpu_pmu->start          = armv6pmu_start;
+       cpu_pmu->stop           = armv6pmu_stop;
+       cpu_pmu->map_event      = armv6mpcore_map_event;
+       cpu_pmu->num_events     = 3;
+       cpu_pmu->max_period     = (1LLU << 32) - 1;
+
+       return 0;
 }
 #else
-static struct arm_pmu *__devinit armv6pmu_init(void)
+static int armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return NULL;
+       return -ENODEV;
 }
 
-static struct arm_pmu *__devinit armv6mpcore_pmu_init(void)
+static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return NULL;
+       return -ENODEV;
 }
 #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
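
For context, a minimal caller-side sketch of the reworked API (illustrative only; the probe_v6_pmu() helper and the kzalloc-based allocation below are assumptions for the example and are not part of this diff): the init function now fills in a caller-provided struct arm_pmu and returns 0 or -ENODEV, and the installed callbacks take a struct perf_event * and derive the counter index from event->hw.idx themselves.

	#include <linux/slab.h>		/* kzalloc, kfree */
	#include <asm/pmu.h>		/* struct arm_pmu */

	/* Hypothetical caller -- not part of this commit. */
	static struct arm_pmu *probe_v6_pmu(void)
	{
		struct arm_pmu *cpu_pmu = kzalloc(sizeof(*cpu_pmu), GFP_KERNEL);

		if (!cpu_pmu)
			return NULL;

		/*
		 * armv6pmu_init() fills in name, handle_irq, enable/disable,
		 * read/write_counter, get_event_idx, start/stop, map_event,
		 * num_events and max_period; it returns 0 on success or
		 * -ENODEV when neither CONFIG_CPU_V6 nor CONFIG_CPU_V6K is set.
		 */
		if (armv6pmu_init(cpu_pmu)) {
			kfree(cpu_pmu);
			return NULL;
		}

		/*
		 * Callbacks are now invoked with the perf_event itself,
		 * e.g. cpu_pmu->enable(event) or cpu_pmu->read_counter(event),
		 * instead of a separate (hw_perf_event, idx) pair.
		 */
		return cpu_pmu;
	}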