perf: Clarify perf_cpu_context::active_pmu usage by renaming it to ::unique_pmu
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
	Tue, 2 Oct 2012 13:38:52 +0000 (15:38 +0200)
committer	Ingo Molnar <mingo@kernel.org>
	Fri, 5 Oct 2012 11:59:06 +0000 (13:59 +0200)
Stephane thought the perf_cpu_context::active_pmu name confusing and
suggested using 'unique_pmu' instead.

This pointer points to an arbitrary ('random') pmu among those sharing the
cpuctx instance; therefore, limiting a for_each_pmu loop to the iterations
where cpuctx->unique_pmu matches the pmu yields a loop over unique cpuctx
instances.
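
For reference, a minimal sketch of that pattern as used by the first
kernel/events/core.c hunk below (the loop tail with put_cpu_ptr() and
rcu_read_unlock() is assumed from the surrounding kernel code and is not
visible in the hunks):

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		/*
		 * Several pmus may share this cpuctx; only the one recorded
		 * as unique_pmu proceeds, so the body runs exactly once per
		 * cpuctx instance.
		 */
		if (cpuctx->unique_pmu != pmu)
			goto next;

		perf_event_task_ctx(&cpuctx->ctx, task_event);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}
	rcu_read_unlock();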

Suggested-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-kxyjqpfj2fn9gt7kwu5ag9ks@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/perf_event.h
kernel/events/core.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 599afc4..b4166cd 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1110,7 +1110,7 @@ struct perf_cpu_context {
        int                             exclusive;
        struct list_head                rotation_list;
        int                             jiffies_interval;
-       struct pmu                      *active_pmu;
+       struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
 };
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7b9df35..81939e8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4419,7 +4419,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->active_pmu != pmu)
+               if (cpuctx->unique_pmu != pmu)
                        goto next;
                perf_event_task_ctx(&cpuctx->ctx, task_event);
 
@@ -4565,7 +4565,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->active_pmu != pmu)
+               if (cpuctx->unique_pmu != pmu)
                        goto next;
                perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
@@ -4761,7 +4761,7 @@ got_name:
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->active_pmu != pmu)
+               if (cpuctx->unique_pmu != pmu)
                        goto next;
                perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
                                        vma->vm_flags & VM_EXEC);
@@ -5862,8 +5862,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 
-               if (cpuctx->active_pmu == old_pmu)
-                       cpuctx->active_pmu = pmu;
+               if (cpuctx->unique_pmu == old_pmu)
+                       cpuctx->unique_pmu = pmu;
        }
 }
 
@@ -5998,7 +5998,7 @@ skip_type:
                cpuctx->ctx.pmu = pmu;
                cpuctx->jiffies_interval = 1;
                INIT_LIST_HEAD(&cpuctx->rotation_list);
-               cpuctx->active_pmu = pmu;
+               cpuctx->unique_pmu = pmu;
        }
 
 got_cpu_context: