perf_counter, x86: return raw count with x86_perf_counter_update()
author	Robert Richter <robert.richter@amd.com>
Wed, 29 Apr 2009 10:47:22 +0000 (12:47 +0200)
committer	Ingo Molnar <mingo@elte.hu>
Wed, 29 Apr 2009 12:51:13 +0000 (14:51 +0200)
To check whether a counter has overflowed on AMD CPUs, the upper bit of the
raw counter value must be inspected. This value is already available
internally in x86_perf_counter_update(); return it so that callers can use
it directly to check for overflows.

[ Impact: micro-optimization ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-26-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
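
For illustration only (not part of the patch), below is a minimal standalone C
sketch of the sign-bit test that the changed call site applies to the returned
raw count. The 48-bit width, the COUNTER_BITS macro and the sample values are
assumptions standing in for x86_pmu.counter_bits and a real PMC readout:

#include <stdio.h>
#include <stdint.h>

/* Assumed counter width, standing in for x86_pmu.counter_bits. */
#define COUNTER_BITS 48

/*
 * The counter is programmed with a negative initial value and counts up.
 * While the top implemented bit reads back as set, the counter has not yet
 * wrapped past zero, so there is no overflow; an overflow is signalled once
 * that bit reads back as clear.
 */
static int counter_overflowed(uint64_t raw_count)
{
	return !(raw_count & (1ULL << (COUNTER_BITS - 1)));
}

int main(void)
{
	/* Top bit set: still counting up, no overflow yet -> prints 0. */
	printf("%d\n", counter_overflowed(0x800000000123ULL));
	/* Top bit clear: the counter has wrapped, i.e. overflowed -> prints 1. */
	printf("%d\n", counter_overflowed(0x000000000042ULL));
	return 0;
}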
arch/x86/kernel/cpu/perf_counter.c

index f4d59d4..a8a53ab 100644
@@ -132,7 +132,7 @@ static u64 amd_pmu_raw_event(u64 event)
  * Can only be executed on the CPU where the counter is active.
  * Returns the delta events processed.
  */
-static void
+static u64
 x86_perf_counter_update(struct perf_counter *counter,
                        struct hw_perf_counter *hwc, int idx)
 {
@@ -165,6 +165,8 @@ again:
 
        atomic64_add(delta, &counter->count);
        atomic64_sub(delta, &hwc->period_left);
+
+       return new_raw_count;
 }
 
 static atomic_t num_counters;
@@ -785,8 +787,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
                        continue;
                counter = cpuc->counters[idx];
                hwc = &counter->hw;
-               x86_perf_counter_update(counter, hwc, idx);
-               val = atomic64_read(&hwc->prev_count);
+               val = x86_perf_counter_update(counter, hwc, idx);
                if (val & (1ULL << (x86_pmu.counter_bits - 1)))
                        continue;
                /* counter overflow */