From: Ingo Molnar
Date: Mon, 4 May 2009 16:54:32 +0000 (+0200)
Subject: perf_counter: round-robin per-CPU counters too
X-Git-Tag: v2.6.31-rc1~383^2~306
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b82914ce33146186d554b0f5c41e4e13693614ce;p=profile%2Fivi%2Fkernel-x86-ivi.git

perf_counter: round-robin per-CPU counters too

This used to be unstable when we had the rq->lock dependencies,
but now that those are a thing of the past we can turn on per-CPU
counter RR too.

[ Impact: handle counter over-commit for per-CPU counters too ]

LKML-Reference:
Signed-off-by: Ingo Molnar
---

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 8660ae5..b9679c3 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1069,18 +1069,14 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
 	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
-	const int rotate_percpu = 0;
 
-	if (rotate_percpu)
-		perf_counter_cpu_sched_out(cpuctx);
+	perf_counter_cpu_sched_out(cpuctx);
 	perf_counter_task_sched_out(curr, cpu);
 
-	if (rotate_percpu)
-		rotate_ctx(&cpuctx->ctx);
+	rotate_ctx(&cpuctx->ctx);
 	rotate_ctx(ctx);
 
-	if (rotate_percpu)
-		perf_counter_cpu_sched_in(cpuctx, cpu);
+	perf_counter_cpu_sched_in(cpuctx, cpu);
 	perf_counter_task_sched_in(curr, cpu);
 }
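
For quick reference, this is roughly how perf_counter_task_tick() reads once the
patch is applied, reconstructed from the context and '+' lines of the hunk above
(the surrounding code in kernel/perf_counter.c is elided, and the comments are
added here for illustration, they are not part of the commit):

void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;

	/* Schedule out both the per-CPU and the per-task counters: */
	perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	/* Rotate both contexts, so over-committed counters get their turn: */
	rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	/* Schedule them back in, in the new rotation order: */
	perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}

The net effect is that per-CPU counters are now rotated unconditionally on every
tick, exactly like per-task counters, instead of being gated behind the
compile-time-disabled rotate_percpu flag.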