From: Nick Piggin
Date: Sat, 10 Sep 2005 07:26:16 +0000 (-0700)
Subject: [PATCH] sched: less newidle locking
X-Git-Tag: upstream/snapshot3+hdmi~46057
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d6d5cfaf4551aa7713ca6ab73bb77e832602204b;p=platform%2Fadaptation%2Frenesas_rcar%2Frenesas_kernel.git

[PATCH] sched: less newidle locking

Similarly to the earlier change in load_balance, only lock the runqueue in
load_balance_newidle if the busiest queue found has nr_running > 1.  This
will reduce the frequency of expensive remote runqueue lock acquisitions in
the schedule() path on some workloads.

Signed-off-by: Nick Piggin
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---

diff --git a/kernel/sched.c b/kernel/sched.c
index c61ee34..9301895 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2104,8 +2104,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		 */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
-					imbalance, sd, idle,
-					&all_pinned);
+					imbalance, sd, idle, &all_pinned);
 		spin_unlock(&busiest->lock);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
@@ -2200,18 +2199,22 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
 	BUG_ON(busiest == this_rq);
 
-	/* Attempt to move tasks */
-	double_lock_balance(this_rq, busiest);
-
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
 	else
 		sd->nr_balance_failed = 0;
 
-	spin_unlock(&busiest->lock);
 	return nr_moved;
 
 out_balanced:
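
For illustration only, a minimal user-space sketch of the pattern the patch
applies in load_balance_newidle(): read the remote queue's task count without
its lock, and take the expensive remote lock only when that unlocked hint says
there is a task worth pulling.  All names here (struct rq, newidle_balance,
pull_task_locked) are hypothetical, pthread mutexes stand in for runqueue
spinlocks, and the double_lock_balance()/this_rq->lock handling is omitted;
this is not the kernel code.

	/*
	 * Sketch of the "check before locking" pattern, with hypothetical
	 * names.  The unlocked nr_running read is only a hint; it is
	 * re-checked once the lock is actually held.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct rq {
		pthread_mutex_t lock;
		int nr_running;
	};

	/* Hypothetical stand-in for move_tasks(): called with busiest->lock held. */
	static int pull_task_locked(struct rq *busiest, struct rq *this_rq)
	{
		if (busiest->nr_running <= 1)	/* re-check under the lock */
			return 0;
		busiest->nr_running--;
		this_rq->nr_running++;
		return 1;
	}

	static int newidle_balance(struct rq *this_rq, struct rq *busiest)
	{
		int nr_moved = 0;

		if (busiest->nr_running > 1) {	/* cheap unlocked check */
			pthread_mutex_lock(&busiest->lock);
			nr_moved = pull_task_locked(busiest, this_rq);
			pthread_mutex_unlock(&busiest->lock);
		}
		return nr_moved;
	}

	int main(void)
	{
		struct rq busy = { PTHREAD_MUTEX_INITIALIZER, 3 };
		struct rq idle = { PTHREAD_MUTEX_INITIALIZER, 0 };
		int moved = newidle_balance(&idle, &busy);

		printf("moved=%d busy=%d idle=%d\n",
		       moved, busy.nr_running, idle.nr_running);
		return 0;
	}

The unlocked read is racy by design: the worst case is one wasted lock
acquisition or one missed pull attempt, which is acceptable because newly-idle
balancing is opportunistic and will be retried on the next idle entry.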