Merge branch 'tip/sched/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rosted...
author    Ingo Molnar <mingo@kernel.org>
          Sat, 14 Apr 2012 13:12:00 +0000 (15:12 +0200)
committer Ingo Molnar <mingo@kernel.org>
          Sat, 14 Apr 2012 13:12:04 +0000 (15:12 +0200)
Pull a scheduler optimization commit from Steven Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/rt.c

diff --combined kernel/sched/rt.c
@@@ -1428,7 -1428,7 +1428,7 @@@ static struct task_struct *pick_next_hi
  next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
 -              if (next && next->prio < idx)
 +              if (next && next->prio <= idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p;
@@@ -1803,44 -1803,40 +1803,40 @@@ static void task_woken_rt(struct rq *rq
  static void set_cpus_allowed_rt(struct task_struct *p,
                                const struct cpumask *new_mask)
  {
-       int weight = cpumask_weight(new_mask);
+       struct rq *rq;
+       int weight;
  
        BUG_ON(!rt_task(p));
  
-       /*
-        * Update the migration status of the RQ if we have an RT task
-        * which is running AND changing its weight value.
-        */
-       if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
-               struct rq *rq = task_rq(p);
-               if (!task_current(rq, p)) {
-                       /*
-                        * Make sure we dequeue this task from the pushable list
-                        * before going further.  It will either remain off of
-                        * the list because we are no longer pushable, or it
-                        * will be requeued.
-                        */
-                       if (p->rt.nr_cpus_allowed > 1)
-                               dequeue_pushable_task(rq, p);
+       if (!p->on_rq)
+               return;
  
-                       /*
-                        * Requeue if our weight is changing and still > 1
-                        */
-                       if (weight > 1)
-                               enqueue_pushable_task(rq, p);
+       weight = cpumask_weight(new_mask);
  
-               }
+       /*
+        * Only update if the process changes its state from whether it
+        * can migrate or not.
+        */
+       if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+               return;
  
-               if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
-                       rq->rt.rt_nr_migratory++;
-               } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
-                       BUG_ON(!rq->rt.rt_nr_migratory);
-                       rq->rt.rt_nr_migratory--;
-               }
+       rq = task_rq(p);
  
-               update_rt_migration(&rq->rt);
+       /*
+        * The process used to be able to migrate OR it can now migrate
+        */
+       if (weight <= 1) {
+               if (!task_current(rq, p))
+                       dequeue_pushable_task(rq, p);
+               BUG_ON(!rq->rt.rt_nr_migratory);
+               rq->rt.rt_nr_migratory--;
+       } else {
+               if (!task_current(rq, p))
+                       enqueue_pushable_task(rq, p);
+               rq->rt.rt_nr_migratory++;
        }
+       update_rt_migration(&rq->rt);
  }
  
  /* Assumes rq->lock is held */
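
Note: the crux of the pulled optimization is the early return in set_cpus_allowed_rt()
when the task's ability to migrate does not actually change, i.e. when the old and new
affinity masks fall on the same side of the "more than one CPU" boundary. Below is a
minimal stand-alone sketch of that predicate (hypothetical helper name and user-space
harness, not part of the kernel source), assuming a task counts as migratable only when
its affinity weight is greater than 1:

    #include <stdio.h>
    #include <stdbool.h>

    /*
     * Returns true only when the affinity change crosses the
     * single-CPU boundary, which is the only case in which the
     * pushable list and rt_nr_migratory bookkeeping must be updated.
     */
    static bool migratability_changed(int old_nr_cpus, int new_nr_cpus)
    {
            return (old_nr_cpus > 1) != (new_nr_cpus > 1);
    }

    int main(void)
    {
            /* 4 -> 2 allowed CPUs: still migratable, no update needed. */
            printf("%d\n", migratability_changed(4, 2));    /* prints 0 */

            /* 2 -> 1 allowed CPU: task becomes pinned, bookkeeping runs. */
            printf("%d\n", migratability_changed(2, 1));    /* prints 1 */
            return 0;
    }

In the old code the bookkeeping ran whenever the weight value changed at all; with this
check, merely shrinking or growing a multi-CPU mask takes the early-return path.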