Merge commit 'v3.0-rc5' into sched/core
author     Ingo Molnar <mingo@elte.hu>
           Fri, 1 Jul 2011 08:34:09 +0000 (10:34 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Fri, 1 Jul 2011 08:34:24 +0000 (10:34 +0200)
Merge reason: Move to a (much) newer base.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c
kernel/sched_rt.c

diff --combined kernel/sched.c
  
  static inline int rt_policy(int policy)
  {
 -      if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
 +      if (policy == SCHED_FIFO || policy == SCHED_RR)
                return 1;
        return 0;
  }
@@@ -605,10 -605,10 +605,10 @@@ static inline int cpu_of(struct rq *rq
  /*
   * Return the group to which this task belongs.
   *
-  * We use task_subsys_state_check() and extend the RCU verification
-  * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
-  * holds that lock for each task it moves into the cgroup. Therefore
-  * by holding that lock, we pin the task to the current cgroup.
+  * We use task_subsys_state_check() and extend the RCU verification with
+  * p->pi_lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+  * task it moves into the cgroup. Therefore by holding either of those locks,
+  * we pin the task to the current cgroup.
   */
  static inline struct task_group *task_group(struct task_struct *p)
  {
        struct task_group *tg;
        struct cgroup_subsys_state *css;
  
        css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
-                       lockdep_is_held(&p->pi_lock));
+                       lockdep_is_held(&p->pi_lock) ||
+                       lockdep_is_held(&task_rq(p)->lock));
        tg = container_of(css, struct task_group, css);
  
        return autogroup_task_group(p, tg);
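
To see why holding either lock pins the cgroup, compare with the mover side the comment refers to. The following is an abridged, from-memory sketch of sched_move_task() (reached from cpu_cgroup_attach()); dequeue/requeue and the task_move_group hook are left out, and the _sketch suffix marks it as illustrative rather than the upstream function verbatim:

	void sched_move_task_sketch(struct task_struct *tsk)
	{
		unsigned long flags;
		struct rq *rq;

		/* Takes both tsk->pi_lock and rq->lock, see task_rq_lock(). */
		rq = task_rq_lock(tsk, &flags);

		/* The group change happens with both locks held... */
		set_task_rq(tsk, task_cpu(tsk));

		/* ...so a reader holding either lock sees a stable task_group. */
		task_rq_unlock(rq, tsk, &flags);
	}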
@@@ -2200,6 -2201,16 +2201,16 @@@ void set_task_cpu(struct task_struct *p
                        !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
  
  #ifdef CONFIG_LOCKDEP
+       /*
+        * The caller should hold either p->pi_lock or rq->lock, when changing
+        * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+        *
+        * sched_move_task() holds both and thus holding either pins the cgroup,
+        * see set_task_rq().
+        *
+        * Furthermore, all task_rq users should acquire both locks, see
+        * task_rq_lock().
+        */
        WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
                                      lockdep_is_held(&task_rq(p)->lock)));
  #endif
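
The comment's closing rule ("all task_rq users should acquire both locks, see task_rq_lock()") refers to the helper below. This is a from-memory sketch of the v3.0-era version, so treat the details as approximate rather than authoritative:

	static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
		__acquires(p->pi_lock)
		__acquires(rq->lock)
	{
		struct rq *rq;

		for (;;) {
			/* Lock 1: pins a (possibly sleeping) task. */
			raw_spin_lock_irqsave(&p->pi_lock, *flags);
			rq = task_rq(p);
			/* Lock 2: pins the runqueue the task currently sits on. */
			raw_spin_lock(&rq->lock);
			if (likely(rq == task_rq(p)))
				return rq;
			/* Raced with a migration; drop both locks and retry. */
			raw_spin_unlock(&rq->lock);
			raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
		}
	}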
@@@ -2486,7 -2497,7 +2497,7 @@@ ttwu_do_wakeup(struct rq *rq, struct ta
        if (p->sched_class->task_woken)
                p->sched_class->task_woken(rq, p);
  
 -      if (unlikely(rq->idle_stamp)) {
 +      if (rq->idle_stamp) {
                u64 delta = rq->clock - rq->idle_stamp;
                u64 max = 2*sysctl_sched_migration_cost;
  
diff --combined kernel/sched_rt.c
@@@ -1096,7 -1096,7 +1096,7 @@@ static void check_preempt_curr_rt(struc
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
-       if (p->prio == rq->curr->prio && !need_resched())
+       if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
  #endif
  }
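
The switch from need_resched() to test_tsk_need_resched(rq->curr) matters because the two helpers do not test the same task: need_resched() checks the calling context's own TIF_NEED_RESCHED flag, while check_preempt_curr_rt() can run on behalf of a wakeup onto a remote rq, where the task of interest is rq->curr. Roughly, the helpers involved look like this (sketched from the include/linux/sched.h of this era):

	/* Tests TIF_NEED_RESCHED of an arbitrary task... */
	static inline int test_tsk_need_resched(struct task_struct *tsk)
	{
		return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
	}

	/* ...whereas this one only ever tests the task calling it. */
	static inline int need_resched(void)
	{
		return unlikely(test_thread_flag(TIF_NEED_RESCHED));
	}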
@@@ -1126,7 -1126,7 +1126,7 @@@ static struct task_struct *_pick_next_t
  
        rt_rq = &rq->rt;
  
 -      if (unlikely(!rt_rq->rt_nr_running))
 +      if (!rt_rq->rt_nr_running)
                return NULL;
  
        if (rt_rq_throttled(rt_rq))
@@@ -1239,6 -1239,10 +1239,10 @@@ static int find_lowest_rq(struct task_s
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
  
+       /* Make sure the mask is initialized first */
+       if (unlikely(!lowest_mask))
+               return -1;
+
        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
  
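For reference, lowest_mask here is expected to be the per-CPU local_cpu_mask cpumask, which is only allocated once init_sched_rt_class() has run; until then the pointer is NULL, which is what the new early return guards against. A from-memory sketch of that setup (names as recalled from kernel/sched_rt.c of this era, so treat them as approximate):

	static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

	static __init void init_sched_rt_class(void)
	{
		unsigned int i;

		/* Before this runs, per_cpu(local_cpu_mask, i) is still NULL. */
		for_each_possible_cpu(i)
			zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
						GFP_KERNEL, cpu_to_node(i));
	}

find_lowest_rq() picks the mask up near the top of the function shown above (via __get_cpu_var(local_cpu_mask) in this era's source), which is why the NULL check has to come before any other use of it.
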
@@@ -1544,7 -1548,7 +1548,7 @@@ skip
  static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
  {
        /* Try to pull RT tasks here if we lower this rq's prio */
 -      if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
 +      if (rq->rt.highest_prio.curr > prev->prio)
                pull_rt_task(rq);
  }