Merge remote-tracking branch 'tip/core/rcu' into next.2012.09.25b
Author:     Paul E. McKenney <paulmck@linux.vnet.ibm.com>
AuthorDate: Tue, 25 Sep 2012 17:03:56 +0000 (10:03 -0700)
Committer:  Paul E. McKenney <paulmck@linux.vnet.ibm.com>
CommitDate: Tue, 25 Sep 2012 17:03:56 +0000 (10:03 -0700)
Resolved conflict in kernel/sched/core.c using Peter Zijlstra's
approach from https://lkml.org/lkml/2012/9/5/585.

1  2 
kernel/sched/core.c
kernel/time/tick-sched.c

diff --combined kernel/sched/core.c
@@@ -5342,9 -5342,6 +5342,6 @@@ static void migrate_tasks(unsigned int 
         */
        rq->stop = NULL;
  
-       /* Ensure any throttled groups are reachable by pick_next_task */
-       unthrottle_offline_cfs_rqs(rq);
        for ( ; ; ) {
                /*
                 * There's this thread running, bail when that's the only
@@@ -5607,18 -5604,8 +5604,10 @@@ migration_call(struct notifier_block *n
                migrate_tasks(cpu);
                BUG_ON(rq->nr_running != 1); /* the migration thread */
                raw_spin_unlock_irqrestore(&rq->lock, flags);
 +              break;
  
-               {
-                       struct rq *dest_rq;
-                       local_irq_save(flags);
-                       dest_rq = cpu_rq(smp_processor_id());
-                       raw_spin_lock(&dest_rq->lock);
-                       calc_load_migrate(rq);
-                       raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
-               }
 +      case CPU_DEAD:
+               calc_load_migrate(rq);
                break;
  #endif
        }
@@@ -6027,11 -6014,6 +6016,6 @@@ static void destroy_sched_domains(struc
   * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
   * allows us to avoid some pointer chasing select_idle_sibling().
   *
-  * Iterate domains and sched_groups downward, assigning CPUs to be
-  * select_idle_sibling() hw buddy.  Cross-wiring hw makes bouncing
-  * due to random perturbation self canceling, ie sw buddies pull
-  * their counterpart to their CPU's hw counterpart.
-  *
   * Also keep a unique ID per domain (we use the first cpu number in
   * the cpumask of the domain), this allows us to quickly tell if
   * two cpus are in the same cache domain, see cpus_share_cache().
@@@ -6045,40 -6027,8 +6029,8 @@@ static void update_top_cache_domain(in
        int id = cpu;
  
        sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-       if (sd) {
-               struct sched_domain *tmp = sd;
-               struct sched_group *sg, *prev;
-               bool right;
-               /*
-                * Traverse to first CPU in group, and count hops
-                * to cpu from there, switching direction on each
-                * hop, never ever pointing the last CPU rightward.
-                */
-               do {
-                       id = cpumask_first(sched_domain_span(tmp));
-                       prev = sg = tmp->groups;
-                       right = 1;
-                       while (cpumask_first(sched_group_cpus(sg)) != id)
-                               sg = sg->next;
-                       while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-                               prev = sg;
-                               sg = sg->next;
-                               right = !right;
-                       }
-                       /* A CPU went down, never point back to domain start. */
-                       if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-                               right = false;
-                       sg = right ? sg->next : prev;
-                       tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-               } while ((tmp = tmp->child));
+       if (sd)
                id = cpumask_first(sched_domain_span(sd));
-       }
  
        rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
        per_cpu(sd_llc_id, cpu) = id;
diff --combined kernel/time/tick-sched.c
@@@ -436,8 -436,7 +436,8 @@@ static bool can_stop_idle_tick(int cpu
        if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
                static int ratelimit;
  
 -              if (ratelimit < 10) {
 +              if (ratelimit < 10 &&
 +                  (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
                        printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
                               (unsigned int) local_softirq_pending());
                        ratelimit++;
@@@ -574,6 -573,7 +574,7 @@@ static void tick_nohz_restart_sched_tic
        tick_do_update_jiffies64(now);
        update_cpu_load_nohz();
  
+       calc_load_exit_idle();
        touch_softlockup_watchdog();
        /*
         * Cancel the scheduled timer and restore the tick