Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1c40326..6c4a672 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -196,7 +196,7 @@ void rcu_note_context_switch(int cpu)
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-       .dynticks_nesting = LLONG_MAX / 2,
+       .dynticks_nesting = DYNTICK_TASK_NESTING,
        .dynticks = ATOMIC_INIT(1),
 };
 
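The per-CPU ->dynticks_nesting field now starts out at DYNTICK_TASK_NESTING rather than the bare LLONG_MAX / 2 literal. The macro itself is defined in the RCU headers, not in this file; the following is only a sketch of the intent, and the exact value shown is an assumption based on the literal it replaces.

/*
 * Sketch only -- the real definition lives in the RCU headers, not in
 * this diff.  The idea is "huge but not overflow-prone": process-level
 * code starts far from zero so that mismatched interrupt entries and
 * exits caused by usermode upcalls cannot drive ->dynticks_nesting to
 * zero and make RCU believe the CPU has gone idle.
 */
#define DYNTICK_TASK_NESTING	(LLONG_MAX / 2 - 1)
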
@@ -348,19 +348,19 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
  * we really have entered idle, and must do the appropriate accounting.
  * The caller must have disabled interrupts.
  */
-static void rcu_idle_enter_common(struct rcu_dynticks *rdtp)
+static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 {
-       if (rdtp->dynticks_nesting) {
-               trace_rcu_dyntick("--=", rdtp->dynticks_nesting);
-               return;
-       }
-       trace_rcu_dyntick("Start", rdtp->dynticks_nesting);
-       if (!idle_cpu(smp_processor_id())) {
-               WARN_ON_ONCE(1);        /* must be idle task! */
-               trace_rcu_dyntick("Error on entry: not idle task",
-                                  rdtp->dynticks_nesting);
+       trace_rcu_dyntick("Start", oldval, 0);
+       if (!is_idle_task(current)) {
+               struct task_struct *idle = idle_task(smp_processor_id());
+
+               trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
                ftrace_dump(DUMP_ALL);
+               WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+                         current->pid, current->comm,
+                         idle->pid, idle->comm); /* must be idle task! */
        }
+       rcu_prepare_for_idle(smp_processor_id());
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
        smp_mb__before_atomic_inc();  /* See above. */
        atomic_inc(&rdtp->dynticks);
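
The old idle_cpu() test is replaced by is_idle_task(current): idle_cpu() also checks for pending work on the runqueue, so it can return false even while the idle task is running, whereas these sanity checks only care about which task is executing. is_idle_task() is introduced elsewhere in this series; the definition below is a sketch and an assumption, not part of this diff.

/*
 * Sketch of is_idle_task() -- defined in the scheduler headers, not in
 * this diff; the exact implementation shown here is an assumption.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;	/* the per-CPU idle tasks all have PID 0 */
}
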
@@ -383,12 +383,14 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp)
 void rcu_idle_enter(void)
 {
        unsigned long flags;
+       long long oldval;
        struct rcu_dynticks *rdtp;
 
        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
+       oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting = 0;
-       rcu_idle_enter_common(rdtp);
+       rcu_idle_enter_common(rdtp, oldval);
        local_irq_restore(flags);
 }
 
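rcu_idle_enter() zeroes the nesting count unconditionally because it is called from the idle loop itself, outside any interrupt. A hypothetical caller (illustration only, not taken from this diff) would bracket the low-power wait like this:

/* Hypothetical architecture idle loop -- illustration only. */
static void example_cpu_idle(void)
{
	while (1) {
		rcu_idle_enter();	/* RCU may now ignore this CPU   */
		while (!need_resched())
			cpu_relax();	/* arch-specific low-power wait  */
		rcu_idle_exit();	/* RCU must watch this CPU again */
		schedule();
	}
}
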
@@ -411,13 +413,18 @@ void rcu_idle_enter(void)
 void rcu_irq_exit(void)
 {
        unsigned long flags;
+       long long oldval;
        struct rcu_dynticks *rdtp;
 
        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
+       oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting--;
        WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
-       rcu_idle_enter_common(rdtp);
+       if (rdtp->dynticks_nesting)
+               trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+       else
+               rcu_idle_enter_common(rdtp, oldval);
        local_irq_restore(flags);
 }
 
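rcu_irq_exit() now decides locally whether this was a nested exit (emit the "--=" trace and return) or the outermost one (fall into rcu_idle_enter_common()), instead of leaving that test to the common code. The pairing seen from the interrupt path looks roughly like the hypothetical sketch below; in reality the architecture entry/exit code, not the handler, makes these calls.

/* Hypothetical interrupt path -- illustration only. */
static void example_handle_irq(void)
{
	rcu_irq_enter();	/* nesting 0 -> 1: leave dyntick-idle if we were in it */

	rcu_read_lock();	/* read-side critical sections are legal again */
	/* ... run the device handler ... */
	rcu_read_unlock();

	rcu_irq_exit();		/* back to 0: re-enter dyntick-idle; otherwise "--=" */
}
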
@@ -430,20 +437,22 @@ void rcu_irq_exit(void)
  */
 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 {
-       if (oldval) {
-               trace_rcu_dyntick("++=", rdtp->dynticks_nesting);
-               return;
-       }
        smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
        atomic_inc(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
        smp_mb__after_atomic_inc();  /* See above. */
        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
-       trace_rcu_dyntick("End", oldval);
-       if (!idle_cpu(smp_processor_id())) {
-               WARN_ON_ONCE(1);        /* must be idle task! */
-               trace_rcu_dyntick("Error on exit: not idle task", oldval);
+       rcu_cleanup_after_idle(smp_processor_id());
+       trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+       if (!is_idle_task(current)) {
+               struct task_struct *idle = idle_task(smp_processor_id());
+
+               trace_rcu_dyntick("Error on exit: not idle task",
+                                 oldval, rdtp->dynticks_nesting);
                ftrace_dump(DUMP_ALL);
+               WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+                         current->pid, current->comm,
+                         idle->pid, idle->comm); /* must be idle task! */
        }
 }
 
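Both the enter and exit paths bump ->dynticks, so the counter's parity encodes the CPU's state: even means dyntick-idle (RCU is ignoring the CPU), odd means RCU is watching it. The memory barriers around each atomic_inc() order the increment against the read-side critical sections on either side. An illustrative helper, not from this diff, restating the parity test that rcu_is_cpu_idle() and dyntick_save_progress_counter() use below:

/* Illustrative helper only -- the parity convention used in this file. */
static bool example_in_dyntick_idle(struct rcu_dynticks *rdtp)
{
	/* Incremented once on idle entry and once on idle exit, so an  */
	/* even value means "idle, no RCU readers possible here".       */
	return (atomic_read(&rdtp->dynticks) & 0x1) == 0;
}
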
@@ -453,8 +462,8 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
  * Exit idle mode, in other words, -enter- the mode in which RCU
  * read-side critical sections can occur.
  *
- * We crowbar the ->dynticks_nesting field to LLONG_MAX/2 to allow for
- * the possibility of usermode upcalls messing up our count
+ * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
+ * allow for the possibility of usermode upcalls messing up our count
  * of interrupt nesting level during the busy period that is just
  * now starting.
  */
@@ -468,7 +477,7 @@ void rcu_idle_exit(void)
        rdtp = &__get_cpu_var(rcu_dynticks);
        oldval = rdtp->dynticks_nesting;
        WARN_ON_ONCE(oldval != 0);
-       rdtp->dynticks_nesting = LLONG_MAX / 2;
+       rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
        rcu_idle_exit_common(rdtp, oldval);
        local_irq_restore(flags);
 }
@@ -503,7 +512,10 @@ void rcu_irq_enter(void)
        oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting++;
        WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
-       rcu_idle_exit_common(rdtp, oldval);
+       if (oldval)
+               trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+       else
+               rcu_idle_exit_common(rdtp, oldval);
        local_irq_restore(flags);
 }
 
@@ -556,13 +568,18 @@ void rcu_nmi_exit(void)
  * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
  *
  * If the current CPU is in its idle loop and is neither in an interrupt
- * or NMI handler, return true.  The caller must have at least disabled
- * preemption.
+ * nor in an NMI handler, return true.
  */
 int rcu_is_cpu_idle(void)
 {
-       return (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+       int ret;
+
+       preempt_disable();
+       ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
+       preempt_enable();
+       return ret;
 }
+EXPORT_SYMBOL(rcu_is_cpu_idle);
 
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
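Moving the preempt_disable()/preempt_enable() pair into rcu_is_cpu_idle() (and exporting the symbol) lets callers that run preemptibly, such as the lockdep-RCU debug checks guarded by CONFIG_PROVE_RCU above, use it directly; the answer may be stale by the time it is returned, which is acceptable for diagnostics. A plausible caller, sketched rather than copied from upstream:

/* Sketch of a lockdep-RCU style caller -- not the exact upstream code. */
int example_rcu_read_lock_held(void)
{
	if (rcu_is_cpu_idle())
		return 0;	/* RCU ignores idle CPUs: no legal readers here */
	return lock_is_held(&rcu_lock_map);
}
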
@@ -588,7 +605,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
        rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
-       return 0;
+       return (rdp->dynticks_snap & 0x1) == 0;
 }
 
 /*
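
dyntick_save_progress_counter() now reports immediately whether the snapshotted CPU was already in dyntick-idle (even counter), in which case it has already passed through a quiescent state. Otherwise the force-quiescent-state machinery rechecks later; a simplified sketch of that comparison follows (the real logic lives in rcu_implicit_dynticks_qs(), and this is not the exact upstream code).

/* Simplified sketch of the later recheck -- not the exact upstream code. */
static int example_dynticks_made_progress(struct rcu_data *rdp)
{
	unsigned int snap = (unsigned int)rdp->dynticks_snap;
	unsigned int curr =
		(unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);

	/* Quiescent if the CPU is idle now (even count) or has passed   */
	/* through idle since the snapshot (count advanced by >= 2).     */
	return (curr & 0x1) == 0 || (curr - snap) >= 2;
}
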
@@ -625,8 +642,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 #endif /* #ifdef CONFIG_SMP */
 
-int rcu_cpu_stall_suppress __read_mostly;
-
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
        rsp->gp_start = jiffies;
@@ -1316,7 +1331,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
        else
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        if (need_report & RCU_OFL_TASKS_EXP_GP)
-               rcu_report_exp_rnp(rsp, rnp);
+               rcu_report_exp_rnp(rsp, rnp, true);
        rcu_node_kthread_setaffinity(rnp, -1);
 }
 
@@ -1358,7 +1373,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        /* If no callbacks are ready, just return.*/
        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
                trace_rcu_batch_start(rsp->name, 0, 0);
-               trace_rcu_batch_end(rsp->name, 0);
+               trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+                                   need_resched(), is_idle_task(current),
+                                   rcu_is_callbacks_kthread());
                return;
        }
 
@@ -1386,12 +1403,17 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                debug_rcu_head_unqueue(list);
                __rcu_reclaim(rsp->name, list);
                list = next;
-               if (++count >= bl)
+               /* Stop only if limit reached and CPU has something to do. */
+               if (++count >= bl &&
+                   (need_resched() ||
+                    (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
                        break;
        }
 
        local_irq_save(flags);
-       trace_rcu_batch_end(rsp->name, count);
+       trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
+                           is_idle_task(current),
+                           rcu_is_callbacks_kthread());
 
        /* Update count, and requeue any remaining callbacks. */
        rdp->qlen -= count;
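
The callback loop used to stop unconditionally after blimit callbacks; now it keeps going past the limit as long as nothing else needs the CPU (no reschedule pending, and we are either the idle task or the dedicated callbacks kthread). An illustrative restatement of that stop condition as a standalone predicate:

/* Illustrative helper only -- restates the early-stop condition above. */
static bool example_stop_invoking_callbacks(long count, long bl)
{
	return count >= bl &&			/* batch limit reached, and...    */
	       (need_resched() ||		/* ...someone else wants the CPU, */
		(!is_idle_task(current) &&	/* ...or we run in a context that */
		 !rcu_is_callbacks_kthread()));	/* should not monopolize the CPU. */
}
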
@@ -1905,7 +1927,7 @@ static int rcu_pending(int cpu)
  * by the current CPU, even if none need be done immediately, returning
  * 1 if so.
  */
-static int rcu_needs_cpu_quick_check(int cpu)
+static int rcu_cpu_has_callbacks(int cpu)
 {
        /* RCU callbacks either ready or pending? */
        return per_cpu(rcu_sched_data, cpu).nxtlist ||
@@ -2007,7 +2029,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
                rdp->nxttail[i] = &rdp->nxtlist;
        rdp->qlen = 0;
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
-       WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
+       WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
        rdp->cpu = cpu;
        rdp->rsp = rsp;
@@ -2035,8 +2057,10 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
        rdp->qlen_last_fqs_check = 0;
        rdp->n_force_qs_snap = rsp->n_force_qs;
        rdp->blimit = blimit;
-       WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != LLONG_MAX / 2);
-       WARN_ON_ONCE((atomic_read(&rdp->dynticks->dynticks) & 0x1) != 1);
+       rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
+       atomic_set(&rdp->dynticks->dynticks,
+                  (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
+       rcu_prepare_for_idle_init(cpu);
        raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
 
        /*
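
When a CPU comes online, rcu_init_percpu_data() no longer merely WARNs about the dynticks state; it crowbars the nesting count back to DYNTICK_TASK_NESTING and forces ->dynticks odd, i.e. "not idle". The (x & ~0x1) + 1 idiom maps any even value to the next odd one and leaves odd values unchanged: 4 becomes 5, while 5 stays 5. As a standalone sketch:

/* Sketch of the "force the counter odd (non-idle)" idiom used above. */
static void example_force_nonidle(atomic_t *dynticks)
{
	int v = atomic_read(dynticks);

	atomic_set(dynticks, (v & ~0x1) + 1);	/* even -> next odd, odd -> unchanged */
}
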
@@ -2118,6 +2142,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                rcu_send_cbs_to_online(&rcu_bh_state);
                rcu_send_cbs_to_online(&rcu_sched_state);
                rcu_preempt_send_cbs_to_online();
+               rcu_cleanup_after_idle(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: