/*
* Stop only if limit reached and CPU has something to do.
*/
- if (count >= bl && !offloaded &&
- (need_resched() ||
- (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
- break;
+ if (in_serving_softirq()) {
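+		/*
+		 * In softirq the batch limit applies as before: stop once
+		 * enough callbacks have run and the CPU has other work.
+		 */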
+ if (count >= bl && (need_resched() ||
+ (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
+ break;
+ } else {
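+		/*
+		 * Not in softirq, so presumably running from a kthread
+		 * (e.g. rcuc or an offloaded-callback kthread): it is safe
+		 * to reopen softirqs and offer a voluntary quiescent state.
+		 * The lockdep assertions check that interrupts remain
+		 * enabled across the reschedule point.
+		 */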
+ local_bh_enable();
+ lockdep_assert_irqs_enabled();
+ cond_resched_tasks_rcu_qs();
+ lockdep_assert_irqs_enabled();
+ local_bh_disable();
+ }
+
+ /*
+ * Make sure we don't spend too much time here and deprive other
+ * softirq vectors of CPU cycles.
+ */
if (unlikely(tlimit)) {
/* only call local_clock() every 32 callbacks */
		if (likely((count & 31) || local_clock() < tlimit))
			continue;
		/* Exceeded the time limit, so leave. */
		break;
}
- if (!in_serving_softirq()) {
- local_bh_enable();
- lockdep_assert_irqs_enabled();
- cond_resched_tasks_rcu_qs();
- lockdep_assert_irqs_enabled();
- local_bh_disable();
- }
}
local_irq_save(flags);