From: Frederic Weisbecker
Date: Mon, 4 Feb 2013 23:48:46 +0000 (+0100)
Subject: Merge branch 'nohz/printk-v8' into irq/core
X-Git-Tag: v3.9-rc1~174^2~1
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=077931446b85e7858bf9dc0927cd116669b965d2;p=platform%2Fupstream%2Fkernel-adaptation-pc.git

Merge branch 'nohz/printk-v8' into irq/core

Conflicts:
	kernel/irq_work.c

Add support for printk on full dynticks CPUs:

* Don't stop the tick with irq works pending. This fix is generally
  useful and concerns archs that can't raise self-IPIs.

* Flush irq works before CPU offlining.

* Introduce "lazy" irq works that can wait for the next tick to be
  executed, unless it's stopped.

* Implement the klogd wake up using an irq work. This removes the
  ad-hoc printk_tick()/printk_needs_cpu() hooks and makes it work even
  in dynticks mode.

Signed-off-by: Frederic Weisbecker
---

077931446b85e7858bf9dc0927cd116669b965d2
diff --cc kernel/irq_work.c
index c9d7478,7f3a59b..55fcce6
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@@ -63,37 -56,64 +56,53 @@@ void __weak arch_irq_work_raise(void
 }
 
 /*
- * Queue the entry and raise the IPI if needed.
+ * Enqueue the irq_work @entry unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
  */
-static void __irq_work_queue(struct irq_work *work)
+void irq_work_queue(struct irq_work *work)
 {
-	bool empty;
-
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return;
+
+	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-	/* The list was empty, raise self-interrupt to start processing. */
-	if (empty)
-		arch_irq_work_raise();
+	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+	/*
+	 * If the work is not "lazy" or the tick is stopped, raise the irq
+	 * work interrupt (if supported by the arch), otherwise, just wait
+	 * for the next tick.
+	 */
+	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+			arch_irq_work_raise();
+	}
 
 	preempt_enable();
 }
-
-/*
- * Enqueue the irq_work @entry, returns true on success, failure when the
- * @entry was already enqueued by someone else.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue(struct irq_work *work)
-{
-	if (!irq_work_claim(work)) {
-		/*
-		 * Already enqueued, can't do!
-		 */
-		return false;
-	}
-
-	__irq_work_queue(work);
-	return true;
-}
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
- /*
-  * Run the irq_work entries on this cpu. Requires to be ran from hardirq
-  * context with local IRQs disabled.
-  */
- void irq_work_run(void)
+ bool irq_work_needs_cpu(void)
+ {
+ 	struct llist_head *this_list;
+ 
+ 	this_list = &__get_cpu_var(irq_work_list);
+ 	if (llist_empty(this_list))
+ 		return false;
+ 
+ 	/* All work should have been flushed before going offline */
+ 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+ 
+ 	return true;
+ }
+ 
+ static void __irq_work_run(void)
 {
+	unsigned long flags;
 	struct irq_work *work;
 	struct llist_head *this_list;
 	struct llist_node *llnode;
diff --cc kernel/time/tick-sched.c
index d58e552,822d757..fb8e5e4
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@@ -28,10 -29,10 +29,10 @@@
 /*
  * Per cpu nohz control structure
  */
- static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+ DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
  */
 static ktime_t last_jiffies_update;
 
@@@ -329,10 -287,10 +330,10 @@@ static ktime_t tick_nohz_stop_sched_tic
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
 		time_delta = timekeeping_max_deferment();
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 
-	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
+	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
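
Below is a minimal sketch of the "lazy" irq work pattern the klogd bullet describes: a per-CPU irq_work flagged IRQ_WORK_LAZY whose callback wakes the log reader. It assumes only the IRQ_WORK_LAZY flag and the void irq_work_queue() shown in the hunk above; the log_wait wait queue and the function names are illustrative stand-ins for the printk code, not quoted from the series.

#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/wait.h>

/* Illustrative stand-in for printk's klogd wait queue. */
static DECLARE_WAIT_QUEUE_HEAD(log_wait);

static void wake_up_klogd_work_func(struct irq_work *irq_work)
{
	/* Runs from the irq work handler, in irq context. */
	wake_up_interruptible(&log_wait);
}

/* Lazy: no self-IPI is raised for it while the tick is running. */
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
	.func	= wake_up_klogd_work_func,
	.flags	= IRQ_WORK_LAZY,
};

void wake_up_klogd(void)
{
	preempt_disable();
	if (waitqueue_active(&log_wait))
		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
	preempt_enable();
}

With the tick running, the queued work simply rides the next tick; if the tick is stopped on that CPU, irq_work_queue() raises the irq work interrupt (where the arch supports it), so the wakeup is not lost in full dynticks mode.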
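The "Flush irq works before CPU offlining" point pairs with the WARN_ON_ONCE(cpu_is_offline(...)) check visible in irq_work_needs_cpu() above. The sketch below shows how such a flush can be hooked into the CPU hotplug notifier API of this kernel generation; the notifier block and init wiring are assumptions for illustration, and __irq_work_run() stands for the internal helper introduced in the hunk.

#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>

/* Helper from kernel/irq_work.c, as introduced in the hunk above. */
static void __irq_work_run(void);

static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	if (action == CPU_DYING) {
		/* Runs on the dying CPU itself, from stop_machine(). */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			return NOTIFY_OK;
		/* Drain every pending work before the CPU goes offline. */
		__irq_work_run();
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_work_cpu_nb = {
	.notifier_call = irq_work_cpu_notify,
};

static int __init irq_work_init_cpu_notifier(void)
{
	register_cpu_notifier(&irq_work_cpu_nb);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

With the per-CPU list drained at CPU_DYING, the WARN_ON_ONCE() in irq_work_needs_cpu() should never trigger for an offline CPU.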