[platform/adaptation/renesas_rcar/renesas_kernel.git] / kernel / softirq.c
index eb0acf4..8a1e6e1 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,8 +6,6 @@
  *     Distribute under GPLv2.
  *
  *     Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
- *
- *     Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/export.h>
@@ -91,7 +89,7 @@ static void wakeup_softirqd(void)
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip, unsigned int cnt)
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
        unsigned long flags;
 
@@ -109,33 +107,21 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
        /*
         * Were softirqs turned off above:
         */
-       if (softirq_count() == cnt)
+       if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
 
        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
-#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
-{
-       preempt_count_add(cnt);
-       barrier();
-}
+EXPORT_SYMBOL(__local_bh_disable_ip);
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
-void local_bh_disable(void)
-{
-       __local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
-}
-
-EXPORT_SYMBOL(local_bh_disable);
-
 static void __local_bh_enable(unsigned int cnt)
 {
        WARN_ON_ONCE(!irqs_disabled());
 
-       if (softirq_count() == cnt)
+       if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
        preempt_count_sub(cnt);
 }
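
The masking matters because __local_bh_disable_ip() can now be called with counts that carry preempt bits in addition to the softirq field, e.g. by BH-disabling lock primitives that fold the preempt disable into the same add. A minimal sketch of the arithmetic, with illustrative constants assumed to match the conventional preempt_count layout (they are not part of this patch):

	#define PREEMPT_OFFSET          (1UL << 0)
	#define SOFTIRQ_OFFSET          (1UL << 8)
	#define SOFTIRQ_MASK            (0xffUL << 8)
	#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)
	/* spin_lock_bh()-style callers disable BHs and preemption in one add: */
	#define SOFTIRQ_LOCK_OFFSET     (SOFTIRQ_DISABLE_OFFSET + PREEMPT_OFFSET)

	/* With cnt == SOFTIRQ_LOCK_OFFSET (0x201) the old test
	 * softirq_count() == cnt can never hold, since softirq_count()
	 * exposes only the 0xff00 field; the masked comparison
	 * softirq_count() == (cnt & SOFTIRQ_MASK) yields 0x200 == 0x200. */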
@@ -153,7 +139,7 @@ void _local_bh_enable(void)
 
 EXPORT_SYMBOL(_local_bh_enable);
 
-static inline void _local_bh_enable_ip(unsigned long ip)
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 {
        WARN_ON_ONCE(in_irq() || irqs_disabled());
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -168,7 +154,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
-       preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
+       preempt_count_sub(cnt - 1);
 
        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
@@ -184,18 +170,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
 #endif
        preempt_check_resched();
 }
-
-void local_bh_enable(void)
-{
-       _local_bh_enable_ip(_RET_IP_);
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
-       _local_bh_enable_ip(ip);
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
+EXPORT_SYMBOL(__local_bh_enable_ip);
 
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
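
With the out-of-line wrappers deleted here, local_bh_disable(), local_bh_enable() and local_bh_enable_ip() presumably become static inlines layered on the two exported *_ip functions (the EXPORT_SYMBOLs above are what keeps modules linking once the wrappers are inlined into them). A sketch of the assumed header-side counterparts, location hypothetical (include/linux/bottom_half.h):

	static inline void local_bh_disable(void)
	{
		__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
	}

	static inline void local_bh_enable_ip(unsigned long ip)
	{
		__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
	}

	static inline void local_bh_enable(void)
	{
		__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
	}

The removed !CONFIG_TRACE_IRQFLAGS variant of __local_bh_disable_ip() would move to the same header under this scheme.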
@@ -215,40 +190,35 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 /*
- * Convoluted means of passing __do_softirq() a message through the various
- * architecture execute_on_stack() bits.
- *
  * When we run softirqs from irq_exit() and thus on the hardirq stack we need
  * to keep the lockdep irq context tracking as tight as possible in order to
  * not mis-qualify lock contexts and miss possible deadlocks.
  */
-static DEFINE_PER_CPU(int, softirq_from_hardirq);
 
-static inline void lockdep_softirq_from_hardirq(void)
+static inline bool lockdep_softirq_start(void)
 {
-       this_cpu_write(softirq_from_hardirq, 1);
-}
+       bool in_hardirq = false;
 
-static inline void lockdep_softirq_start(void)
-{
-       if (this_cpu_read(softirq_from_hardirq))
+       if (trace_hardirq_context(current)) {
+               in_hardirq = true;
                trace_hardirq_exit();
+       }
+
        lockdep_softirq_enter();
+
+       return in_hardirq;
 }
 
-static inline void lockdep_softirq_end(void)
+static inline void lockdep_softirq_end(bool in_hardirq)
 {
        lockdep_softirq_exit();
-       if (this_cpu_read(softirq_from_hardirq)) {
-               this_cpu_write(softirq_from_hardirq, 0);
+
+       if (in_hardirq)
                trace_hardirq_enter();
-       }
 }
-
 #else
-static inline void lockdep_softirq_from_hardirq(void) { }
-static inline void lockdep_softirq_start(void) { }
-static inline void lockdep_softirq_end(void) { }
+static inline bool lockdep_softirq_start(void) { return false; }
+static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
 asmlinkage void __do_softirq(void)
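
The per-CPU softirq_from_hardirq flag is replaced by reading the hardirq tracing state straight off the task. trace_hardirq_context() is assumed here to be the usual irq-flags tracing accessor, roughly #define trace_hardirq_context(p) ((p)->hardirq_context). The intended usage pattern, mirroring __do_softirq() below:

	bool in_hardirq;

	in_hardirq = lockdep_softirq_start();	/* exits hardirq ctx if set */
	/* ... run the pending softirq handlers ... */
	lockdep_softirq_end(in_hardirq);	/* re-enters hardirq ctx if set */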
@@ -257,6 +227,7 @@ asmlinkage void __do_softirq(void)
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
+       bool in_hardirq;
        __u32 pending;
        int cpu;
 
@@ -270,8 +241,8 @@ asmlinkage void __do_softirq(void)
        pending = local_softirq_pending();
        account_irq_enter_time(current);
 
-       __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
-       lockdep_softirq_start();
+       __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+       in_hardirq = lockdep_softirq_start();
 
        cpu = smp_processor_id();
 restart:
@@ -318,7 +289,7 @@ restart:
                wakeup_softirqd();
        }
 
-       lockdep_softirq_end();
+       lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
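
Note that __do_softirq() passes SOFTIRQ_OFFSET rather than SOFTIRQ_DISABLE_OFFSET: the low and high halves of the softirq field let the usual tests tell "serving a softirq" apart from "BHs merely disabled". Assuming the standard definitions (constants as in the sketch further up):

	#define softirq_count()       (preempt_count() & SOFTIRQ_MASK)
	#define in_serving_softirq()  (softirq_count() & SOFTIRQ_OFFSET)

	/* local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET),
	 * so in_serving_softirq() stays false in a plain BH-disabled section
	 * but is true while the handlers above run. */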
@@ -348,8 +319,6 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
-       int cpu = smp_processor_id();
-
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
@@ -357,7 +326,7 @@ void irq_enter(void)
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
-               tick_check_idle(cpu);
+               tick_check_idle();
                _local_bh_enable();
        }
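
The cpu argument can go away because irq_enter() runs with interrupts off on the CPU in question, so the callee can safely use per-CPU accessors itself. A sketch of the assumed argument-less callee (kernel/time/tick-sched.c; the _this_cpu helper name is an assumption):

	void tick_check_idle(void)
	{
		/* Both checks now resolve the current cpu internally. */
		tick_check_oneshot_broadcast_this_cpu();
		tick_nohz_irq_enter();
	}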
 
@@ -367,7 +336,6 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
        if (!force_irqthreads) {
-               lockdep_softirq_from_hardirq();
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
@@ -663,146 +631,17 @@ void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
 }
 EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
 
-/*
- * Remote softirq bits
- */
-
-DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-EXPORT_PER_CPU_SYMBOL(softirq_work_list);
-
-static void __local_trigger(struct call_single_data *cp, int softirq)
-{
-       struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
-
-       list_add_tail(&cp->list, head);
-
-       /* Trigger the softirq only if the list was previously empty.  */
-       if (head->next == &cp->list)
-               raise_softirq_irqoff(softirq);
-}
-
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static void remote_softirq_receive(void *data)
-{
-       struct call_single_data *cp = data;
-       unsigned long flags;
-       int softirq;
-
-       softirq = *(int *)cp->info;
-       local_irq_save(flags);
-       __local_trigger(cp, softirq);
-       local_irq_restore(flags);
-}
-
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-       if (cpu_online(cpu)) {
-               cp->func = remote_softirq_receive;
-               cp->info = &softirq;
-               cp->flags = 0;
-
-               __smp_call_function_single(cpu, cp, 0);
-               return 0;
-       }
-       return 1;
-}
-#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-       return 1;
-}
-#endif
-
-/**
- * __send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @this_cpu: the currently executing cpu
- * @softirq: the softirq for the work
- *
- * Attempt to schedule softirq work on a remote cpu.  If this cannot be
- * done, the work is instead queued up on the local cpu.
- *
- * Interrupts must be disabled.
- */
-void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
-{
-       if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
-               __local_trigger(cp, softirq);
-}
-EXPORT_SYMBOL(__send_remote_softirq);
-
-/**
- * send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @softirq: the softirq for the work
- *
- * Like __send_remote_softirq except that disabling interrupts and
- * computing the current cpu is done for the caller.
- */
-void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
-       unsigned long flags;
-       int this_cpu;
-
-       local_irq_save(flags);
-       this_cpu = smp_processor_id();
-       __send_remote_softirq(cp, cpu, this_cpu, softirq);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(send_remote_softirq);
-
-static int remote_softirq_cpu_notify(struct notifier_block *self,
-                                              unsigned long action, void *hcpu)
-{
-       /*
-        * If a CPU goes away, splice its entries to the current CPU
-        * and trigger a run of the softirq
-        */
-       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               int cpu = (unsigned long) hcpu;
-               int i;
-
-               local_irq_disable();
-               for (i = 0; i < NR_SOFTIRQS; i++) {
-                       struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
-                       struct list_head *local_head;
-
-                       if (list_empty(head))
-                               continue;
-
-                       local_head = &__get_cpu_var(softirq_work_list[i]);
-                       list_splice_init(head, local_head);
-                       raise_softirq_irqoff(i);
-               }
-               local_irq_enable();
-       }
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block remote_softirq_cpu_notifier = {
-       .notifier_call  = remote_softirq_cpu_notify,
-};
-
 void __init softirq_init(void)
 {
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               int i;
-
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
-               for (i = 0; i < NR_SOFTIRQS; i++)
-                       INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }
 
-       register_hotcpu_notifier(&remote_softirq_cpu_notifier);
-
        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
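
The remote-softirq machinery is removed without a direct replacement in this file; a caller that still wants softirq-side processing on another CPU can drive the generic IPI layer directly and raise the softirq from the IPI callback on the target CPU. A hedged sketch using only the __smp_call_function_single() interface already visible in the removed code above (MY_SOFTIRQ and the csd ownership rules are hypothetical):

	static void remote_raise(void *info)
	{
		/* Runs in hardirq context on the target cpu, irqs off. */
		raise_softirq_irqoff(MY_SOFTIRQ);
	}

	static void kick_remote_softirq(int cpu, struct call_single_data *csd)
	{
		csd->func  = remote_raise;
		csd->info  = NULL;
		csd->flags = 0;
		__smp_call_function_single(cpu, csd, 0);	/* 0: don't wait */
	}

As in the removed __try_remote_softirq(), the caller owns the call_single_data and must keep it alive until the IPI has run.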