LOCAL / sched: Fix build break by using alternative function
[platform/kernel/linux-exynos.git] / kernel/smp.c
index 0785447..8eb18ff 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/smp.h>
 
 #include "smpboot.h"
 
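Note: with CREATE_TRACE_POINTS defined, the include added above expects a trace/events/smp.h header that is not part of this diff. A minimal sketch of what such a header could look like, following the standard TRACE_EVENT conventions (the event names are taken from the calls added below; the field layout and format strings are assumptions):

/* Sketch of include/trace/events/smp.h (assumed; not part of this diff). */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM smp

#if !defined(_TRACE_SMP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SMP_H

#include <linux/tracepoint.h>

/* Entry and exit share one record layout: just the callback address. */
DECLARE_EVENT_CLASS(smp_call_class,

        TP_PROTO(void *func),

        TP_ARGS(func),

        TP_STRUCT__entry(
                __field(void *, func)
        ),

        TP_fast_assign(
                __entry->func = func;
        ),

        TP_printk("func=%ps", __entry->func)
);

DEFINE_EVENT(smp_call_class, smp_call_func_entry,
        TP_PROTO(void *func),
        TP_ARGS(func)
);

DEFINE_EVENT(smp_call_class, smp_call_func_exit,
        TP_PROTO(void *func),
        TP_ARGS(func)
);

/* Emitted when a cross-call is queued for (or handed directly to) a CPU. */
TRACE_EVENT(smp_call_func_send,

        TP_PROTO(void *func, int dest_cpu),

        TP_ARGS(func, dest_cpu),

        TP_STRUCT__entry(
                __field(void *, func)
                __field(int, dest_cpu)
        ),

        TP_fast_assign(
                __entry->func = func;
                __entry->dest_cpu = dest_cpu;
        ),

        TP_printk("dest_cpu=%d func=%ps", __entry->dest_cpu, __entry->func)
);

#endif /* _TRACE_SMP_H */

/* This must stay outside the include guard. */
#include <trace/define_trace.h>

Sharing a DECLARE_EVENT_CLASS keeps smp_call_func_entry and smp_call_func_exit on one definition; only smp_call_func_send needs the extra destination-CPU field.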
@@ -153,7 +155,9 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
                 */
                csd_unlock(csd);
                local_irq_save(flags);
+               trace_smp_call_func_entry(func);
                func(info);
+               trace_smp_call_func_exit(func);
                local_irq_restore(flags);
                return 0;
        }
@@ -178,8 +182,10 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
-       if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+       if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+               trace_smp_call_func_send(csd->func, cpu);
                arch_send_call_function_single_ipi(cpu);
+       }
 
        return 0;
 }
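For completeness, the tracepoints added above can be consumed like any other: register_trace_* helpers are generated from the event header, so built-in code can attach a probe. The snippet below is purely illustrative (the probe name, message, and prototype follow the sketched header, which is itself an assumption):

/* Purely illustrative consumer of the new send tracepoint (not in the patch).
 * A probe callback receives the registered data pointer first, followed by
 * the arguments declared in TP_PROTO.
 */
#include <linux/init.h>
#include <linux/printk.h>
#include <trace/events/smp.h>

static void probe_smp_call_func_send(void *ignore, void *func, int dest_cpu)
{
        pr_debug("cross call %ps queued for cpu %d\n", func, dest_cpu);
}

static int __init smp_trace_probe_init(void)
{
        /* register_trace_smp_call_func_send() is generated from the header. */
        return register_trace_smp_call_func_send(probe_smp_call_func_send,
                                                 NULL);
}
late_initcall(smp_trace_probe_init);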
@@ -240,14 +246,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;
-
                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+                       trace_smp_call_func_entry(func);
                        func(info);
+                       trace_smp_call_func_exit(func);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
+                       trace_smp_call_func_entry(func);
                        func(info);
+                       trace_smp_call_func_exit(func);
                }
        }
 
@@ -276,6 +285,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
        int this_cpu;
        int err;
 
+       trace_smp_call_func_send(func, cpu);
        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal