X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=kernel%2Fsmp.c;h=8eb18ff6c523e6aefc17a626d497fa7205deab41;hb=d5229e8d7df9e95ecfe220f1c0091a0fd65c658b;hp=07854477c16447ff87df874b4f117bcafadd4f33;hpb=1fc149933fd49a5b0e7738dc0853dbfbac4ae0e1;p=platform%2Fkernel%2Flinux-exynos.git

diff --git a/kernel/smp.c b/kernel/smp.c
index 0785447..8eb18ff 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,8 @@
 #include <linux/gfp.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/smp.h>
 
 #include "smpboot.h"
@@ -153,7 +155,9 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 		 */
 		csd_unlock(csd);
 		local_irq_save(flags);
+		trace_smp_call_func_entry(func);
 		func(info);
+		trace_smp_call_func_exit(func);
 		local_irq_restore(flags);
 		return 0;
 	}
@@ -178,8 +182,10 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+		trace_smp_call_func_send(csd->func, cpu);
 		arch_send_call_function_single_ipi(cpu);
+	}
 
 	return 0;
 }
@@ -240,14 +246,17 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 		smp_call_func_t func = csd->func;
 		void *info = csd->info;
-
 		/* Do we wait until *after* callback? */
 		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
+			trace_smp_call_func_entry(func);
 			func(info);
+			trace_smp_call_func_exit(func);
 			csd_unlock(csd);
 		} else {
 			csd_unlock(csd);
+			trace_smp_call_func_entry(func);
 			func(info);
+			trace_smp_call_func_exit(func);
 		}
 	}
 
@@ -276,6 +285,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	int this_cpu;
 	int err;
 
+	trace_smp_call_func_send(func, cpu);
 	/*
 	 * prevent preemption and reschedule on another processor,
 	 * as well as CPU removal
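
Note: the trace/events/smp.h header pulled in above via CREATE_TRACE_POINTS is
added by a separate patch in this series and is not part of this blobdiff. As a
rough sketch of what that header plausibly contains -- assuming the kernel's
standard DECLARE_EVENT_CLASS/DEFINE_EVENT/TRACE_EVENT boilerplate, with field
names and printk formats chosen for illustration -- the three events used in
the hunks above could be defined like this:

/* include/trace/events/smp.h -- illustrative sketch, not from this diff. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM smp

#if !defined(_TRACE_SMP_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SMP_H

#include <linux/tracepoint.h>

/* Entry and exit share one class: both record only the callback address. */
DECLARE_EVENT_CLASS(smp_call_class,
	TP_PROTO(void *fnc),
	TP_ARGS(fnc),
	TP_STRUCT__entry(
		__field(void *, func)
	),
	TP_fast_assign(
		__entry->func = fnc;
	),
	TP_printk("func=%pf", __entry->func)
);

/* Fired immediately before and after the cross-call callback runs. */
DEFINE_EVENT(smp_call_class, smp_call_func_entry,
	TP_PROTO(void *fnc),
	TP_ARGS(fnc)
);

DEFINE_EVENT(smp_call_class, smp_call_func_exit,
	TP_PROTO(void *fnc),
	TP_ARGS(fnc)
);

/* Fired on the sending CPU, recording the callback and the target CPU. */
TRACE_EVENT(smp_call_func_send,
	TP_PROTO(void *func, int dest),
	TP_ARGS(func, dest),
	TP_STRUCT__entry(
		__field(void *, func)
		__field(int, dest)
	),
	TP_fast_assign(
		__entry->func = func;
		__entry->dest = dest;
	),
	TP_printk("dest=%d func=%pf", __entry->dest, __entry->func)
);

#endif /* _TRACE_SMP_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>

With such a header in place, the events appear under events/smp/ in tracefs
and can be enabled like any other tracepoint group.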
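
For orientation, a hypothetical caller showing where each tracepoint fires:
smp_call_function_single() emits smp_call_func_send on entry (last hunk);
generic_exec_single() emits it again if it actually has to raise the IPI for a
remote CPU, or brackets the callback with entry/exit when the target is the
local CPU; and flush_smp_call_function_queue() brackets the callback with
entry/exit on the receiving CPU. The function names below are illustrative
only:

#include <linux/smp.h>

/* Illustrative callback: runs on the target CPU out of the IPI path,
 * between trace_smp_call_func_entry and trace_smp_call_func_exit.
 */
static void remote_tick(void *info)
{
	int *counter = info;

	(*counter)++;
}

static int kick_cpu(int target_cpu)
{
	int counter = 0;

	/*
	 * wait=1: the sender emits trace_smp_call_func_send, queues the
	 * csd (raising an IPI if the target's queue was empty), then
	 * spins until the target CPU has run remote_tick() and the csd
	 * has been unlocked.
	 */
	return smp_call_function_single(target_cpu, remote_tick, &counter, 1);
}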