* sizeof(u64));
* __entry_size -= sizeof(u32);
*
- * do {
- * char raw_data[__entry_size]; <- allocate our sample in the stack
- * struct trace_entry *ent;
+ * // Protect the non-NMI buffer
+ * // This also protects the RCU read side
+ * local_irq_save(irq_flags);
+ * __cpu = smp_processor_id();
+ *
+ * if (in_nmi())
+ * raw_data = rcu_dereference(trace_profile_buf_nmi);
+ * else
+ * raw_data = rcu_dereference(trace_profile_buf);
+ *
+ * if (!raw_data)
+ * goto end;
*
- * zero dead bytes from alignment to avoid stack leak to userspace:
+ * raw_data = per_cpu_ptr(raw_data, __cpu);
*
- * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- * entry = (struct ftrace_raw_<call> *)raw_data;
- * ent = &entry->ent;
- * tracing_generic_entry_update(ent, irq_flags, pc);
- * ent->type = event_call->id;
+ * // Zero dead bytes from alignment to avoid leaking stale buffer contents to userspace:
+ * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
+ * entry = (struct ftrace_raw_<call> *)raw_data;
+ * ent = &entry->ent;
+ * tracing_generic_entry_update(ent, irq_flags, pc);
+ * ent->type = event_call->id;
*
- * <tstruct> <- do some jobs with dynamic arrays
+ * <tstruct> <- handle the dynamic arrays
*
- * <assign> <- affect our values
+ * <assign> <- assign our values
*
- * perf_tpcounter_event(event_call->id, __addr, __count, entry,
- * __entry_size); <- submit them to perf counter
- * } while (0);
+ * perf_tp_event(event_call->id, __addr, __count, entry,
+ * __entry_size); <- submit them to perf counter
*
* }
*/
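A note on the size rounding that both the old and the new code share: perf prepends a u32 size field to each raw sample, so __entry_size is presumably chosen such that the u32 header plus the payload ends on a u64 boundary, and the tail padding this creates is then zeroed explicitly so stale bytes never reach userspace. A minimal standalone sketch of the arithmetic (ALIGN mirrors the kernel macro; the 30-byte record size is a made-up example):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t backing[8];		/* u64-aligned stand-in for the buffer slot */
		char *raw_data = (char *)backing;
		uint64_t entry_size = 30;	/* hypothetical raw record size */

		/* Round u32 header + payload up to a u64 boundary */
		entry_size = ALIGN(entry_size + sizeof(uint32_t), sizeof(uint64_t));
		entry_size -= sizeof(uint32_t);	/* 36 here: 40 total minus the 4-byte header */

		/* Simulate stale data left behind by a previous event */
		memset(raw_data, 0xff, sizeof(backing));

		/* The same store the patch does: clear the last word so the
		 * 6 padding bytes after the 30-byte record cannot leak */
		*(uint64_t *)(&raw_data[entry_size - sizeof(uint64_t)]) = 0ULL;

		printf("payload %llu bytes, tail word cleared\n",
		       (unsigned long long)entry_size);
		return 0;
	}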
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
- do { \
- char raw_data[__entry_size]; \
- struct trace_entry *ent; \
+ if (WARN_ONCE(__entry_size > FTRACE_MAX_PROFILE_SIZE, \
+ "profile buffer not large enough")) \
+ return; \
+ \
+ local_irq_save(irq_flags); \
+ __cpu = smp_processor_id(); \
\
- *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
- entry = (struct ftrace_raw_##call *)raw_data; \
- ent = &entry->ent; \
- tracing_generic_entry_update(ent, irq_flags, pc); \
- ent->type = event_call->id; \
+ if (in_nmi()) \
+ raw_data = rcu_dereference(trace_profile_buf_nmi); \
+ else \
+ raw_data = rcu_dereference(trace_profile_buf); \
\
- tstruct \
+ if (!raw_data) \
+ goto end; \
\
- { assign; } \
+ raw_data = per_cpu_ptr(raw_data, __cpu); \
\
+ *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
+ entry = (struct ftrace_raw_##call *)raw_data; \
+ ent = &entry->ent; \
+ tracing_generic_entry_update(ent, irq_flags, pc); \
+ ent->type = event_call->id; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
- perf_tpcounter_event(event_call->id, __addr, __count, entry, \
+ perf_tp_event(event_call->id, __addr, __count, entry, \
__entry_size); \
- } while (0); \
+ \
+end: \
+ local_irq_restore(irq_flags); \
\
}
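The macro dereferences trace_profile_buf and trace_profile_buf_nmi without showing where they come from: these are the two shared per-cpu buffers (one for ordinary contexts, one for NMIs) that the patch publishes on the event-enable path. As a hedged sketch of that publish side, assuming FTRACE_MAX_PROFILE_SIZE bytes per cpu; profile_buf_t and profile_buffers_alloc() are illustrative names, not necessarily the patch's:

	/* One FTRACE_MAX_PROFILE_SIZE slot per possible cpu */
	typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) profile_buf_t;

	static char *trace_profile_buf;
	static char *trace_profile_buf_nmi;

	/* Illustrative enable path: allocate both buffers, then publish */
	static int profile_buffers_alloc(void)
	{
		char *buf, *nmi_buf;

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			return -ENOMEM;

		nmi_buf = (char *)alloc_percpu(profile_buf_t);
		if (!nmi_buf) {
			free_percpu(buf);
			return -ENOMEM;
		}

		/* Publish; pairs with the rcu_dereference() in the probes */
		rcu_assign_pointer(trace_profile_buf, buf);
		rcu_assign_pointer(trace_profile_buf_nmi, nmi_buf);

		return 0;
	}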
size = ALIGN(size + sizeof(u32), sizeof(u64));
size -= sizeof(u32);
- do {
- char raw_data[size];
+ if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+ "profile buffer not large enough"))
+ return;
+
+ /* Protect the per-cpu buffer, begin the RCU read side */
+ local_irq_save(flags);
- /* zero the dead bytes from align to not leak stack to user */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+ cpu = smp_processor_id();
+
+ if (in_nmi())
+ raw_data = rcu_dereference(trace_profile_buf_nmi);
+ else
+ raw_data = rcu_dereference(trace_profile_buf);
+
+ if (!raw_data)
+ goto end;
- rec = (struct syscall_trace_enter *) raw_data;
- tracing_generic_entry_update(&rec->ent, 0, 0);
- rec->ent.type = sys_data->enter_id;
- rec->nr = syscall_nr;
- syscall_get_arguments(current, regs, 0, sys_data->nb_args,
- (unsigned long *)&rec->args);
- perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
- } while(0);
+ raw_data = per_cpu_ptr(raw_data, cpu);
+
+ /* Zero the dead bytes from alignment so we don't leak stale data to userspace */
+ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+ rec = (struct syscall_trace_enter *) raw_data;
+ tracing_generic_entry_update(&rec->ent, 0, 0);
+ rec->ent.type = sys_data->enter_id;
+ rec->nr = syscall_nr;
+ syscall_get_arguments(current, regs, 0, sys_data->nb_args,
+ (unsigned long *)&rec->args);
+ perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
+
+end:
+ local_irq_restore(flags);
}
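One more observation: the buffer-lookup preamble (local_irq_save(), the in_nmi() test, rcu_dereference(), per_cpu_ptr()) is now repeated verbatim in the event macro and in both syscall probes. Purely as a hypothetical refactoring sketch, not something this patch does, the common part could be factored into a helper whose contract is that irqs stay disabled until the caller has finished writing its sample, since disabled irqs are what pin the cpu's slot and hold the RCU read side:

	/*
	 * Hypothetical helper, not in the patch: pick the buffer for the
	 * current context and return this cpu's slot, or NULL.  On success
	 * irqs are left disabled; the caller runs local_irq_restore(*flags)
	 * once it is done with the buffer.
	 */
	static void *profile_buf_get(unsigned long *flags)
	{
		char *raw_data;

		local_irq_save(*flags);

		if (in_nmi())
			raw_data = rcu_dereference(trace_profile_buf_nmi);
		else
			raw_data = rcu_dereference(trace_profile_buf);

		if (!raw_data) {
			local_irq_restore(*flags);
			return NULL;
		}

		return per_cpu_ptr(raw_data, smp_processor_id());
	}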
int reg_prof_syscall_enter(char *name)
if (!sys_data)
return;
- tracing_generic_entry_update(&rec.ent, 0, 0);
- rec.ent.type = sys_data->exit_id;
- rec.nr = syscall_nr;
- rec.ret = syscall_get_return_value(current, regs);
+ /* We could probably compute this size at build time */
+ size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
- perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
+ /*
+ * Impossible today, but be paranoid about the future:
+ * ideally this check would happen at build time.
+ */
+ if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+ "exit event has grown above profile buffer size"))
+ return;
+
+ /* Protect the per-cpu buffer, begin the RCU read side */
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+
+ if (in_nmi())
+ raw_data = rcu_dereference(trace_profile_buf_nmi);
+ else
+ raw_data = rcu_dereference(trace_profile_buf);
+
+ if (!raw_data)
+ goto end;
+
+ raw_data = per_cpu_ptr(raw_data, cpu);
+
+ /* Zero the dead bytes from alignment so we don't leak stale data to userspace */
+ *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+
+ rec = (struct syscall_trace_exit *)raw_data;
+
+ tracing_generic_entry_update(&rec->ent, 0, 0);
+ rec->ent.type = sys_data->exit_id;
+ rec->nr = syscall_nr;
+ rec->ret = syscall_get_return_value(current, regs);
+
+ perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
+
+end:
+ local_irq_restore(flags);
}
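Finally, why a bare local_irq_save() can stand in for rcu_read_lock() in all three probes: with interrupts off the cpu cannot pass through a quiescent state, so each section behaves as an RCU-sched read-side critical section. The disable path, not visible in these hunks, then only needs to unpublish the pointers and wait for such readers before freeing; presumably something along these lines (a sketch matching the allocation sketch earlier, with an illustrative function name):

	static void profile_buffers_release(void)
	{
		char *buf = trace_profile_buf;
		char *nmi_buf = trace_profile_buf_nmi;

		rcu_assign_pointer(trace_profile_buf, NULL);
		rcu_assign_pointer(trace_profile_buf_nmi, NULL);

		/*
		 * All readers run with irqs disabled, so once every cpu
		 * has passed through a quiescent state none of them can
		 * still hold a pointer to the old buffers.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}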
int reg_prof_syscall_exit(char *name)