EXPORT_SYMBOL_GPL(event_storage);
LIST_HEAD(ftrace_events);
-LIST_HEAD(ftrace_common_fields);
+static LIST_HEAD(ftrace_common_fields);
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)
#define while_for_each_event_file() \
}
-struct list_head *
+static struct list_head *
trace_get_fields(struct ftrace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}
+static struct ftrace_event_field *
+__find_event_field(struct list_head *head, char *name)
+{
+ struct ftrace_event_field *field;
+
+ list_for_each_entry(field, head, link) {
+ if (!strcmp(field->name, name))
+ return field;
+ }
+
+ return NULL;
+}
+
+struct ftrace_event_field *
+trace_find_event_field(struct ftrace_event_call *call, char *name)
+{
+ struct ftrace_event_field *field;
+ struct list_head *head;
+
+ field = __find_event_field(&ftrace_common_fields, name);
+ if (field)
+ return field;
+
+ head = trace_get_fields(call);
+ return __find_event_field(head, name);
+}
+
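A usage sketch (illustrative only, not part of this patch): the new helper looks a field up by name, checking the common fields first and then the event's own fields. The field name and error handling below are just an example.

	struct ftrace_event_field *field;

	field = trace_find_event_field(call, "common_pid");
	if (!field)
		return -EINVAL;
	pr_debug("found field %s (offset %d, size %d)\n",
		 field->name, field->offset, field->size);
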
static int __trace_define_field(struct list_head *head, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type)
return ret;
}
-void trace_destroy_fields(struct ftrace_event_call *call)
+static void trace_destroy_fields(struct ftrace_event_call *call)
{
struct ftrace_event_field *field, *next;
struct list_head *head;
}
/*
- * Must be called under locking both of event_mutex and trace_event_mutex.
+ * Must be called under locking both of event_mutex and trace_event_sem.
*/
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
void trace_remove_event_call(struct ftrace_event_call *call)
{
mutex_lock(&event_mutex);
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
__trace_remove_event_call(call);
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
mutex_unlock(&event_mutex);
}
struct ftrace_event_call *call, *p;
bool clear_trace = false;
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
list_for_each_entry_safe(call, p, &ftrace_events, list) {
if (call->mod == mod) {
if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
list_del(&file_ops->list);
kfree(file_ops);
}
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
/*
* It is safest to reset the ring buffer if the module being unloaded
if (ret)
goto out_unlock;
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
__trace_add_event_dirs(tr);
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
out_unlock:
mutex_unlock(&event_mutex);
if (ret)
goto out_unlock;
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
__trace_early_add_event_dirs(tr);
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
out_unlock:
mutex_unlock(&event_mutex);
mutex_lock(&event_mutex);
- down_write(&trace_event_mutex);
+ down_write(&trace_event_sem);
__trace_remove_event_dirs(tr);
debugfs_remove_recursive(tr->event_dir);
- up_write(&trace_event_mutex);
+ up_write(&trace_event_sem);
tr->event_dir = NULL;
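
A minimal sketch of the lock-ordering convention the hunks above follow: event_mutex is taken before trace_event_sem, and the directory manipulation happens while both are held. The function name here is hypothetical and only restates the pattern already used in the patch.

	static void example_update_event_dirs(struct trace_array *tr)
	{
		mutex_lock(&event_mutex);	/* outer: serializes event registration */
		down_write(&trace_event_sem);	/* inner: protects the event directory tree */

		__trace_add_event_dirs(tr);	/* any event-dir manipulation goes here */

		up_write(&trace_event_sem);
		mutex_unlock(&event_mutex);
	}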