tracing: Kill trace_create_file_ops() and friends
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 50dc8b2..2ec8273 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,38 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
        mutex_unlock(&event_mutex);
 }
 
-static void *event_file_data(struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
 {
-       return ACCESS_ONCE(file_inode(filp)->i_private);
+       if (!dir)
+               return;
+
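+       /* Drop the subsystem directory once its last event file is gone */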
+       if (!--dir->nr_events) {
+               debugfs_remove_recursive(dir->entry);
+               list_del(&dir->list);
+               __put_system_dir(dir);
+       }
 }
 
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void *event_file_data(struct file *filp)
 {
-       struct ftrace_event_file *file = inode->i_private;
-       struct trace_array *tr = file->tr;
-       int ret;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
-
-       ret = tracing_open_generic(inode, filp);
-       if (ret < 0)
-               trace_array_put(tr);
-       return ret;
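+       /* May return NULL once remove_event_file_dir() clears ->i_private */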
+       return ACCESS_ONCE(file_inode(filp)->i_private);
 }
 
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
 {
-       struct ftrace_event_file *file = inode->i_private;
-       struct trace_array *tr = file->tr;
+       struct dentry *dir = file->dir;
+       struct dentry *child;
 
-       trace_array_put(tr);
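+       /*
+        * Clear ->i_private in every child dentry so that readers using
+        * event_file_data() see NULL instead of a soon-to-be-freed file.
+        */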
+       if (dir) {
+               spin_lock(&dir->d_lock);        /* probably unneeded */
+               list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+                       if (child->d_inode)     /* probably unneeded */
+                               child->d_inode->i_private = NULL;
+               }
+               spin_unlock(&dir->d_lock);
 
-       return 0;
+               debugfs_remove_recursive(dir);
+       }
+
+       list_del(&file->list);
+       remove_subsystem(file->system);
+       kmem_cache_free(file_cachep, file);
 }
 
 /*
@@ -1261,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = {
 };
 
 static const struct file_operations ftrace_enable_fops = {
-       .open = tracing_open_generic_file,
+       .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
-       .release = tracing_release_generic_file,
        .llseek = default_llseek,
 };
 
@@ -1549,33 +1552,16 @@ event_create_dir(struct dentry *parent,
        return 0;
 }
 
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
-       if (!dir)
-               return;
-
-       if (!--dir->nr_events) {
-               debugfs_remove_recursive(dir->entry);
-               list_del(&dir->list);
-               __put_system_dir(dir);
-       }
-}
-
 static void remove_event_from_tracers(struct ftrace_event_call *call)
 {
        struct ftrace_event_file *file;
        struct trace_array *tr;
 
        do_for_each_event_file_safe(tr, file) {
-
                if (file->event_call != call)
                        continue;
 
-               list_del(&file->list);
-               debugfs_remove_recursive(file->dir);
-               remove_subsystem(file->system);
-               kmem_cache_free(file_cachep, file);
-
+               remove_event_file_dir(file);
                /*
                 * The do_for_each_event_file_safe() is
                 * a double loop. After finding the call for this
@@ -1697,8 +1683,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
 }
 
 struct ftrace_module_file_ops;
-static void __add_event_to_tracers(struct ftrace_event_call *call,
-                                  struct ftrace_module_file_ops *file_ops);
+static void __add_event_to_tracers(struct ftrace_event_call *call);
 
 /* Add an additional event_call dynamically */
 int trace_add_event_call(struct ftrace_event_call *call)
@@ -1709,7 +1694,7 @@ int trace_add_event_call(struct ftrace_event_call *call)
 
        ret = __register_event(call, NULL);
        if (ret >= 0)
-               __add_event_to_tracers(call, NULL);
+               __add_event_to_tracers(call);
 
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
@@ -1727,16 +1712,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
        destroy_preds(call);
 }
 
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+       struct trace_array *tr;
+       struct ftrace_event_file *file;
+
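+       /* Refuse the removal if perf still has users of this event */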
+#ifdef CONFIG_PERF_EVENTS
+       if (call->perf_refcount)
+               return -EBUSY;
+#endif
+       do_for_each_event_file(tr, file) {
+               if (file->event_call != call)
+                       continue;
+               /*
+                * We can't rely on the ftrace_event_enable_disable(enable => 0)
+                * we are going to do; FTRACE_EVENT_FL_SOFT_MODE can suppress
+                * TRACE_REG_UNREGISTER.
+                */
+               if (file->flags & FTRACE_EVENT_FL_ENABLED)
+                       return -EBUSY;
+               /*
+                * The do_for_each_event_file() is
+                * a double loop. After finding the call for this
+                * trace_array, we use break to jump to the next
+                * trace_array.
+                */
+               break;
+       } while_for_each_event_file();
+
+       __trace_remove_event_call(call);
+
+       return 0;
+}
+
 /* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
 {
+       int ret;
+
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        down_write(&trace_event_sem);
-       __trace_remove_event_call(call);
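+       /* Fails with -EBUSY if the event is still enabled or in use by perf */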
+       ret = probe_remove_event_call(call);
        up_write(&trace_event_sem);
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);
+
+       return ret;
 }
 
 #define for_each_event(event, start, end)                      \
@@ -1746,100 +1768,21 @@ void trace_remove_event_call(struct ftrace_event_call *call)
 
 #ifdef CONFIG_MODULES
 
-static LIST_HEAD(ftrace_module_file_list);
-
-/*
- * Modules must own their file_operations to keep up with
- * reference counting.
- */
-struct ftrace_module_file_ops {
-       struct list_head                list;
-       struct module                   *mod;
-       struct file_operations          id;
-       struct file_operations          enable;
-       struct file_operations          format;
-       struct file_operations          filter;
-};
-
-static struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-       /*
-        * As event_calls are added in groups by module,
-        * when we find one file_ops, we don't need to search for
-        * each call in that module, as the rest should be the
-        * same. Only search for a new one if the last one did
-        * not match.
-        */
-       if (file_ops && mod == file_ops->mod)
-               return file_ops;
-
-       list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-               if (file_ops->mod == mod)
-                       return file_ops;
-       }
-       return NULL;
-}
-
-static struct ftrace_module_file_ops *
-trace_create_file_ops(struct module *mod)
-{
-       struct ftrace_module_file_ops *file_ops;
-
-       /*
-        * This is a bit of a PITA. To allow for correct reference
-        * counting, modules must "own" their file_operations.
-        * To do this, we allocate the file operations that will be
-        * used in the event directory.
-        */
-
-       file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
-       if (!file_ops)
-               return NULL;
-
-       file_ops->mod = mod;
-
-       file_ops->id = ftrace_event_id_fops;
-       file_ops->id.owner = mod;
-
-       file_ops->enable = ftrace_enable_fops;
-       file_ops->enable.owner = mod;
-
-       file_ops->filter = ftrace_event_filter_fops;
-       file_ops->filter.owner = mod;
-
-       file_ops->format = ftrace_event_format_fops;
-       file_ops->format.owner = mod;
-
-       list_add(&file_ops->list, &ftrace_module_file_list);
-
-       return file_ops;
-}
-
 static void trace_module_add_events(struct module *mod)
 {
-       struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call **call, **start, **end;
 
        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;
 
-       if (start == end)
-               return;
-
-       file_ops = trace_create_file_ops(mod);
-       if (!file_ops)
-               return;
-
        for_each_event(call, start, end) {
                __register_event(*call, mod);
-               __add_event_to_tracers(*call, file_ops);
+               __add_event_to_tracers(*call);
        }
 }
 
 static void trace_module_remove_events(struct module *mod)
 {
-       struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool clear_trace = false;
 
@@ -1851,16 +1794,6 @@ static void trace_module_remove_events(struct module *mod)
                        __trace_remove_event_call(call);
                }
        }
-
-       /* Now free the file_operations */
-       list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-               if (file_ops->mod == mod)
-                       break;
-       }
-       if (&file_ops->list != &ftrace_module_file_list) {
-               list_del(&file_ops->list);
-               kfree(file_ops);
-       }
        up_write(&trace_event_sem);
 
        /*
@@ -1896,62 +1829,22 @@ static int trace_module_notify(struct notifier_block *self,
        return 0;
 }
 
-static int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-                         struct trace_array *tr,
-                         struct ftrace_module_file_ops *file_ops)
-{
-       return __trace_add_new_event(call, tr,
-                                    &file_ops->id, &file_ops->enable,
-                                    &file_ops->filter, &file_ops->format);
-}
-
 #else
-static inline struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-       return NULL;
-}
 static inline int trace_module_notify(struct notifier_block *self,
                                      unsigned long val, void *data)
 {
        return 0;
 }
-static inline int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-                         struct trace_array *tr,
-                         struct ftrace_module_file_ops *file_ops)
-{
-       return -ENODEV;
-}
 #endif /* CONFIG_MODULES */
 
 /* Create a new event directory structure for a trace directory. */
 static void
 __trace_add_event_dirs(struct trace_array *tr)
 {
-       struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call *call;
        int ret;
 
        list_for_each_entry(call, &ftrace_events, list) {
-               if (call->mod) {
-                       /*
-                        * Directories for events by modules need to
-                        * keep module ref counts when opened (as we don't
-                        * want the module to disappear when reading one
-                        * of these files). The file_ops keep account of
-                        * the module ref count.
-                        */
-                       file_ops = find_ftrace_file_ops(file_ops, call->mod);
-                       if (!file_ops)
-                               continue; /* Warn? */
-                       ret = __trace_add_new_mod_event(call, tr, file_ops);
-                       if (ret < 0)
-                               pr_warning("Could not create directory for event %s\n",
-                                          call->name);
-                       continue;
-               }
                ret = __trace_add_new_event(call, tr,
                                            &ftrace_event_id_fops,
                                            &ftrace_enable_fops,
@@ -2305,29 +2198,20 @@ __trace_remove_event_dirs(struct trace_array *tr)
 {
        struct ftrace_event_file *file, *next;
 
-       list_for_each_entry_safe(file, next, &tr->events, list) {
-               list_del(&file->list);
-               debugfs_remove_recursive(file->dir);
-               remove_subsystem(file->system);
-               kmem_cache_free(file_cachep, file);
-       }
+       list_for_each_entry_safe(file, next, &tr->events, list)
+               remove_event_file_dir(file);
 }
 
-static void
-__add_event_to_tracers(struct ftrace_event_call *call,
-                      struct ftrace_module_file_ops *file_ops)
+static void __add_event_to_tracers(struct ftrace_event_call *call)
 {
        struct trace_array *tr;
 
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-               if (file_ops)
-                       __trace_add_new_mod_event(call, tr, file_ops);
-               else
-                       __trace_add_new_event(call, tr,
-                                             &ftrace_event_id_fops,
-                                             &ftrace_enable_fops,
-                                             &ftrace_event_filter_fops,
-                                             &ftrace_event_format_fops);
+               __trace_add_new_event(call, tr,
+                                     &ftrace_event_id_fops,
+                                     &ftrace_enable_fops,
+                                     &ftrace_event_filter_fops,
+                                     &ftrace_event_format_fops);
        }
 }