tracing: Simplify defining of the next event id
author     Wei Yang <richard.weiyang@linux.alibaba.com>
           Fri, 3 Jul 2020 02:06:09 +0000 (10:06 +0800)
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>
           Thu, 9 Jul 2020 22:00:47 +0000 (18:00 -0400)
The value used and compared throughout trace_search_list() is always
"last + 1". Let's just define next to be "last + 1" instead of doing the
addition each time.

Link: https://lkml.kernel.org/r/20200703020612.12930-2-richard.weiyang@linux.alibaba.com
Signed-off-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
kernel/trace/trace_output.c

index 73976de..a35232d 100644
@@ -675,11 +675,11 @@ static LIST_HEAD(ftrace_event_list);
 static int trace_search_list(struct list_head **list)
 {
        struct trace_event *e;
-       int last = __TRACE_LAST_TYPE;
+       int next = __TRACE_LAST_TYPE + 1;
 
        if (list_empty(&ftrace_event_list)) {
                *list = &ftrace_event_list;
-               return last + 1;
+               return next;
        }
 
        /*
@@ -687,17 +687,17 @@ static int trace_search_list(struct list_head **list)
         * lets see if somebody freed one.
         */
        list_for_each_entry(e, &ftrace_event_list, list) {
-               if (e->type != last + 1)
+               if (e->type != next)
                        break;
-               last++;
+               next++;
        }
 
        /* Did we used up all 65 thousand events??? */
-       if ((last + 1) > TRACE_EVENT_TYPE_MAX)
+       if (next > TRACE_EVENT_TYPE_MAX)
                return 0;
 
        *list = &e->list;
-       return last + 1;
+       return next;
 }
 
 void trace_event_read_lock(void)
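
For illustration only, the loop below sketches the same gap-search pattern
outside the kernel: by tracking the next candidate id rather than the last
one handed out, every comparison and every return uses the value directly,
with no repeated "+ 1". The names next_free_id, LAST_RESERVED_ID and ID_MAX
are hypothetical stand-ins and not the kernel's identifiers; this is a
minimal userspace sketch under those assumptions, not the trace_output.c
implementation.

    /* Minimal sketch: find the first free id after the reserved range,
     * given a sorted array of ids already in use (hypothetical names). */
    #include <stdio.h>

    #define LAST_RESERVED_ID   16      /* stand-in for __TRACE_LAST_TYPE    */
    #define ID_MAX             65536   /* stand-in for TRACE_EVENT_TYPE_MAX */

    /* Return the first free id after the reserved range, or 0 if exhausted. */
    static int next_free_id(const int *used, int nr_used)
    {
            int next = LAST_RESERVED_ID + 1;   /* start right after reserved ids */
            int i;

            for (i = 0; i < nr_used; i++) {
                    if (used[i] != next)       /* hole left by a freed id */
                            break;
                    next++;                    /* id taken, try the next one */
            }

            return next > ID_MAX ? 0 : next;
    }

    int main(void)
    {
            int used[] = { 17, 18, 20 };       /* 19 was freed, so it is reused */

            printf("next id: %d\n", next_free_id(used, 3));   /* prints 19 */
            return 0;
    }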