perf: Prevent false warning in perf_swevent_add

During cpu hotplug, a task-context software event can be scheduled in
on a cpu whose swevent hash list has already been torn down, so
find_swevent_head() legitimately returns NULL and the unconditional
WARN_ON_ONCE(!head) in perf_swevent_add() fired falsely. Track the
cpu's online state in struct swevent_htable and warn only when the
cpu is actually online; on an offlined cpu a NULL head is expected
and is simply reported as -EINVAL.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f574401..e80611f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -119,7 +119,8 @@ static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
 
 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
-                      PERF_FLAG_PID_CGROUP)
+                      PERF_FLAG_PID_CGROUP |\
+                      PERF_FLAG_FD_CLOEXEC)
 
 /*
  * branch priv levels that need permission checks
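
With PERF_FLAG_FD_CLOEXEC accepted by the flag mask, userspace gets a
close-on-exec event fd atomically at creation time instead of racing a
separate fcntl(F_SETFD, FD_CLOEXEC) against a concurrent fork()+exec()
in another thread. A minimal userspace sketch; the fallback #define
assumes the uapi value (1U << 3), and open_task_clock() is just an
illustrative name:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    #ifndef PERF_FLAG_FD_CLOEXEC
    #define PERF_FLAG_FD_CLOEXEC (1U << 3)  /* assumed uapi value */
    #endif

    static int open_task_clock(void)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_TASK_CLOCK;

            /* pid 0 = calling task, cpu -1 = any cpu, no group fd */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1,
                           PERF_FLAG_FD_CLOEXEC);
    }

On kernels without this change the call fails with EINVAL, since the
flag is rejected by the "flags & ~PERF_FLAG_ALL" check above, which
also gives callers a cheap way to probe for support.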
@@ -3542,7 +3543,7 @@ static void perf_event_for_each(struct perf_event *event,
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
        struct perf_event_context *ctx = event->ctx;
-       int ret = 0;
+       int ret = 0, active;
        u64 value;
 
        if (!is_sampling_event(event))
@@ -3566,6 +3567,20 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                event->attr.sample_period = value;
                event->hw.sample_period = value;
        }
+
+       active = (event->state == PERF_EVENT_STATE_ACTIVE);
+       if (active) {
+               perf_pmu_disable(ctx->pmu);
+               event->pmu->stop(event, PERF_EF_UPDATE);
+       }
+
+       local64_set(&event->hw.period_left, 0);
+
+       if (active) {
+               event->pmu->start(event, PERF_EF_RELOAD);
+               perf_pmu_enable(ctx->pmu);
+       }
+
 unlock:
        raw_spin_unlock_irq(&ctx->lock);
 
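This function sits behind the PERF_EVENT_IOC_PERIOD ioctl. Previously
a new period only took effect once the remainder of the old one had
expired; the added stop/clear/start sequence applies it to an active
event immediately: pmu->stop(PERF_EF_UPDATE) saves the current count,
clearing hw.period_left discards the leftover of the old period, and
pmu->start(PERF_EF_RELOAD) rearms the hardware with the new value.
From userspace the path is driven like this (a small sketch; fd is
assumed to be a sampling event fd from perf_event_open()):

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    /* Ask the kernel to apply a new sampling period right away. */
    static int set_sample_period(int fd, unsigned long long period)
    {
            return ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
    }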
@@ -5391,6 +5406,9 @@ struct swevent_htable {
 
        /* Recursion avoidance in each contexts */
        int                             recursion[PERF_NR_CONTEXTS];
+
+       /* Tracks whether this cpu is online; set on init, cleared on exit */
+       bool                            online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5637,8 +5655,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
        hwc->state = !(flags & PERF_EF_START);
 
        head = find_swevent_head(swhash, event);
-       if (WARN_ON_ONCE(!head))
+       if (!head) {
+               /*
+                * We can race with cpu hotplug code. Do not
+                * WARN if the cpu just got unplugged.
+                */
+               WARN_ON_ONCE(swhash->online);
                return -EINVAL;
+       }
 
        hlist_add_head_rcu(&event->hlist_entry, head);
 
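The warning was a false positive in a narrow hotplug window: a
task-context software event can be scheduled in on a cpu whose swevent
hash has already been released, so a NULL head is legitimate there. A
rough timeline of the race this hunk tolerates, using names from this
file:

    /*
     *  cpu notifier (cpu X going down)     task scheduled in on cpu X
     *  -------------------------------     --------------------------
     *  perf_event_exit_cpu(X)
     *    swhash->online = false
     *    swevent_hlist_release(swhash)
     *                                      perf_swevent_add()
     *                                        find_swevent_head() == NULL
     *                                        swhash->online is false:
     *                                        skip the WARN, return -EINVAL
     */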
@@ -6670,6 +6694,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
        INIT_LIST_HEAD(&event->rb_entry);
+       INIT_LIST_HEAD(&event->active_entry);
+       INIT_HLIST_NODE(&event->hlist_entry);
+
 
        init_waitqueue_head(&event->waitq);
        init_irq_work(&event->pending, perf_pending_event);
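
Initializing active_entry and hlist_entry up front gives every event
list nodes in a well-defined "not linked" state, so removal paths can
detect an event that was never added instead of dereferencing garbage;
presumably this is what lets the hotplug teardown below walk all of
ctx->event_list safely. The generic idiom, as a small sketch with
hypothetical names (struct item, item_init, item_unlink):

    #include <linux/list.h>

    struct item {
            struct hlist_node node;
    };

    static void item_init(struct item *it)
    {
            INIT_HLIST_NODE(&it->node);     /* pprev == NULL: "unhashed" */
    }

    static void item_unlink(struct item *it)
    {
            /* Safe even if item_init() was never followed by an add. */
            if (!hlist_unhashed(&it->node))
                    hlist_del_init(&it->node);
    }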
@@ -6980,6 +7007,7 @@ SYSCALL_DEFINE5(perf_event_open,
        int event_fd;
        int move_group = 0;
        int err;
+       int f_flags = O_RDWR;
 
        /* for future expandability... */
        if (flags & ~PERF_FLAG_ALL)
@@ -7008,7 +7036,10 @@ SYSCALL_DEFINE5(perf_event_open,
        if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
                return -EINVAL;
 
-       event_fd = get_unused_fd();
+       if (flags & PERF_FLAG_FD_CLOEXEC)
+               f_flags |= O_CLOEXEC;
+
+       event_fd = get_unused_fd_flags(f_flags);
        if (event_fd < 0)
                return event_fd;
 
@@ -7130,7 +7161,8 @@ SYSCALL_DEFINE5(perf_event_open,
                        goto err_context;
        }
 
-       event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
+       event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
+                                       f_flags);
        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
                goto err_context;
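f_flags feeds both halves of fd creation: get_unused_fd_flags() makes
the descriptor-table entry close-on-exec from the start (close-on-exec
is a property of the descriptor, not of the struct file), while
anon_inode_getfile() uses the same flags to set up the file's access
mode, O_RDWR here. The surrounding idiom, sketched with hypothetical
names (my_fops, my_create_fd):

    #include <linux/anon_inodes.h>
    #include <linux/err.h>
    #include <linux/file.h>
    #include <linux/fs.h>

    static const struct file_operations my_fops;    /* hypothetical fops */

    static int my_create_fd(void *priv, int f_flags)
    {
            struct file *file;
            int fd;

            fd = get_unused_fd_flags(f_flags);      /* honours O_CLOEXEC */
            if (fd < 0)
                    return fd;

            file = anon_inode_getfile("[my_obj]", &my_fops, priv, f_flags);
            if (IS_ERR(file)) {
                    put_unused_fd(fd);
                    return PTR_ERR(file);
            }

            fd_install(fd, file);   /* fd only becomes visible here */
            return fd;
    }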
@@ -7810,6 +7842,7 @@ static void perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = true;
        if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
@@ -7833,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 static void __perf_event_exit_context(void *__info)
 {
        struct perf_event_context *ctx = __info;
-       struct perf_event *event, *tmp;
+       struct perf_event *event;
 
        perf_pmu_rotate_stop(ctx->pmu);
 
-       list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-               __perf_remove_from_context(event);
-       list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+       rcu_read_lock();
+       list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
                __perf_remove_from_context(event);
+       rcu_read_unlock();
 }
 
 static void perf_event_exit_cpu_context(int cpu)
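
This walk stays safe while it empties the list because list_del_rcu(),
used when __perf_remove_from_context() takes an event off
ctx->event_list, leaves the removed entry's next pointer intact, so
list_for_each_entry_rcu() can still advance past it under
rcu_read_lock(). It also closes a coverage gap: event_list holds every
event in the context, whereas the old pinned_groups/flexible_groups
walk only visited group leaders (siblings hang off the leader's
sibling_list). The pattern in isolation, with hypothetical obj names:

    #include <linux/list.h>
    #include <linux/rcupdate.h>

    struct obj {
            struct list_head entry;
    };

    /* Removal keeps o->entry.next valid, so the current RCU
     * iteration can still step over the removed node. */
    static void obj_remove(struct obj *o)
    {
            list_del_rcu(&o->entry);
    }

    static void drain(struct list_head *head)
    {
            struct obj *o;

            rcu_read_lock();
            list_for_each_entry_rcu(o, head, entry)
                    obj_remove(o);
            rcu_read_unlock();
    }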
@@ -7864,11 +7897,12 @@ static void perf_event_exit_cpu(int cpu)
 {
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
+       perf_event_exit_cpu_context(cpu);
+
        mutex_lock(&swhash->hlist_mutex);
+       swhash->online = false;
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);
-
-       perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
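
The reordering in perf_event_exit_cpu() is the other half of the fix:
the cpu's context events are removed before the swevent hash they may
still reference is released, and online is cleared under hlist_mutex
so a late perf_swevent_add() that sees a NULL head also sees
online == false. The resulting offline sequence:

    /*
     * perf_event_exit_cpu(cpu)
     *   perf_event_exit_cpu_context(cpu)     remove events first
     *   mutex_lock(&swhash->hlist_mutex)
     *   swhash->online = false               late adders skip the WARN
     *   swevent_hlist_release(swhash)        now safe to drop the hash
     *   mutex_unlock(&swhash->hlist_mutex)
     */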