Merge tag 'perf-urgent-2020-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git...
[platform/kernel/linux-starfive.git] kernel/events/core.c
index 1569979..81e6d80 100644
@@ -1296,7 +1296,7 @@ static void put_ctx(struct perf_event_context *ctx)
  * function.
  *
  * Lock order:
- *    cred_guard_mutex
+ *    exec_update_mutex
  *     task_struct::perf_event_mutex
  *       perf_event_context::mutex
  *         perf_event::child_mutex;
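
The lock-order comment changes because the exec-serialization role of cred_guard_mutex was split out into the new exec_update_mutex in struct signal_struct, and that mutex remains the outermost lock as far as perf is concerned. Below is a minimal sketch of the documented nesting; sketch_lock_order() is a hypothetical helper, not kernel code, and assumes the caller already holds a reference on @task.

static void sketch_lock_order(struct task_struct *task,
			      struct perf_event_context *ctx,
			      struct perf_event *event)
{
	/* Outermost: serializes against a concurrent exec() of @task. */
	mutex_lock(&task->signal->exec_update_mutex);
	mutex_lock(&task->perf_event_mutex);
	mutex_lock(&ctx->mutex);
	mutex_lock(&event->child_mutex);

	/* ... work that needs all four levels held ... */

	mutex_unlock(&event->child_mutex);
	mutex_unlock(&ctx->mutex);
	mutex_unlock(&task->perf_event_mutex);
	mutex_unlock(&task->signal->exec_update_mutex);
}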
@@ -8535,23 +8535,22 @@ static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
                                         enum perf_bpf_event_type type)
 {
        bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
-       char sym[KSYM_NAME_LEN];
        int i;
 
        if (prog->aux->func_cnt == 0) {
-               bpf_get_prog_name(prog, sym);
                perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
                                   (u64)(unsigned long)prog->bpf_func,
-                                  prog->jited_len, unregister, sym);
+                                  prog->jited_len, unregister,
+                                  prog->aux->ksym.name);
        } else {
                for (i = 0; i < prog->aux->func_cnt; i++) {
                        struct bpf_prog *subprog = prog->aux->func[i];
 
-                       bpf_get_prog_name(subprog, sym);
                        perf_event_ksymbol(
                                PERF_RECORD_KSYMBOL_TYPE_BPF,
                                (u64)(unsigned long)subprog->bpf_func,
-                               subprog->jited_len, unregister, sym);
+                               subprog->jited_len, unregister,
+                               prog->aux->ksym.name);
                }
        }
 }
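
The local KSYM_NAME_LEN buffer and the bpf_get_prog_name() copy are dropped because, after the bpf_ksym rework, the symbol name is built once when the program is added to kallsyms and kept in prog->aux->ksym.name. A hedged sketch of the resulting emit pattern; bpf_emit_one_ksym() is illustrative only, not a kernel function.

static void bpf_emit_one_ksym(struct bpf_prog *prog, bool unregister)
{
	/* The name is precomputed in struct bpf_ksym, so there is no
	 * per-event formatting into a local KSYM_NAME_LEN buffer. */
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
			   (u64)(unsigned long)prog->bpf_func,
			   prog->jited_len, unregister,
			   prog->aux->ksym.name);
}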
@@ -9486,7 +9485,6 @@ static void bpf_overflow_handler(struct perf_event *event,
        int ret = 0;
 
        ctx.regs = perf_arch_bpf_user_pt_regs(regs);
-       preempt_disable();
        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
                goto out;
        rcu_read_lock();
@@ -9494,7 +9492,6 @@ static void bpf_overflow_handler(struct perf_event *event,
        rcu_read_unlock();
 out:
        __this_cpu_dec(bpf_prog_active);
-       preempt_enable();
        if (!ret)
                return;
 
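The explicit preempt_disable()/preempt_enable() pair goes away because bpf_overflow_handler() is invoked from the PMU overflow path (hard IRQ or NMI context), where preemption is already disabled, so the per-CPU bpf_prog_active recursion guard needs no extra protection. A hedged sketch of that guard pattern, assuming a non-preemptible caller; run_bpf_guarded() is illustrative only.

static int run_bpf_guarded(struct bpf_prog *prog, void *ctx)
{
	int ret = 0;

	/* Per-CPU recursion guard; relies on the caller already being
	 * non-preemptible (hardirq/NMI), hence no preempt_disable(). */
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
		goto out;

	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);	/* program-invocation macro of this era */
	rcu_read_unlock();
out:
	__this_cpu_dec(bpf_prog_active);
	return ret;
}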
@@ -11554,14 +11551,14 @@ SYSCALL_DEFINE5(perf_event_open,
        }
 
        if (task) {
-               err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+               err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
                if (err)
                        goto err_task;
 
                /*
                 * Reuse ptrace permission checks for now.
                 *
-                * We must hold cred_guard_mutex across this and any potential
+                * We must hold exec_update_mutex across this and any potential
                 * perf_install_in_context() call for this new event to
                 * serialize against exec() altering our credentials (and the
                 * perf_event_exit_task() that could imply).
@@ -11850,7 +11847,7 @@ SYSCALL_DEFINE5(perf_event_open,
        mutex_unlock(&ctx->mutex);
 
        if (task) {
-               mutex_unlock(&task->signal->cred_guard_mutex);
+               mutex_unlock(&task->signal->exec_update_mutex);
                put_task_struct(task);
        }
 
@@ -11886,7 +11883,7 @@ err_alloc:
                free_event(event);
 err_cred:
        if (task)
-               mutex_unlock(&task->signal->cred_guard_mutex);
+               mutex_unlock(&task->signal->exec_update_mutex);
 err_task:
        if (task)
                put_task_struct(task);
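
Across these three hunks, perf_event_open() now takes exec_update_mutex instead of cred_guard_mutex, holding it from the ptrace permission check until the event has been installed, and releasing it on both the success and error paths. A hedged sketch of that scope, heavily simplified from the real syscall; sketch_open_scope() is hypothetical.

static int sketch_open_scope(struct task_struct *task,
			     struct perf_event_context *ctx,
			     struct perf_event *event)
{
	int err;

	err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
	if (err)
		return err;

	/* Credential check and installation must happen under the same
	 * lock, so a concurrent exec() cannot change credentials between
	 * the two steps. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		err = -EACCES;
		goto unlock;
	}

	perf_install_in_context(ctx, event, event->cpu);
	err = 0;
unlock:
	mutex_unlock(&task->signal->exec_update_mutex);
	return err;
}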
@@ -12191,7 +12188,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 /*
  * When a child task exits, feed back event values to parent events.
  *
- * Can be called with cred_guard_mutex held when called from
+ * Can be called with exec_update_mutex held when called from
  * install_exec_creds().
  */
 void perf_event_exit_task(struct task_struct *child)
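
The updated comment refers to the exec side: in kernels of this era, install_exec_creds() tears down perf monitoring for setuid execs while exec_update_mutex is still held, which is why that mutex sits at the top of the lock-order comment above. A hedged, simplified sketch of that caller, not the exact fs/exec.c code.

static void sketch_install_exec_creds(struct linux_binprm *bprm)
{
	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/* Drop perf events when exec'ing a setuid/setgid binary; the new
	 * credentials must be committed first, and exec_update_mutex is
	 * still held here (taken earlier in the exec path). */
	if (get_dumpable(current->mm) != SUID_DUMP_USER)
		perf_event_exit_task(current);

	mutex_unlock(&current->signal->exec_update_mutex);
	mutex_unlock(&current->signal->cred_guard_mutex);
}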