crypto: pcrypt - Avoid deadlock by using per-instance padata queues
[platform/kernel/linux-rpi.git] / kernel / padata.c
index da56a23..c4b7743 100644
@@ -89,7 +89,7 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
 /**
  * padata_do_parallel - padata parallelization function
  *
- * @pinst: padata instance
+ * @ps: padata shell
  * @padata: object to be parallelized
  * @cb_cpu: pointer to the CPU that the serialization callback function should
  *          run on.  If it's not in the serial cpumask of @pinst
@@ -100,16 +100,17 @@ static void padata_parallel_worker(struct work_struct *parallel_work)
  * Note: Every object which is parallelized by padata_do_parallel
  * must be seen by padata_do_serial.
  */
-int padata_do_parallel(struct padata_instance *pinst,
+int padata_do_parallel(struct padata_shell *ps,
                       struct padata_priv *padata, int *cb_cpu)
 {
+       struct padata_instance *pinst = ps->pinst;
        int i, cpu, cpu_index, target_cpu, err;
        struct padata_parallel_queue *queue;
        struct parallel_data *pd;
 
        rcu_read_lock_bh();
 
-       pd = rcu_dereference_bh(pinst->pd);
+       pd = rcu_dereference_bh(ps->pd);
 
        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
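With this hunk, submitters pass a padata_shell rather than the padata_instance, and the live parallel_data is looked up through ps->pd under rcu_read_lock_bh(). A hedged caller sketch follows; my_request, my_parallel, my_serial and my_submit are illustrative names only (crypto/pcrypt.c is the in-tree caller converted in the same series):

struct my_request {
	struct padata_priv padata;	/* recovered via container_of() in the callbacks */
	/* caller-specific payload */
};

static void my_parallel(struct padata_priv *padata)
{
	/* CPU-heavy work, runs on a CPU from the parallel cpumask */
}

static void my_serial(struct padata_priv *padata)
{
	/* completion, delivered in submission order on the callback CPU */
}

static int my_submit(struct padata_shell *ps, struct my_request *req, int *cb_cpu)
{
	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;

	/* 0 means queued; -EINVAL (not started / no usable cb_cpu) or -EBUSY otherwise. */
	return padata_do_parallel(ps, &req->padata, cb_cpu);
}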
@@ -212,10 +213,10 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
 
 static void padata_reorder(struct parallel_data *pd)
 {
+       struct padata_instance *pinst = pd->ps->pinst;
        int cb_cpu;
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
-       struct padata_instance *pinst = pd->pinst;
        struct padata_parallel_queue *next_queue;
 
        /*
@@ -349,36 +350,39 @@ void padata_do_serial(struct padata_priv *padata)
 }
 EXPORT_SYMBOL(padata_do_serial);
 
-static int padata_setup_cpumasks(struct parallel_data *pd,
-                                const struct cpumask *pcpumask,
-                                const struct cpumask *cbcpumask)
+static int padata_setup_cpumasks(struct padata_instance *pinst)
 {
        struct workqueue_attrs *attrs;
+       int err;
+
+       attrs = alloc_workqueue_attrs();
+       if (!attrs)
+               return -ENOMEM;
+
+       /* Restrict parallel_wq workers to pinst->cpumask.pcpu. */
+       cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
+       err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
+       free_workqueue_attrs(attrs);
+
+       return err;
+}
+
+static int pd_setup_cpumasks(struct parallel_data *pd,
+                            const struct cpumask *pcpumask,
+                            const struct cpumask *cbcpumask)
+{
        int err = -ENOMEM;
 
        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                goto out;
-       cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
-
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
                goto free_pcpu_mask;
-       cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
 
-       attrs = alloc_workqueue_attrs();
-       if (!attrs)
-               goto free_cbcpu_mask;
-
-       /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
-       cpumask_copy(attrs->cpumask, pd->cpumask.pcpu);
-       err = apply_workqueue_attrs(pd->pinst->parallel_wq, attrs);
-       free_workqueue_attrs(attrs);
-       if (err < 0)
-               goto free_cbcpu_mask;
+       cpumask_copy(pd->cpumask.pcpu, pcpumask);
+       cpumask_copy(pd->cpumask.cbcpu, cbcpumask);
 
        return 0;
 
-free_cbcpu_mask:
-       free_cpumask_var(pd->cpumask.cbcpu);
 free_pcpu_mask:
        free_cpumask_var(pd->cpumask.pcpu);
 out:
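The old padata_setup_cpumasks() mixed three jobs: allocating the per-pd cpumasks, ANDing them with cpu_online_mask, and reprogramming the parallel workqueue. After the split, the workqueue attributes become a per-instance concern handled once by padata_setup_cpumasks(pinst) (called from __padata_set_cpumasks() and padata_alloc() below), while pd_setup_cpumasks() only copies already-restricted masks into a freshly built parallel_data (called from padata_alloc_pd()). A rough sketch of how the two halves combine; example_rebuild() is hypothetical and not part of this patch:

static int example_rebuild(struct padata_instance *pinst,
			   struct parallel_data *pd)
{
	int err;

	/* Instance level: pin parallel_wq workers to the parallel cpumask. */
	err = padata_setup_cpumasks(pinst);
	if (err)
		return err;

	/* Per-pd level: snapshot the effective (online-restricted) masks. */
	return pd_setup_cpumasks(pd, pinst->rcpumask.pcpu,
				 pinst->rcpumask.cbcpu);
}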
@@ -422,12 +426,16 @@ static void padata_init_pqueues(struct parallel_data *pd)
 }
 
 /* Allocate and initialize the internal cpumask-dependent resources. */
-static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
-                                            const struct cpumask *pcpumask,
-                                            const struct cpumask *cbcpumask)
+static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
 {
+       struct padata_instance *pinst = ps->pinst;
+       const struct cpumask *cbcpumask;
+       const struct cpumask *pcpumask;
        struct parallel_data *pd;
 
+       cbcpumask = pinst->rcpumask.cbcpu;
+       pcpumask = pinst->rcpumask.pcpu;
+
        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;
@@ -440,8 +448,8 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
        if (!pd->squeue)
                goto err_free_pqueue;
 
-       pd->pinst = pinst;
-       if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
+       pd->ps = ps;
+       if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
                goto err_free_squeue;
 
        padata_init_pqueues(pd);
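padata_alloc_pd() now takes a shell and derives both masks from pinst->rcpumask, the cached "user mask AND cpu_online_mask" pair that padata_replace() below keeps up to date. From the field accesses in this patch (ps->pinst, ps->pd, ps->opd, ps->list, pinst->pslist, pd->ps), the new object has roughly this shape; the authoritative definition lives in include/linux/padata.h:

struct padata_shell {
	struct padata_instance	*pinst;	/* parent instance */
	struct parallel_data __rcu *pd;	/* current parallel_data */
	struct parallel_data	*opd;	/* old pd, freed after the grace period */
	struct list_head	list;	/* entry in pinst->pslist */
};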
@@ -490,32 +498,60 @@ static void __padata_stop(struct padata_instance *pinst)
 }
 
 /* Replace the internal control structure with a new one. */
-static void padata_replace(struct padata_instance *pinst,
-                          struct parallel_data *pd_new)
+static int padata_replace_one(struct padata_shell *ps)
 {
-       struct parallel_data *pd_old = pinst->pd;
-       int notification_mask = 0;
+       struct parallel_data *pd_new;
 
-       pinst->flags |= PADATA_RESET;
+       pd_new = padata_alloc_pd(ps);
+       if (!pd_new)
+               return -ENOMEM;
 
-       rcu_assign_pointer(pinst->pd, pd_new);
+       ps->opd = rcu_dereference_protected(ps->pd, 1);
+       rcu_assign_pointer(ps->pd, pd_new);
 
-       synchronize_rcu();
+       return 0;
+}
 
-       if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
+static int padata_replace(struct padata_instance *pinst)
+{
+       int notification_mask = 0;
+       struct padata_shell *ps;
+       int err = 0;
+
+       pinst->flags |= PADATA_RESET;
+
+       cpumask_copy(pinst->omask, pinst->rcpumask.pcpu);
+       cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
+                   cpu_online_mask);
+       if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu))
                notification_mask |= PADATA_CPU_PARALLEL;
-       if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
+
+       cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu);
+       cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
+                   cpu_online_mask);
+       if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu))
                notification_mask |= PADATA_CPU_SERIAL;
 
-       if (atomic_dec_and_test(&pd_old->refcnt))
-               padata_free_pd(pd_old);
+       list_for_each_entry(ps, &pinst->pslist, list) {
+               err = padata_replace_one(ps);
+               if (err)
+                       break;
+       }
+
+       synchronize_rcu();
+
+       list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
+               if (atomic_dec_and_test(&ps->opd->refcnt))
+                       padata_free_pd(ps->opd);
 
        if (notification_mask)
                blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
                                             notification_mask,
-                                            &pd_new->cpumask);
+                                            &pinst->cpumask);
 
        pinst->flags &= ~PADATA_RESET;
+
+       return err;
 }
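padata_replace() now walks pinst->pslist twice: the forward walk publishes a new parallel_data for every shell, a single synchronize_rcu() then covers all of them, and the reverse walk drops the old ones. list_for_each_entry_continue_reverse() resumes from wherever the forward walk stopped, so if padata_replace_one() fails part-way only the shells that were actually switched release their old pd; when the forward walk completes, ps points at the list head and the reverse walk covers every shell. A minimal illustration of that iterator's semantics, using a hypothetical item list that is not part of this patch:

struct item { int id; struct list_head node; };

static void rollback_completed(struct list_head *items, struct item *stopped_at)
{
	struct item *it = stopped_at;

	/*
	 * Starts at stopped_at->node.prev and walks back to the list head,
	 * i.e. exactly over the entries the forward loop finished with;
	 * stopped_at itself is skipped, matching the "break" above.
	 */
	list_for_each_entry_continue_reverse(it, items, node)
		pr_info("undoing item %d\n", it->id);
}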
 
 /**
@@ -568,7 +604,7 @@ static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t cbcpumask)
 {
        int valid;
-       struct parallel_data *pd;
+       int err;
 
        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
@@ -581,19 +617,15 @@ static int __padata_set_cpumasks(struct padata_instance *pinst,
                __padata_stop(pinst);
 
 out_replace:
-       pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
-       if (!pd)
-               return -ENOMEM;
-
        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
 
-       padata_replace(pinst, pd);
+       err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
 
        if (valid)
                __padata_start(pinst);
 
-       return 0;
+       return err;
 }
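The line "err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);" uses the GNU C conditional with the middle operand omitted, a common kernel idiom for chaining calls that return 0 on success: the second call runs only if the first returned 0. Hypothetical step_one()/step_two() spell the expansion out:

static int step_one(void);
static int step_two(void);

static int chained(void)
{
	int err = step_one();

	/* step_one() ?: step_two() expands to exactly this */
	return err ? err : step_two();
}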
 
 /**
@@ -611,8 +643,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;
 
-       mutex_lock(&pinst->lock);
        get_online_cpus();
+       mutex_lock(&pinst->lock);
 
        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
@@ -630,8 +662,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
        err =  __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
 
 out:
-       put_online_cpus();
        mutex_unlock(&pinst->lock);
+       put_online_cpus();
 
        return err;
 }
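The hunks above also reorder the locking in padata_set_cpumask(): the CPU hotplug lock (get_online_cpus(), i.e. cpus_read_lock()) is now taken before pinst->lock and dropped after it, presumably so that this path uses the same order as the hotplug callbacks, which already run with the hotplug lock held and take pinst->lock around __padata_add_cpu()/__padata_remove_cpu(). Sketch of the resulting ordering rule only:

/* Outer to inner, both paths agreeing after this patch (sketch):
 *
 *   padata_set_cpumask():           padata_cpu_online()/padata_cpu_dead():
 *     get_online_cpus();              [cpu_hotplug_lock held by cpuhp core]
 *     mutex_lock(&pinst->lock);       mutex_lock(&pinst->lock);
 *     ...                             __padata_add/remove_cpu(pinst, cpu);
 *     mutex_unlock(&pinst->lock);     mutex_unlock(&pinst->lock);
 *     put_online_cpus();
 */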
@@ -676,46 +708,32 @@ EXPORT_SYMBOL(padata_stop);
 
 static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
-       struct parallel_data *pd;
+       int err = 0;
 
        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
-               pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
-                                    pinst->cpumask.cbcpu);
-               if (!pd)
-                       return -ENOMEM;
-
-               padata_replace(pinst, pd);
+               err = padata_replace(pinst);
 
                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }
 
-       return 0;
+       return err;
 }
 
 static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 {
-       struct parallel_data *pd = NULL;
-
-       if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+       int err = 0;
 
+       if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);
 
-               pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
-                                    pinst->cpumask.cbcpu);
-               if (!pd)
-                       return -ENOMEM;
-
-               padata_replace(pinst, pd);
-
-               cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
-               cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
+               err = padata_replace(pinst);
        }
 
-       return 0;
+       return err;
 }
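With per-shell parallel_data, the hotplug helpers no longer build a pd by hand; they only decide whether a rebuild is needed and let padata_replace() recompute pinst->rcpumask against cpu_online_mask for every shell. Note the flipped test in __padata_remove_cpu(): the callback now runs from the new CPUHP_PADATA_DEAD state, i.e. after the CPU has already left cpu_online_mask. A simplified sketch of what the surrounding callbacks boil down to; example_cpu_event() is hypothetical, and the real padata_cpu_online()/padata_cpu_dead() also take pinst->lock:

static int example_cpu_event(struct padata_instance *pinst, int cpu,
			     bool came_online)
{
	/* Instances that never use this CPU are left untouched. */
	if (!cpumask_test_cpu(cpu, pinst->cpumask.pcpu) &&
	    !cpumask_test_cpu(cpu, pinst->cpumask.cbcpu))
		return 0;

	return came_online ? __padata_add_cpu(pinst, cpu)
			   : __padata_remove_cpu(pinst, cpu);
}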
 
  /**
@@ -774,7 +792,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
        return ret;
 }
 
-static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
+static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
 {
        struct padata_instance *pinst;
        int ret;
@@ -795,11 +813,16 @@ static enum cpuhp_state hp_online;
 static void __padata_free(struct padata_instance *pinst)
 {
 #ifdef CONFIG_HOTPLUG_CPU
+       cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
        cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
 #endif
 
+       WARN_ON(!list_empty(&pinst->pslist));
+
        padata_stop(pinst);
-       padata_free_pd(pinst->pd);
+       free_cpumask_var(pinst->omask);
+       free_cpumask_var(pinst->rcpumask.cbcpu);
+       free_cpumask_var(pinst->rcpumask.pcpu);
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        destroy_workqueue(pinst->serial_wq);
@@ -946,7 +969,6 @@ static struct padata_instance *padata_alloc(const char *name,
                                            const struct cpumask *cbcpumask)
 {
        struct padata_instance *pinst;
-       struct parallel_data *pd = NULL;
 
        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
@@ -974,14 +996,22 @@ static struct padata_instance *padata_alloc(const char *name,
            !padata_validate_cpumask(pinst, cbcpumask))
                goto err_free_masks;
 
-       pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
-       if (!pd)
+       if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
                goto err_free_masks;
+       if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
+               goto err_free_rcpumask_pcpu;
+       if (!alloc_cpumask_var(&pinst->omask, GFP_KERNEL))
+               goto err_free_rcpumask_cbcpu;
 
-       rcu_assign_pointer(pinst->pd, pd);
+       INIT_LIST_HEAD(&pinst->pslist);
 
        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
+       cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
+       cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);
+
+       if (padata_setup_cpumasks(pinst))
+               goto err_free_omask;
 
        pinst->flags = 0;
 
@@ -991,12 +1021,20 @@ static struct padata_instance *padata_alloc(const char *name,
 
 #ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
+       cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
+                                                   &pinst->node);
 #endif
 
        put_online_cpus();
 
        return pinst;
 
+err_free_omask:
+       free_cpumask_var(pinst->omask);
+err_free_rcpumask_cbcpu:
+       free_cpumask_var(pinst->rcpumask.cbcpu);
+err_free_rcpumask_pcpu:
+       free_cpumask_var(pinst->rcpumask.pcpu);
 err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
@@ -1035,6 +1073,61 @@ void padata_free(struct padata_instance *pinst)
 }
 EXPORT_SYMBOL(padata_free);
 
+/**
+ * padata_alloc_shell - Allocate and initialize padata shell.
+ *
+ * @pinst: Parent padata_instance object.
+ */
+struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
+{
+       struct parallel_data *pd;
+       struct padata_shell *ps;
+
+       ps = kzalloc(sizeof(*ps), GFP_KERNEL);
+       if (!ps)
+               goto out;
+
+       ps->pinst = pinst;
+
+       get_online_cpus();
+       pd = padata_alloc_pd(ps);
+       put_online_cpus();
+
+       if (!pd)
+               goto out_free_ps;
+
+       mutex_lock(&pinst->lock);
+       RCU_INIT_POINTER(ps->pd, pd);
+       list_add(&ps->list, &pinst->pslist);
+       mutex_unlock(&pinst->lock);
+
+       return ps;
+
+out_free_ps:
+       kfree(ps);
+out:
+       return NULL;
+}
+EXPORT_SYMBOL(padata_alloc_shell);
+
+/**
+ * padata_free_shell - free a padata shell
+ *
+ * @ps: padata shell to free
+ */
+void padata_free_shell(struct padata_shell *ps)
+{
+       struct padata_instance *pinst = ps->pinst;
+
+       mutex_lock(&pinst->lock);
+       list_del(&ps->list);
+       padata_free_pd(rcu_dereference_protected(ps->pd, 1));
+       mutex_unlock(&pinst->lock);
+
+       kfree(ps);
+}
+EXPORT_SYMBOL(padata_free_shell);
+
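Taken together, the intended lifetime is one padata_instance with any number of shells on pinst->pslist, every shell freed before the instance (hence the WARN_ON(!list_empty(&pinst->pslist)) added to __padata_free() above). A hedged usage sketch modelled on the pcrypt conversion from the same series; example_init()/example_exit() and the "example" name are illustrative, with padata_alloc_possible()/padata_start()/padata_stop() being the instance-level API as of this kernel version:

static struct padata_instance *example_pinst;
static struct padata_shell *example_ps;

static int example_init(void)
{
	int err;

	example_pinst = padata_alloc_possible("example");
	if (!example_pinst)
		return -ENOMEM;

	example_ps = padata_alloc_shell(example_pinst);
	if (!example_ps) {
		padata_free(example_pinst);
		return -ENOMEM;
	}

	err = padata_start(example_pinst);
	if (err) {
		padata_free_shell(example_ps);
		padata_free(example_pinst);
	}
	return err;
}

static void example_exit(void)
{
	padata_stop(example_pinst);
	padata_free_shell(example_ps);	/* all shells must go before the instance */
	padata_free(example_pinst);
}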
 #ifdef CONFIG_HOTPLUG_CPU
 
 static __init int padata_driver_init(void)
@@ -1042,17 +1135,24 @@ static __init int padata_driver_init(void)
        int ret;
 
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
-                                     padata_cpu_online,
-                                     padata_cpu_prep_down);
+                                     padata_cpu_online, NULL);
        if (ret < 0)
                return ret;
        hp_online = ret;
+
+       ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
+                                     NULL, padata_cpu_dead);
+       if (ret < 0) {
+               cpuhp_remove_multi_state(hp_online);
+               return ret;
+       }
        return 0;
 }
 module_init(padata_driver_init);
 
 static __exit void padata_driver_exit(void)
 {
+       cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
        cpuhp_remove_multi_state(hp_online);
 }
 module_exit(padata_driver_exit);