// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for the API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

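/*
 * For example, with three CPUs in cpumask.pcpu, sequence numbers
 * 0, 1, 2, 3, 4, ... map to cpu indices 0, 1, 2, 0, 1, ..., so
 * submissions are spread round-robin over the parallel workers and
 * each percpu reorder queue receives every third object.
 */
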
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;
	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;
	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

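/*
 * Illustrative usage sketch (not part of this file; my_request, my_parallel,
 * my_serial, do_expensive_work and my_pinst are hypothetical): users embed
 * struct padata_priv into their own request structure and submit it for
 * parallel processing.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *
 *		do_expensive_work(req);
 *		padata_do_serial(padata);
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(my_pinst, &req->padata, cb_cpu);
 */
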
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);

		pd->processed++;

		spin_unlock(&reorder->lock);
		goto out;
	}
	spin_unlock(&reorder->lock);

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

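/*
 * Note that the queue selection above mirrors padata_cpu_hash(): the object
 * with sequence number pd->processed was hashed to cpu index
 * pd->processed % num_cpus at submission time, so the next object in
 * sequence can only show up in that one percpu reorder queue.
 */
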
static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 *
	 * Ensure reorder_objects is read after pd->lock is dropped so we see
	 * an increment from another task in padata_do_serial. Pairs with
	 * smp_mb__after_atomic in padata_do_serial.
	 */
	smp_mb();
	if (atomic_read(&pd->reorder_objects) &&
	    !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	local_bh_disable();
	pqueue = container_of(work, struct padata_parallel_queue, reorder_work);
	pd = pqueue->pd;
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_reorder_timer(struct timer_list *t)
{
	struct parallel_data *pd = from_timer(pd, t, timer);
	unsigned int weight;
	int target_cpu, cpu;

	cpu = get_cpu();

	/* We don't lock pd here to not interfere with parallel processing
	 * padata_reorder() calls on other CPUs. We just need any CPU out of
	 * the cpumask.pcpu set. It would be nice if it's the right one but
	 * it doesn't matter if we're off to the next one by using an outdated
	 * pd->processed value.
	 */
	weight = cpumask_weight(pd->cpumask.pcpu);
	target_cpu = padata_index_to_cpu(pd, pd->processed % weight);

	/* ensure to call the reorder callback on the correct CPU */
	if (cpu != target_cpu) {
		struct padata_parallel_queue *pqueue;
		struct padata_instance *pinst;

		/* The timer function is serialized wrt itself -- no locking
		 * needed.
		 */
		pinst = pd->pinst;
		pqueue = per_cpu_ptr(pd->pqueue, target_cpu);
		queue_work_on(target_cpu, pinst->wq, &pqueue->reorder_work);
	} else {
		padata_reorder(pd);
	}

	put_cpu();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	int reorder_via_wq = 0;

	pd = padata->pd;

	cpu = get_cpu();

	/* We need to run on the same CPU padata_do_parallel(.., padata, ..)
	 * was called on -- or, at least, enqueue the padata object into the
	 * correct per-cpu queue.
	 */
	if (cpu != padata->cpu) {
		reorder_via_wq = 1;
		cpu = padata->cpu;
	}

	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the atomic_inc of reorder_objects above is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	put_cpu();

	/* If we're running on the wrong CPU, call padata_reorder() via a
	 * kernel worker.
	 */
	if (reorder_via_wq)
		queue_work_on(cpu, pd->pinst->wq, &pqueue->reorder_work);
	else
		padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

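/*
 * Illustrative sketch of a serial callback (hypothetical names): it runs on
 * the cb_cpu requested in padata_do_parallel(), in submission order, with
 * BHs off.
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *
 *		complete_my_request(req);
 *	}
 */
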
static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_possible_cpu(cpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		if (!cpumask_test_cpu(cpu, pd->cpumask.pcpu)) {
			pqueue->cpu_index = -1;
			continue;
		}

		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		INIT_WORK(&pqueue->reorder_work, invoke_padata_reorder);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	timer_setup(&pd->timer, padata_reorder_timer, 0);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);

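/*
 * Illustrative notifier sketch (hypothetical names): padata_replace() above
 * passes a struct padata_cpumask holding the new pcpu/cbcpu masks as the
 * notifier data.
 *
 *	static int my_cpumask_change(struct notifier_block *self,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_mask = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			handle_new_parallel_mask(new_mask->pcpu);
 *		if (val & PADATA_CPU_SERIAL)
 *			handle_new_serial_mask(new_mask->cbcpu);
 *		return 0;
 *	}
 */
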
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or the parallel cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

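/*
 * Illustrative sketch (hypothetical): restrict the parallel workers to
 * CPUs 0-3.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	cpumask_set_cpu(2, mask);
 *	cpumask_set_cpu(3, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */
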
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);

	return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);

	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);

	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);

	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

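/*
 * Illustrative sysfs usage (the exact path depends on where the user adds
 * pinst->kobj; pcrypt, for example, exposes its instances under
 * /sys/kernel/pcrypt/):
 *
 *	cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */
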
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 *
 * Must be called from a cpus_read_lock() protected region
 */
static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
#endif

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 *
 * Must be called from a cpus_read_lock() protected region
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	lockdep_assert_cpus_held();
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

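/*
 * Illustrative lifecycle sketch (hypothetical names; the workqueue flags
 * mirror what pcrypt uses): allocate a workqueue, create the instance under
 * the CPU hotplug read lock, start it, submit objects, and tear everything
 * down in reverse order.
 *
 *	wq = alloc_workqueue("my_padata",
 *			     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *	cpus_read_lock();
 *	pinst = padata_alloc_possible(wq);
 *	cpus_read_unlock();
 *	padata_start(pinst);
 *	...
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	...
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */
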
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online,
				      padata_cpu_prep_down);
	if (ret < 0)
		return ret;

	hp_online = ret;
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif