/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for the API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

/* Upper bound on objects in flight per instance, enforced in padata_do_parallel(). */
#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
        int cpu, target_cpu;

        target_cpu = cpumask_first(pd->cpumask.pcpu);
        for (cpu = 0; cpu < cpu_index; cpu++)
                target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

        return target_cpu;
}
static int padata_cpu_hash(struct parallel_data *pd)
{
        int cpu_index;

        /*
         * Hash the sequence numbers to the cpus by taking
         * seq_nr mod. number of cpus in use.
         */
        spin_lock(&pd->seq_lock);
        cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
        pd->seq_nr++;
        spin_unlock(&pd->seq_lock);

        return padata_index_to_cpu(pd, cpu_index);
}
static void padata_parallel_worker(struct work_struct *parallel_work)
{
        struct padata_parallel_queue *pqueue;
        struct parallel_data *pd;
        struct padata_instance *pinst;
        LIST_HEAD(local_list);

        local_bh_disable();
        pqueue = container_of(parallel_work,
                              struct padata_parallel_queue, work);
        pd = pqueue->pd;
        pinst = pd->pinst;

        spin_lock(&pqueue->parallel.lock);
        list_replace_init(&pqueue->parallel.list, &local_list);
        spin_unlock(&pqueue->parallel.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->parallel(padata);
        }

        local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
                       struct padata_priv *padata, int cb_cpu)
{
        int target_cpu, err;
        struct padata_parallel_queue *queue;
        struct parallel_data *pd;

        rcu_read_lock_bh();
        pd = rcu_dereference(pinst->pd);

        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
                goto out;
        if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
                goto out;

        err = -EBUSY;
        if ((pinst->flags & PADATA_RESET))
                goto out;
        if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
                goto out;

        err = 0;
        atomic_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = cb_cpu;

        target_cpu = padata_cpu_hash(pd);
        queue = per_cpu_ptr(pd->pqueue, target_cpu);

        spin_lock(&queue->parallel.lock);
        list_add_tail(&padata->list, &queue->parallel.list);
        spin_unlock(&queue->parallel.lock);

        queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
        rcu_read_unlock_bh();
        return err;
}
EXPORT_SYMBOL(padata_do_parallel);
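/*
 * Usage sketch (illustrative, not part of padata): a caller embeds
 * struct padata_priv in its own request, points the parallel/serial hooks
 * at its callbacks and submits via padata_do_parallel().  All names below
 * (my_request, my_parallel, my_serial, my_submit, do_heavy_work,
 * complete_in_order) are hypothetical; handling -EBUSY/-EINVAL from
 * padata_do_parallel() is the caller's job.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		void *payload;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		do_heavy_work(req->payload);     // runs in parallel, BHs off
 *		padata_do_serial(padata);        // hand back for ordered completion
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		complete_in_order(req->payload); // runs in submission order on cb_cpu
 *	}
 *
 *	static int my_submit(struct padata_instance *pinst,
 *			     struct my_request *req, int cb_cpu)
 *	{
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *		return padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	}
 */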
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
        int cpu, num_cpus;
        unsigned int next_nr, next_index;
        struct padata_parallel_queue *queue, *next_queue;
        struct padata_priv *padata;
        struct padata_list *reorder;

        num_cpus = cpumask_weight(pd->cpumask.pcpu);

        /*
         * Calculate the percpu reorder queue and the sequence
         * number of the next object.
         */
        next_nr = pd->processed;
        next_index = next_nr % num_cpus;
        cpu = padata_index_to_cpu(pd, next_index);
        next_queue = per_cpu_ptr(pd->pqueue, cpu);

        padata = NULL;
        reorder = &next_queue->reorder;

        if (!list_empty(&reorder->list)) {
                padata = list_entry(reorder->list.next,
                                    struct padata_priv, list);

                spin_lock(&reorder->lock);
                list_del_init(&padata->list);
                atomic_dec(&pd->reorder_objects);
                spin_unlock(&reorder->lock);

                pd->processed++;
                goto out;
        }

        queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
        if (queue->cpu_index == next_queue->cpu_index) {
                padata = ERR_PTR(-ENODATA);
                goto out;
        }

        padata = ERR_PTR(-EINPROGRESS);
out:
        return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
        int cb_cpu;
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
        struct padata_instance *pinst = pd->pinst;

        /*
         * We need to ensure that only one cpu can work on dequeueing of
         * the reorder queue at a time. Calculating in which percpu reorder
         * queue the next object will arrive takes some time. A spinlock
         * would be highly contended. Also it is not clear in which order
         * the objects arrive at the reorder queues. So a cpu could wait to
         * get the lock just to notice that there is nothing to do at the
         * moment. Therefore we use a trylock and let the holder of the lock
         * care for all the objects enqueued during the holdtime of the lock.
         */
        if (!spin_trylock_bh(&pd->lock))
                return;

        while (1) {
                padata = padata_get_next(pd);

                /*
                 * All reorder queues are empty, or the next object that needs
                 * serialization is parallel processed by another cpu and is
                 * still on its way to the cpu's reorder queue, nothing to
                 * do for now.
                 */
                if (!padata || PTR_ERR(padata) == -EINPROGRESS)
                        break;

                /*
                 * This cpu has to do the parallel processing of the next
                 * object. It's waiting in the cpu's parallelization queue,
                 * so exit immediately.
                 */
                if (PTR_ERR(padata) == -ENODATA) {
                        del_timer(&pd->timer);
                        spin_unlock_bh(&pd->lock);
                        return;
                }

                cb_cpu = padata->cb_cpu;
                squeue = per_cpu_ptr(pd->squeue, cb_cpu);

                spin_lock(&squeue->serial.lock);
                list_add_tail(&padata->list, &squeue->serial.list);
                spin_unlock(&squeue->serial.lock);

                queue_work_on(cb_cpu, pinst->wq, &squeue->work);
        }

        spin_unlock_bh(&pd->lock);

        /*
         * The next object that needs serialization might have arrived at
         * the reorder queues in the meantime, we will be called again
         * from the timer function if no one else cares for it.
         */
        if (atomic_read(&pd->reorder_objects)
                        && !(pinst->flags & PADATA_RESET))
                mod_timer(&pd->timer, jiffies + HZ);
        else
                del_timer(&pd->timer);
}
static void padata_reorder_timer(unsigned long arg)
{
        struct parallel_data *pd = (struct parallel_data *)arg;

        padata_reorder(pd);
}
static void padata_serial_worker(struct work_struct *serial_work)
{
        struct padata_serial_queue *squeue;
        struct parallel_data *pd;
        LIST_HEAD(local_list);

        local_bh_disable();
        squeue = container_of(serial_work, struct padata_serial_queue, work);
        pd = squeue->pd;

        spin_lock(&squeue->serial.lock);
        list_replace_init(&squeue->serial.list, &local_list);
        spin_unlock(&squeue->serial.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->serial(padata);
                atomic_dec(&pd->refcnt);
        }
        local_bh_enable();
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct parallel_data *pd;

        pd = padata->pd;

        cpu = get_cpu();
        pqueue = per_cpu_ptr(pd->pqueue, cpu);

        spin_lock(&pqueue->reorder.lock);
        atomic_inc(&pd->reorder_objects);
        list_add_tail(&padata->list, &pqueue->reorder.list);
        spin_unlock(&pqueue->reorder.lock);

        put_cpu();

        padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
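/*
 * Completion sketch (illustrative, not part of padata): the serial hand-off
 * does not have to happen inside the parallel callback itself.  If the
 * per-object work completes asynchronously, the completion handler can call
 * padata_do_serial() later, as long as it is called exactly once per object.
 * my_request, my_async_done() and req->error are hypothetical names.
 *
 *	static void my_async_done(struct my_request *req, int error)
 *	{
 *		req->error = error;             // stash status for the serial stage
 *		padata_do_serial(&req->padata); // release the object for reordering
 *	}
 */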
static int padata_setup_cpumasks(struct parallel_data *pd,
                                 const struct cpumask *pcpumask,
                                 const struct cpumask *cbcpumask)
{
        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
                /* cbcpu allocation failed; release the pcpu mask we already hold. */
                free_cpumask_var(pd->cpumask.pcpu);
                return -ENOMEM;
        }

        cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
        return 0;
}
static void __padata_list_init(struct padata_list *pd_list)
{
        INIT_LIST_HEAD(&pd_list->list);
        spin_lock_init(&pd_list->lock);
}
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
        int cpu;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                squeue->pd = pd;
                __padata_list_init(&squeue->serial);
                INIT_WORK(&squeue->work, padata_serial_worker);
        }
}
/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
        int cpu_index, cpu;
        struct padata_parallel_queue *pqueue;

        cpu_index = 0;
        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);
                pqueue->pd = pd;
                pqueue->cpu_index = cpu_index;
                cpu_index++;

                __padata_list_init(&pqueue->reorder);
                __padata_list_init(&pqueue->parallel);
                INIT_WORK(&pqueue->work, padata_parallel_worker);
                atomic_set(&pqueue->num_obj, 0);
        }
}
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
                                             const struct cpumask *pcpumask,
                                             const struct cpumask *cbcpumask)
{
        struct parallel_data *pd;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->pqueue = alloc_percpu(struct padata_parallel_queue);
        if (!pd->pqueue)
                goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
                goto err_free_pqueue;
        if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
                goto err_free_squeue;

        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
        pd->seq_nr = 0;
        atomic_set(&pd->reorder_objects, 0);
        atomic_set(&pd->refcnt, 0);
        pd->pinst = pinst;
        spin_lock_init(&pd->lock);
        spin_lock_init(&pd->seq_lock);

        return pd;

err_free_squeue:
        free_percpu(pd->squeue);
err_free_pqueue:
        free_percpu(pd->pqueue);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
        free_cpumask_var(pd->cpumask.pcpu);
        free_cpumask_var(pd->cpumask.cbcpu);
        free_percpu(pd->pqueue);
        free_percpu(pd->squeue);
        kfree(pd);
}
/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
        int cpu;
        struct padata_parallel_queue *pqueue;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);
                flush_work(&pqueue->work);
        }

        del_timer_sync(&pd->timer);

        if (atomic_read(&pd->reorder_objects))
                padata_reorder(pd);

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                flush_work(&squeue->work);
        }

        BUG_ON(atomic_read(&pd->refcnt) != 0);
}
static void __padata_start(struct padata_instance *pinst)
{
        pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
        if (!(pinst->flags & PADATA_INIT))
                return;

        pinst->flags &= ~PADATA_INIT;

        synchronize_rcu();

        get_online_cpus();
        padata_flush_queues(pinst->pd);
        put_online_cpus();
}
/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
                           struct parallel_data *pd_new)
{
        struct parallel_data *pd_old = pinst->pd;
        int notification_mask = 0;

        pinst->flags |= PADATA_RESET;

        rcu_assign_pointer(pinst->pd, pd_new);

        synchronize_rcu();

        if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
                notification_mask |= PADATA_CPU_PARALLEL;
        if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
                notification_mask |= PADATA_CPU_SERIAL;

        padata_flush_queues(pd_old);
        padata_free_pd(pd_old);

        if (notification_mask)
                blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
                                             notification_mask,
                                             &pd_new->cpumask);

        pinst->flags &= ~PADATA_RESET;
}
/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
                                     struct notifier_block *nblock)
{
        return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
                                                nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
                                       struct notifier_block *nblock)
{
        return blocking_notifier_chain_unregister(
                &pinst->cpumask_change_notifier,
                nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
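/*
 * Notifier sketch (illustrative, not part of padata): how a user of an
 * instance might react to cpumask changes.  The callback receives the
 * PADATA_CPU_* bits that changed and, as sent from padata_replace() above,
 * a pointer to the instance's new struct padata_cpumask.  my_cpumask_change(),
 * my_nb and resize_percpu_state() are hypothetical names.
 *
 *	static int my_cpumask_change(struct notifier_block *nb,
 *				     unsigned long mask_type, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (mask_type & PADATA_CPU_PARALLEL)
 *			resize_percpu_state(cpumask_weight(new_masks->pcpu));
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	err = padata_register_cpumask_notifier(pinst, &my_nb);
 *	...
 *	padata_unregister_cpumask_notifier(pinst, &my_nb);
 */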
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }

        pinst->flags &= ~PADATA_INVALID;
        return true;
}
static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t pcpumask,
                                 cpumask_var_t cbcpumask)
{
        int valid;
        struct parallel_data *pd;

        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
                __padata_stop(pinst);
                goto out_replace;
        }

        valid = padata_validate_cpumask(pinst, cbcpumask);
        if (!valid)
                __padata_stop(pinst);

out_replace:
        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                return -ENOMEM;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        padata_replace(pinst, pd);

        if (valid)
                __padata_start(pinst);

        return 0;
}
/**
 * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
 *                       one is used by parallel workers and the second one
 *                       by the workers doing serialization.
 *
 * @pinst: padata instance
 * @pcpumask: the cpumask to use for parallel workers
 * @cbcpumask: the cpumask to use for serial workers
 */
int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
                        cpumask_var_t cbcpumask)
{
        int err;

        mutex_lock(&pinst->lock);
        get_online_cpus();

        err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);

        put_online_cpus();
        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_set_cpumasks);
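/*
 * Usage sketch (illustrative, not part of padata): spread parallel work over
 * all online cpus while pinning serialization to cpu 0.  my_configure() is a
 * hypothetical helper; padata_set_cpumasks() copies the masks, so the caller
 * may free its temporary cpumask_var_t variables afterwards.
 *
 *	static int my_configure(struct padata_instance *pinst)
 *	{
 *		cpumask_var_t par_mask, ser_mask;
 *		int err;
 *
 *		if (!alloc_cpumask_var(&par_mask, GFP_KERNEL))
 *			return -ENOMEM;
 *		if (!alloc_cpumask_var(&ser_mask, GFP_KERNEL)) {
 *			free_cpumask_var(par_mask);
 *			return -ENOMEM;
 *		}
 *
 *		cpumask_copy(par_mask, cpu_online_mask);
 *		cpumask_clear(ser_mask);
 *		cpumask_set_cpu(0, ser_mask);
 *
 *		err = padata_set_cpumasks(pinst, par_mask, ser_mask);
 *
 *		free_cpumask_var(par_mask);
 *		free_cpumask_var(ser_mask);
 *		return err;
 *	}
 */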
/**
 * padata_set_cpumask - Set the cpumask specified by @cpumask_type to the
 *                      value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                       cpumask_var_t cpumask)
{
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;

        mutex_lock(&pinst->lock);
        get_online_cpus();

        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
                serial_mask = pinst->cpumask.cbcpu;
                parallel_mask = cpumask;
                break;
        case PADATA_CPU_SERIAL:
                parallel_mask = pinst->cpumask.pcpu;
                serial_mask = cpumask;
                break;
        default:
                goto out;
        }

        err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
        put_online_cpus();
        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
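/*
 * Usage sketch (illustrative, not part of padata): replace only the serial
 * cpumask, e.g. to move all serialization callbacks to cpu 2.  As above, the
 * mask is copied by the call, so the temporary variable can be freed.
 *
 *	cpumask_var_t mask;
 *	int err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(2, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_SERIAL, mask);
 *	free_cpumask_var(mask);
 */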
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);

                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }

        return 0;
}
/**
 * padata_add_cpu - add a cpu to one or both (parallel and serial)
 *                  padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
{
        int err;

        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
                return -EINVAL;

        mutex_lock(&pinst->lock);

        get_online_cpus();
        if (mask & PADATA_CPU_SERIAL)
                cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
        if (mask & PADATA_CPU_PARALLEL)
                cpumask_set_cpu(cpu, pinst->cpumask.pcpu);

        err = __padata_add_cpu(pinst, cpu);
        put_online_cpus();

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_add_cpu);
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
        struct parallel_data *pd = NULL;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {

                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);

                pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
                                     pinst->cpumask.cbcpu);
                if (!pd)
                        return -ENOMEM;

                padata_replace(pinst, pd);

                cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
                cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
        }

        return 0;
}
/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
        int err;

        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
                return -EINVAL;

        mutex_lock(&pinst->lock);

        get_online_cpus();
        if (mask & PADATA_CPU_SERIAL)
                cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
        if (mask & PADATA_CPU_PARALLEL)
                cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

        err = __padata_remove_cpu(pinst, cpu);
        put_online_cpus();

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
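/*
 * Usage sketch (illustrative, not part of padata): move cpu 3 out of the
 * parallel cpumask and into the serial cpumask of an instance.
 *
 *	err = padata_remove_cpu(pinst, 3, PADATA_CPU_PARALLEL);
 *	if (!err)
 *		err = padata_add_cpu(pinst, 3, PADATA_CPU_SERIAL);
 */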
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
        int err = 0;

        mutex_lock(&pinst->lock);

        if (pinst->flags & PADATA_INVALID)
                err = -EINVAL;
        else
                __padata_start(pinst);

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_start);
/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
        mutex_lock(&pinst->lock);
        __padata_stop(pinst);
        mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
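/*
 * Usage sketch (illustrative, not part of padata): a typical start/stop pair
 * around the submission phase.  padata_start() fails with -EINVAL while one
 * of the cpumasks contains no online cpu; padata_stop() flushes all pending
 * objects before it returns.
 *
 *	err = padata_start(pinst);
 *	if (err)
 *		return err;
 *
 *	// ... submit objects with padata_do_parallel() ...
 *
 *	padata_stop(pinst);
 */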
#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
        return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
                cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}
static int padata_cpu_callback(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
{
        int err;
        struct padata_instance *pinst;
        int cpu = (unsigned long)hcpu;

        pinst = container_of(nfb, struct padata_instance, cpu_notifier);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                err = __padata_add_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);
                if (err)
                        return notifier_from_errno(err);
                break;

        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                err = __padata_remove_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);
                if (err)
                        return notifier_from_errno(err);
                break;

        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                __padata_remove_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);

        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                if (!pinst_has_cpu(pinst, cpu))
                        break;
                mutex_lock(&pinst->lock);
                __padata_add_cpu(pinst, cpu);
                mutex_unlock(&pinst->lock);
        }

        return NOTIFY_OK;
}
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
        unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

        padata_stop(pinst);
        padata_free_pd(pinst->pd);
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        kfree(pinst);
}
#define kobj2pinst(_kobj)                                       \
        container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)                                      \
        container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
        struct padata_instance *pinst = kobj2pinst(kobj);

        __padata_free(pinst);
}

struct padata_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
        ssize_t (*store)(struct padata_instance *, struct attribute *,
                         const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
                            struct attribute *attr, char *buf)
{
        struct cpumask *cpumask;
        ssize_t len;

        mutex_lock(&pinst->lock);
        if (!strcmp(attr->name, "serial_cpumask"))
                cpumask = pinst->cpumask.cbcpu;
        else
                cpumask = pinst->cpumask.pcpu;

        len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask),
                               nr_cpu_ids);
        if (PAGE_SIZE - len < 2)
                len = -EINVAL;
        else
                len += sprintf(buf + len, "\n");

        mutex_unlock(&pinst->lock);
        return len;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
                             struct attribute *attr,
                             const char *buf, size_t count)
{
        cpumask_var_t new_cpumask;
        ssize_t ret;
        int mask_type;

        if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
                return -ENOMEM;

        ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
                           nr_cpumask_bits);
        if (ret < 0)
                goto out;

        mask_type = !strcmp(attr->name, "serial_cpumask") ?
                PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
        ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
        if (!ret)
                ret = count;

out:
        free_cpumask_var(new_cpumask);
        return ret;
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name)         \
        static struct padata_sysfs_entry _name##_attr =        \
                __ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)                      \
        static struct padata_sysfs_entry _name##_attr =        \
                __ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
        &serial_cpumask_attr.attr,
        &parallel_cpumask_attr.attr,
        NULL,
};
static ssize_t padata_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->show)
                ret = pentry->show(pinst, attr, buf);

        return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->store)
                ret = pentry->store(pinst, attr, buf, count);

        return ret;
}
static const struct sysfs_ops padata_sysfs_ops = {
        .show = padata_sysfs_show,
        .store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_attrs = padata_default_attrs,
        .release = padata_sysfs_release,
};
/**
 * padata_alloc_possible - Allocate and initialize a padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
        return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
                                     const struct cpumask *pcpumask,
                                     const struct cpumask *cbcpumask)
{
        struct padata_instance *pinst;
        struct parallel_data *pd = NULL;

        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
                goto err;

        get_online_cpus();
        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
                goto err_free_inst;
        if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pinst->cpumask.pcpu);
                goto err_free_inst;
        }
        if (!padata_validate_cpumask(pinst, pcpumask) ||
            !padata_validate_cpumask(pinst, cbcpumask))
                goto err_free_masks;

        pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
        if (!pd)
                goto err_free_masks;

        rcu_assign_pointer(pinst->pd, pd);

        pinst->wq = wq;

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        pinst->flags = 0;

#ifdef CONFIG_HOTPLUG_CPU
        pinst->cpu_notifier.notifier_call = padata_cpu_callback;
        pinst->cpu_notifier.priority = 0;
        register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

        put_online_cpus();

        BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
        kobject_init(&pinst->kobj, &padata_attr_type);
        mutex_init(&pinst->lock);

        return pinst;

err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
        kfree(pinst);
        put_online_cpus();
err:
        return NULL;
}
EXPORT_SYMBOL(padata_alloc);
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
        kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
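/*
 * Lifecycle sketch (illustrative, not part of padata): allocating, starting,
 * stopping and freeing an instance from a hypothetical module.  my_init(),
 * my_exit(), my_wq and my_pinst are made-up names; the workqueue flags mirror
 * what a cpu-intensive user might pick, not a requirement of padata.
 *
 *	static struct workqueue_struct *my_wq;
 *	static struct padata_instance *my_pinst;
 *
 *	static int __init my_init(void)
 *	{
 *		my_wq = alloc_workqueue("my_wq",
 *					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *		if (!my_wq)
 *			return -ENOMEM;
 *
 *		my_pinst = padata_alloc_possible(my_wq);
 *		if (!my_pinst) {
 *			destroy_workqueue(my_wq);
 *			return -ENOMEM;
 *		}
 *
 *		return padata_start(my_pinst);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		padata_stop(my_pinst);
 *		padata_free(my_pinst);	// final kobject_put(); __padata_free() runs on release
 *		destroy_workqueue(my_wq);
 *	}
 */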