// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		/* No bit found past @n: wrap around once and rescan from 0. */
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

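/*
 * Illustrative sketch (not part of the upstream file): open-coding the
 * for_each_cpu_wrap() pattern on top of cpumask_next_wrap(). The function
 * name and the per-cpu "work" are hypothetical placeholders.
 */
static void __maybe_unused example_walk_wrapped(const struct cpumask *mask,
						int start)
{
	/* The first probe starts just before @start with wrap disabled ... */
	unsigned int cpu = cpumask_next_wrap(start - 1, mask, start, false);

	while (cpu < nr_cpu_ids) {
		/* ... visit @cpu here ... */

		/* ... later probes pass wrap=true so crossing @start stops. */
		cpu = cpumask_next_wrap(cpu, mask, start, true);
	}
}
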
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

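/*
 * Illustrative sketch (not part of the upstream file): the usual
 * allocate/use/free pattern for an off-stack cpumask. The function name is
 * hypothetical.
 */
static int __maybe_unused example_count_online(void)
{
	cpumask_var_t tmp;
	int n;

	/* zalloc_cpumask_var() returns a zero-filled mask on success. */
	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmp, cpu_online_mask);
	n = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return n;
}
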
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

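/*
 * Illustrative sketch (not part of the upstream file): a boot-time user keeps
 * a cpumask across early init and releases it once it is no longer needed.
 * example_boot_mask and example_setup() are hypothetical names.
 */
static cpumask_var_t example_boot_mask __initdata;

static void __init __maybe_unused example_setup(void)
{
	/* Panics on failure, so no error handling is needed here. */
	alloc_bootmem_cpumask_var(&example_boot_mask);
	cpumask_copy(example_boot_mask, cpu_possible_mask);

	/* ... consume example_boot_mask during early boot ... */

	free_bootmem_cpumask_var(example_boot_mask);
}
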
/**
 * cpumask_local_spread - select the i'th cpu based on NUMA distances
 * @i: index number
 * @node: local numa_node
 *
 * Returns online CPU according to a numa aware policy; local cpus are returned
 * first, followed by non-local ones, then it wraps around.
 *
 * For those who want to enumerate all CPUs based on their NUMA distances,
 * i.e. call this function in a loop, like:
 *
 *	for (i = 0; i < num_online_cpus(); i++) {
 *		cpu = cpumask_local_spread(i, node);
 *		do_something(cpu);
 *	}
 *
 * there's a better alternative based on for_each()-like iterators:
 *
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *
 * The iterator-based version is more verbose but also more efficient:
 * its complexity is O(sched_domains_numa_levels * nr_cpu_ids), while
 * cpumask_local_spread() when called for each cpu is
 * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	cpu = (node == NUMA_NO_NODE) ?
		cpumask_nth(i, cpu_online_mask) :
		sched_numa_find_nth_cpu(cpu_online_mask, i, node);

	WARN_ON(cpu >= nr_cpu_ids);
	return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);

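/*
 * Illustrative sketch (not part of the upstream file): spreading a driver's
 * queues over online CPUs, closest to @node first, the way network drivers
 * typically assign queue affinities. The function name is hypothetical.
 */
static void __maybe_unused example_spread_queues(unsigned int nr_queues,
						 int node,
						 unsigned int *queue_to_cpu)
{
	unsigned int q;

	/* Queue q gets a CPU near @node first; assignments wrap as needed. */
	for (q = 0; q < nr_queues; q++)
		queue_to_cpu[q] = cpumask_local_spread(q, node);
}
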
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within srcp1 & srcp2.
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same srcp1 and srcp2 will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
				      nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);

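/*
 * Illustrative sketch (not part of the upstream file): picking a destination
 * CPU from the overlap of an allowed mask and the online mask, with repeat
 * callers spread across that overlap. example_pick_cpu() is hypothetical.
 */
static int __maybe_unused example_pick_cpu(const struct cpumask *allowed)
{
	unsigned int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);

	/* An empty intersection yields >= nr_cpu_ids. */
	if (cpu >= nr_cpu_ids)
		return -ENODEV;

	return cpu;
}
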
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
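
/*
 * Illustrative sketch (not part of the upstream file): selecting a CPU for
 * background work. Because distribute_cpu_mask_prev is per-cpu state, repeat
 * calls from one CPU rotate through @candidates rather than always returning
 * the first set bit. example_next_worker_cpu() is hypothetical.
 */
static unsigned int __maybe_unused example_next_worker_cpu(const struct cpumask *candidates)
{
	unsigned int cpu = cpumask_any_distribute(candidates);

	/* Fall back to the first online CPU if @candidates is empty. */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);

	return cpu;
}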