#define print_cpus_func(type) \
static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
{ \
-	return print_cpus_map(buf, &cpu_##type##_map); \
+	return print_cpus_map(buf, cpu_##type##_mask); \
} \
static struct sysdev_class_attribute attr_##type##_map = \
	_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
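The conversion above drops the '&': the old code exported cpu_online_map and friends as writable cpumask_t objects whose address callers took, while the new API exports cpu_##type##_mask as a const struct cpumask * directly. A minimal standalone sketch of the two export styles, assuming a one-word bitmap and a toy print_cpus_map(); none of this is the kernel's actual definition:

#include <stdio.h>

struct cpumask { unsigned long bits[1]; };

/* Old style: a writable global object; callers pass its address. */
static struct cpumask cpu_online_map = { { 0x3 } };

/* New style: only a const pointer is exposed, so callers can neither
 * write through it nor copy the map by value by accident. */
static const struct cpumask *const cpu_online_mask = &cpu_online_map;

static int print_cpus_map(char *buf, const struct cpumask *mask)
{
	return sprintf(buf, "%lx\n", mask->bits[0]);
}

int main(void)
{
	char buf[32];

	print_cpus_map(buf, &cpu_online_map);	/* old call style */
	printf("old: %s", buf);
	print_cpus_map(buf, cpu_online_mask);	/* new call style */
	printf("new: %s", buf);
	return 0;
}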
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
-	*mask = cpu_possible_map;
+	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					      struct cpumask *mask)
{
-	*mask = cpu_possible_map;
+	cpumask_copy(mask, cpu_possible_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
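Plain struct assignment copies a full NR_CPUS-sized cpumask_t by value, and it stops compiling once only a const pointer is exported; cpumask_copy() copies just nr_cpumask_bits bits, which is what makes CONFIG_CPUMASK_OFFSTACK (masks smaller than NR_CPUS, possibly heap-allocated) viable. A standalone sketch, with nr_cpumask_bits and the helper modeled here rather than taken from the kernel headers:

#include <stdio.h>
#include <string.h>

#define NR_CPUS		1024
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct cpumask { unsigned long bits[BITS_TO_LONGS(NR_CPUS)]; };

/* With CONFIG_CPUMASK_OFFSTACK the kernel tracks how many bits are
 * actually in use; modeled here as a plain variable. */
static unsigned int nr_cpumask_bits = 64;

static void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
	/* Copy only the live part of the bitmap, not all NR_CPUS bits. */
	memcpy(dstp->bits, srcp->bits,
	       BITS_TO_LONGS(nr_cpumask_bits) * sizeof(unsigned long));
}

int main(void)
{
	static struct cpumask possible = { { 0xf } };
	struct cpumask allowed;

	cpumask_copy(&allowed, &possible);
	printf("allowed: %lx\n", allowed.bits[0]);
	return 0;
}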
	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
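for_each_cpu_mask_nr() took the mask by value, hence the *cpu_map dereference; for_each_cpu() takes a const struct cpumask *, so the pointer is passed through unchanged. The same substitution repeats in the two workqueue loops below. A standalone model of the iterator, with cpumask_next() written out by hand as an assumption about its behavior:

#include <stdio.h>

#define NR_CPUS 16

struct cpumask { unsigned long bits; };

static int cpumask_next(int prev, const struct cpumask *mask)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (mask->bits & (1UL << cpu))
			return cpu;
	return NR_CPUS;
}

/* Like the kernel macro, this takes a pointer, not a mask value. */
#define for_each_cpu(cpu, mask)				\
	for ((cpu) = cpumask_next(-1, (mask));		\
	     (cpu) < NR_CPUS;				\
	     (cpu) = cpumask_next((cpu), (mask)))

int main(void)
{
	struct cpumask online = { 0x2d };	/* CPUs 0, 2, 3, 5 */
	const struct cpumask *cpu_map = &online;
	int cpu;

	for_each_cpu(cpu, cpu_map)		/* no *cpu_map needed */
		printf("flush cpu %d\n", cpu);
	return 0;
}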
	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();
{
	if (unlikely(!__pdata))
		return;
-	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
+	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
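Same pattern as the sysfs hunk above: cpu_possible_mask is already a const struct cpumask *, so the '&' goes away. A toy version of the depopulate-then-free sequence; the allocator, the mask setup, and free_percpu()'s signature are simplified stand-ins for the real per-CPU machinery (no __percpu_disguise here):

#include <stdlib.h>

#define NR_CPUS 4

struct cpumask { unsigned long bits; };

static const struct cpumask possible_map = { (1UL << NR_CPUS) - 1 };
static const struct cpumask *const cpu_possible_mask = &possible_map;

/* Stand-in for the real per-CPU bookkeeping structure. */
struct percpu_data { void *ptrs[NR_CPUS]; };

static void __percpu_depopulate_mask(struct percpu_data *pdata,
				     const struct cpumask *mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask->bits & (1UL << cpu)) {
			free(pdata->ptrs[cpu]);
			pdata->ptrs[cpu] = NULL;
		}
}

static void free_percpu(struct percpu_data *pdata)
{
	if (!pdata)
		return;
	/* New API: the const pointer is passed as-is, no '&'. */
	__percpu_depopulate_mask(pdata, cpu_possible_mask);
	free(pdata);
}

int main(void)
{
	struct percpu_data *pd = calloc(1, sizeof(*pd));

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pd->ptrs[cpu] = malloc(64);
	free_percpu(pd);
	return 0;
}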
	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

-	for_each_cpu_mask_nr(cpu, *cpumask) {
+	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
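The loop above sums per-CPU vm_event_states counters over the CPUs present in the mask. A standalone model of that summation, with per_cpu() replaced by a plain array and the iterator spelled out as an if-continue, both assumptions made to keep the sketch self-contained:

#include <stdio.h>
#include <string.h>

#define NR_CPUS			4
#define NR_VM_EVENT_ITEMS	3

struct cpumask { unsigned long bits; };
struct vm_event_state { unsigned long event[NR_VM_EVENT_ITEMS]; };

/* Per-CPU counters, modeled as a plain array instead of per_cpu(). */
static struct vm_event_state vm_event_states[NR_CPUS] = {
	{ { 1, 2, 3 } }, { { 4, 5, 6 } }, { { 7, 8, 9 } }, { { 10, 11, 12 } },
};

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		struct vm_event_state *this;

		if (!(cpumask->bits & (1UL << cpu)))
			continue;	/* stands in for for_each_cpu() */
		this = &vm_event_states[cpu];
		for (int i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

int main(void)
{
	unsigned long totals[NR_VM_EVENT_ITEMS];
	const struct cpumask online = { 0xb };	/* CPUs 0, 1, 3 */

	sum_vm_events(totals, &online);
	for (int i = 0; i < NR_VM_EVENT_ITEMS; i++)
		printf("event %d: %lu\n", i, totals[i]);
	return 0;
}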
	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
-		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
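cpumask_of_cpu() built an NR_CPUS-sized cpumask_t value whose address the caller then took; cpumask_of() instead returns a const struct cpumask * into shared read-only data, so no large temporary is created. The kernel derives that pointer from its cpu_bit_bitmap table; the simple per-CPU array and the stub set_cpus_allowed_ptr() below are assumptions made to keep the sketch standalone:

#include <stdio.h>

#define NR_CPUS 4

struct cpumask { unsigned long bits; };

/* Prebuilt read-only single-bit masks, one per CPU. */
static const struct cpumask one_cpu_mask[NR_CPUS] = {
	{ 1UL << 0 }, { 1UL << 1 }, { 1UL << 2 }, { 1UL << 3 },
};

static const struct cpumask *cpumask_of(unsigned int cpu)
{
	return &one_cpu_mask[cpu];
}

/* Stand-in for the scheduler call; just reports the mask it was given. */
static void set_cpus_allowed_ptr(const char *comm, const struct cpumask *mask)
{
	printf("%s -> affinity mask %lx\n", comm, mask->bits);
}

int main(void)
{
	unsigned int node = 2;	/* in SVC_POOL_PERCPU mode this is a CPU id */

	set_cpus_allowed_ptr("nfsd", cpumask_of(node));
	return 0;
}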