SUPP_BANK_SEL(2);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
- cpu_set(0, cpu_online_map);
+ set_cpu_online(0, true);
cpu_set(0, phys_cpu_present_map);
- cpu_set(0, cpu_possible_map);
+ set_cpu_possible(0, true);
}
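/*
 * Note on the hunk above, not part of the patch: cpu_online_map and
 * cpu_possible_map were exported cpumask_t variables that callers modified
 * directly; set_cpu_online() and set_cpu_possible() are the accessor
 * replacements that update the generic cpu_online_mask / cpu_possible_mask.
 * A minimal sketch of the equivalence, assuming a hypothetical secondary
 * CPU number `cpu` being brought up:
 *
 *	cpu_set(cpu, cpu_online_map);	// old: poke the exported map directly
 *	set_cpu_online(cpu, true);	// new: go through the accessor
 */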
void __init smp_cpus_done(unsigned int max_cpus)
cpumask_t cpu_mask;
spin_lock_irqsave(&tlbstate_lock, flags);
- cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
+ cpu_mask = (mm == FLUSH_ALL ? *cpu_all_mask : *mm_cpumask(mm));
cpu_clear(smp_processor_id(), cpu_mask);
flush_mm = mm;
flush_vma = vma;
__flush_tlb_mm(mm);
flush_tlb_common(mm, FLUSH_ALL, 0);
/* No more mappings in other CPUs */
- cpus_clear(mm->cpu_vm_mask);
- cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+ cpumask_clear(mm_cpumask(mm));
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
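/*
 * Note on the hunk above, not part of the patch: mm_cpumask(mm) replaces the
 * open-coded mm->cpu_vm_mask.  switch_mm() (further below) sets the incoming
 * CPU's bit in this mask, and flush_tlb_mm() resets it to just the local CPU
 * once the broadcast flush is done.  A hypothetical reader side, using only
 * the generic cpumask API, would test membership like this:
 *
 *	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 *		;	// this CPU may still hold TLB entries for mm
 */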
void flush_tlb_page(struct vm_area_struct *vma,
/* Make sure there is a MMU context. */
spin_lock(&mmu_context_lock);
get_mmu_context(next);
- cpu_set(cpu, next->cpu_vm_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
spin_unlock(&mmu_context_lock);
/*
static void *c_start(struct seq_file *m, loff_t *pos)
{
- return *pos < NR_CPUS ? (void *)(int)(*pos + 1): NULL;
+ return *pos < nr_cpu_ids ? (void *)(int)(*pos + 1) : NULL;
}
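/*
 * Note on the hunk above, not part of the patch: NR_CPUS is only the
 * compile-time maximum, while nr_cpu_ids is the number of CPU ids actually
 * possible at runtime, so bounding the /proc/cpuinfo iterator with
 * nr_cpu_ids stops it at the real limit.  The matching c_next() below is
 * outside this excerpt; the conventional seq_file pattern (an assumption
 * here, not quoted from the file) just advances the position and re-applies
 * the same bound:
 *
 *	++*pos;
 *	return c_start(m, pos);
 */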
static void *c_next(struct seq_file *m, void *v, loff_t *pos)