/*
* acquire the CPU spinlocks
*/
- for (i = num_online_cpus()-1; i >= 0; i--)
+ for_each_online_cpu(i)
if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
panic("kgdb: couldn't get cpulock %d\n", i);
exit_kgdb_exception:
/* release locks so other CPUs can go */
- for (i = num_online_cpus()-1; i >= 0; i--)
+ for_each_online_cpu(i)
__raw_spin_unlock(&kgdb_cpulock[i]);
spin_unlock(&kgdb_lock);
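The conversion is safe even when the set of online CPUs has holes in it: for_each_online_cpu() walks the set bits of cpu_online_map rather than assuming that IDs 0..num_online_cpus()-1 are all online, which is exactly what the old descending loops assumed. In kernels of this vintage the macros expand roughly like this (a sketch from memory of include/linux/cpumask.h; exact details vary by version):

	#define for_each_cpu_mask(cpu, mask)		\
		for ((cpu) = first_cpu(mask);		\
		     (cpu) < NR_CPUS;			\
		     (cpu) = next_cpu((cpu), (mask)))

	#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)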
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+			for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
}
local_flush_tlb_mm(mm);
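Note that iterating with for_each_online_cpu() here would have made mask dead code and silently stopped skipping the local CPU, hence for_each_cpu_mask() over the cleared mask. The same "clear every other CPU's ASID for this mm" pattern recurs twice more below, in the flush_tlb_range() and flush_tlb_page() hunks; a hypothetical helper along these lines could factor it out (the name and placement are illustrative, not from the tree; it relies only on the cpumask and cpu_context() APIs already used above):

	/* Needs <linux/cpumask.h> and <asm/mmu_context.h>. */
	static inline void clear_other_cpus_context(struct mm_struct *mm)
	{
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		/* Drop the stale ASID on every online CPU except this one,
		 * forcing a fresh ASID allocation the next time the mm is
		 * scheduled there. */
		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}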
fd.addr2 = end;
smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+			for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
}
local_flush_tlb_range(vma, start, end);
preempt_enable();
fd.addr1 = page;
smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
} else {
- int i;
- for (i = 0; i < num_online_cpus(); i++)
- if (smp_processor_id() != i)
- cpu_context(i, vma->vm_mm) = 0;
+ cpumask_t mask = cpu_online_map;
+ unsigned int cpu;
+
+ cpu_clear(smp_processor_id(), mask);
+			for_each_cpu_mask(cpu, mask)
+ if (cpu_context(cpu, vma->vm_mm))
+ cpu_context(cpu, vma->vm_mm) = 0;
}
local_flush_tlb_page(vma, page);
preempt_enable();
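With a helper like the hypothetical clear_other_cpus_context() sketched earlier, each of the three else branches would collapse to a single call, e.g.:

	} else
		clear_other_cpus_context(vma->vm_mm);

The cpu_context(cpu, mm) test before the store looks redundant, but presumably it avoids dirtying remote cache lines whose context is already zero.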
if (cpu_has_vtag_icache)
flush_icache_all();
-	/* Traverse all online CPUs (hack requires contigous range) */
-	for (i = 0; i < num_online_cpus(); i++) {
+	/* Traverse all online CPUs */
+	for_each_online_cpu(i) {
/*
* We don't need to worry about our own CPU, nor those of
* CPUs who don't share our TLB.
*/
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
*/
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
if ((smtc_status & SMTC_TLB_SHARED) ||
(cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = asid_cache(i) = asid;
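The condition does double duty: either the whole processor shares one TLB across all VPEs (SMTC_TLB_SHARED), or the two CPUs are thread contexts on the same VPE and hence see the same TLB either way. A hypothetical predicate, shown only to make that intent explicit, could read:

	/* Illustrative only: does CPU i share a TLB with CPU cpu under SMTC? */
	static inline int smtc_shares_tlb(int i, int cpu)
	{
		return (smtc_status & SMTC_TLB_SHARED) ||
		       (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id);
	}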