	local_irq_save(flags);
	cpu = smp_processor_id();
-	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+	if (!cpu_context(cpu, mm)) {
+		/* no-op */
+	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		htw_stop();
		get_new_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
	}
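For context, a minimal sketch of how the whole helper might read with this hunk applied. Everything outside the hunk is assumed, not taken from the patch: the signature, the flags/cpu declarations, the htw_start() and local_irq_restore() calls, and the final else branch that clears the local context (here via set_cpu_context()) so a fresh ASID is allocated the next time the mm runs on this CPU.

static inline void drop_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	if (!cpu_context(cpu, mm)) {
		/* no live ASID on this CPU, so nothing to invalidate */
	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		/* mm is active here: move it to a new ASID immediately */
		htw_stop();
		get_new_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		htw_start();
	} else {
		/* assumed: defer by clearing the context; a new ASID is picked up later */
		set_cpu_context(cpu, mm, 0);
	}

	local_irq_restore(flags);
}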
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
-			int cpu = smp_processor_id();
-
-			if (cpu_context(cpu, mm) != 0)
-				drop_mmu_context(mm);
+			drop_mmu_context(mm);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
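Assuming the rest of this caller is unchanged, the exec branch collapses to roughly the following once the caller no longer duplicates the cpu_context() check (the closing brace of the outer if is outside the hunk above):

	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			/* virtually-tagged icache: invalidate by retiring the ASID
			   instead of blasting icache lines */
			drop_mmu_context(mm);
		} else
			vaddr ? r4k_blast_icache_page(addr) :
				r4k_blast_icache_user_page(addr);
	}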
void local_flush_tlb_mm(struct mm_struct *mm)
{
+#ifdef DEBUG_TLB
	int cpu = smp_processor_id();
-	if (cpu_context(cpu, mm) != 0) {
-#ifdef DEBUG_TLB
+	if (cpu_context(cpu, mm) != 0)
		printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
#endif
-		drop_mmu_context(mm);
-	}
+
+	drop_mmu_context(mm);
}
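Read together with the context lines, the function ends up looking roughly like this (assuming nothing else in it changes); with DEBUG_TLB undefined it reduces to a bare drop_mmu_context() call:

void local_flush_tlb_mm(struct mm_struct *mm)
{
#ifdef DEBUG_TLB
	int cpu = smp_processor_id();
	if (cpu_context(cpu, mm) != 0)
		printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
#endif

	drop_mmu_context(mm);
}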
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
-	int cpu;
-
	preempt_disable();
-
-	cpu = smp_processor_id();
-
-	if (cpu_context(cpu, mm) != 0) {
-		drop_mmu_context(mm);
-	}
-
+	drop_mmu_context(mm);
	preempt_enable();
}
void local_flush_tlb_mm(struct mm_struct *mm)
{
-	int cpu = smp_processor_id();
-
-	if (cpu_context(cpu, mm) != 0)
-		drop_mmu_context(mm);
+	drop_mmu_context(mm);
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,