MIPS: mm: Unify ASID version checks
authorPaul Burton <paul.burton@mips.com>
Sat, 2 Feb 2019 01:43:25 +0000 (01:43 +0000)
committerPaul Burton <paul.burton@mips.com>
Mon, 4 Feb 2019 18:56:30 +0000 (10:56 -0800)
Introduce a new check_mmu_context() function to check an mm's ASID
version & get a new one if it's outdated, and a
check_switch_mmu_context() function which additionally sets up the new
ASID & page directory. Simplify switch_mm() & various
get_new_mmu_context() call sites in MIPS KVM by making use of the new
functions, which will help reduce the amount of code that requires
modification to gain MMID support.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
arch/mips/include/asm/mmu_context.h
arch/mips/kvm/trap_emul.c
arch/mips/kvm/vz.c
arch/mips/mm/context.c

index cb39a39d02f632e2617a85a0e0d470b8d0ec100e..336682fb48de089c27a9c5d1c2ec33a907ffd6e8 100644 (file)
@@ -98,6 +98,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 }
 
 extern void get_new_mmu_context(struct mm_struct *mm);
+extern void check_mmu_context(struct mm_struct *mm);
+extern void check_switch_mmu_context(struct mm_struct *mm);
 
 /*
  * Initialize the context related info for a new mm_struct
@@ -126,11 +128,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        local_irq_save(flags);
 
        htw_stop();
-       /* Check if our ASID is of an older version and thus invalid */
-       if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
-               get_new_mmu_context(next);
-       write_c0_entryhi(cpu_asid(cpu, next));
-       TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+       check_switch_mmu_context(next);
 
        /*
         * Mark current->active_mm as not "active" anymore.
index 503c2fb7e4da9f0f6e1c56124e45199ff99d332c..22ffe72a9e4b463a57fa70ae7e1ed38de3f192c4 100644 (file)
@@ -1056,11 +1056,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         */
        if (current->flags & PF_VCPU) {
                mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
-               if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-                   asid_version_mask(cpu))
-                       get_new_mmu_context(mm);
-               write_c0_entryhi(cpu_asid(cpu, mm));
-               TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+               check_switch_mmu_context(mm);
                kvm_mips_suspend_mm(cpu);
                ehb();
        }
@@ -1074,11 +1070,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
 
        if (current->flags & PF_VCPU) {
                /* Restore normal Linux process memory map */
-               if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-                    asid_version_mask(cpu)))
-                       get_new_mmu_context(current->mm);
-               write_c0_entryhi(cpu_asid(cpu, current->mm));
-               TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+               check_switch_mmu_context(current->mm);
                kvm_mips_resume_mm(cpu);
                ehb();
        }
@@ -1228,9 +1220,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
         * Check if ASID is stale. This may happen due to a TLB flush request or
         * a lazy user MM invalidation.
         */
-       if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
-           asid_version_mask(cpu))
-               get_new_mmu_context(mm);
+       check_mmu_context(mm);
 }
 
 static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
@@ -1266,11 +1256,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
        cpu = smp_processor_id();
 
        /* Restore normal Linux process memory map */
-       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-            asid_version_mask(cpu)))
-               get_new_mmu_context(current->mm);
-       write_c0_entryhi(cpu_asid(cpu, current->mm));
-       TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
+       check_switch_mmu_context(current->mm);
        kvm_mips_resume_mm(cpu);
 
        htw_start();
index d98c12a22eacc7a694bb9947b5e57431ab57369a..dde20887a70dfc643c5bb1005599055d019e2eb5 100644 (file)
@@ -2454,10 +2454,10 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
                 * Root ASID dealiases guest GPA mappings in the root TLB.
                 * Allocate new root ASID if needed.
                 */
-               if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
-                   || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
-                                               asid_version_mask(cpu))
+               if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
                        get_new_mmu_context(gpa_mm);
+               else
+                       check_mmu_context(gpa_mm);
        }
 }
 
index b5af471006f0a5d0b9c8de73739977319692c9b2..4dd976acf41d1bc22d93fc85b084eccd497ada64 100644 (file)
@@ -17,3 +17,21 @@ void get_new_mmu_context(struct mm_struct *mm)
 
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
+
+void check_mmu_context(struct mm_struct *mm)
+{
+       unsigned int cpu = smp_processor_id();
+
+       /* Check if our ASID is of an older version and thus invalid */
+       if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
+               get_new_mmu_context(mm);
+}
+
+void check_switch_mmu_context(struct mm_struct *mm)
+{
+       unsigned int cpu = smp_processor_id();
+
+       check_mmu_context(mm);
+       write_c0_entryhi(cpu_asid(cpu, mm));
+       TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
+}