nds32: Simplify the implementation of update_mmu_cache()
author     Greentime Hu <greentime@andestech.com>
           Tue, 3 Jul 2018 06:07:11 +0000 (14:07 +0800)
committer  Greentime Hu <greentime@andestech.com>
           Tue, 3 Jul 2018 06:18:17 +0000 (14:18 +0800)
The highmem check is already done inside kmap_atomic(), so there is no
need to repeat it in update_mmu_cache(). There is also no need for
separate cache-aliasing and cache non-aliasing implementations; a
single version can serve both configurations.
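
As a minimal illustration (simplified from the generic highmem pattern,
not the verbatim nds32 code), the PageHighMem() check already lives
inside kmap_atomic() itself; kmap_atomic_high() below stands in for the
arch-specific highmem path:

void *kmap_atomic(struct page *page)
{
        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);      /* lowmem: direct mapping */
        return kmap_atomic_high(page);          /* highmem: temporary mapping */
}

Since kmap_atomic() degenerates to page_address() for lowmem pages,
calling it unconditionally in update_mmu_cache() is equivalent to the
old two-branch code.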

Signed-off-by: Greentime Hu <greentime@andestech.com>
arch/nds32/mm/cacheflush.c

index 7fcaa4e..2547036 100644
@@ -45,7 +45,7 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
        flush_icache_range(kaddr, kaddr + len);
        kunmap_atomic((void *)kaddr);
 }
-#ifndef CONFIG_CPU_CACHE_ALIASING
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t * pte)
 {
@@ -67,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
        if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
            (vma->vm_flags & VM_EXEC)) {
-
-               if (!PageHighMem(page)) {
-                       cpu_cache_wbinval_page((unsigned long)
-                                              page_address(page),
-                                              vma->vm_flags & VM_EXEC);
-               } else {
-                       unsigned long kaddr = (unsigned long)kmap_atomic(page);
-                       cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
-                       kunmap_atomic((void *)kaddr);
-               }
+               unsigned long kaddr;
+               local_irq_save(flags);
+               kaddr = (unsigned long)kmap_atomic(page);
+               cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+               kunmap_atomic((void *)kaddr);
+               local_irq_restore(flags);
        }
 }
-#else
+#ifdef CONFIG_CPU_CACHE_ALIASING
 extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
 
 static inline unsigned long aliasing(unsigned long addr, unsigned long page)
@@ -349,31 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size)
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-                     pte_t * pte)
-{
-       struct page *page;
-       unsigned long flags;
-       unsigned long pfn = pte_pfn(*pte);
-
-       if (!pfn_valid(pfn))
-               return;
-
-       if (vma->vm_mm == current->active_mm) {
-               local_irq_save(flags);
-               __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
-               __nds32__tlbop_rwr(*pte);
-               __nds32__isb();
-               local_irq_restore(flags);
-       }
-
-       page = pfn_to_page(pfn);
-       if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
-           (vma->vm_flags & VM_EXEC)) {
-               local_irq_save(flags);
-               cpu_dcache_wbinval_page((unsigned long)page_address(page));
-               local_irq_restore(flags);
-       }
-}
 #endif
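
For reference, the unified update_mmu_cache() reads as follows after
this patch. This is reconstructed from the hunks above (the TLB-refill
block is taken from the removed aliasing copy, whose body was identical
in that part), so treat it as a sketch of the result rather than a
verbatim quote of the file:

void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                      pte_t * pte)
{
        struct page *page;
        unsigned long flags;
        unsigned long pfn = pte_pfn(*pte);

        if (!pfn_valid(pfn))
                return;

        /* Refill the TLB entry when the address is in the current mm. */
        if (vma->vm_mm == current->active_mm) {
                local_irq_save(flags);
                __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
                __nds32__tlbop_rwr(*pte);
                __nds32__isb();
                local_irq_restore(flags);
        }

        page = pfn_to_page(pfn);
        if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
            (vma->vm_flags & VM_EXEC)) {
                unsigned long kaddr;

                /* kmap_atomic() copes with both lowmem and highmem pages,
                 * so no PageHighMem() branch is needed here. */
                local_irq_save(flags);
                kaddr = (unsigned long)kmap_atomic(page);
                cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
                kunmap_atomic((void *)kaddr);
                local_irq_restore(flags);
        }
}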