powerpc/mm: Export flush_all_mm()
author	Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Sun, 3 Sep 2017 18:15:12 +0000 (20:15 +0200)
committer	Michael Ellerman <mpe@ellerman.id.au>
Thu, 28 Sep 2017 06:28:22 +0000 (16:28 +1000)
With the optimizations introduced by commit a46cc7a90fd8
("powerpc/mm/radix: Improve TLB/PWC flushes"), flush_tlb_mm() no
longer flushes the page walk cache (PWC) with radix. This patch
introduces flush_all_mm(), which flushes everything, TLB and PWC, for
a given mm.
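
For illustration only (not part of this patch): a minimal, hypothetical sketch of a caller that needs both flushes on a book3s64 kernel, e.g. a driver tearing down translations used by an external unit. The function name and scenario are made up; only flush_all_mm() comes from this patch.

    #include <linux/mm_types.h>
    #include <asm/tlbflush.h>	/* flush_all_mm() on book3s64 */

    /*
     * Hypothetical caller: after invalidating translations used by an
     * external unit, flush both the TLB and the page walk cache for the
     * mm so no stale PWC entries survive (with radix, flush_tlb_mm()
     * alone no longer guarantees this).
     */
    static void example_detach_mm(struct mm_struct *mm)
    {
    	flush_all_mm(mm);
    }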

Signed-off-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Reviewed-By: Alistair Popple <alistair@popple.id.au>
[mpe: Add a WARN_ON_ONCE() in the empty hash routines]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/mm/tlb-radix.c

diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 2f63731..99c99bb 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -65,6 +65,28 @@ static inline void hash__flush_tlb_mm(struct mm_struct *mm)
 {
 }
 
+static inline void hash__local_flush_all_mm(struct mm_struct *mm)
+{
+       /*
+        * There's no Page Walk Cache for hash, so what is needed is
+        * the same as flush_tlb_mm(), which doesn't really make sense
+        * with hash. So the only thing we could do is flush the
+        * entire LPID! Punt for now, as it's not being used.
+        */
+       WARN_ON_ONCE(1);
+}
+
+static inline void hash__flush_all_mm(struct mm_struct *mm)
+{
+       /*
+        * There's no Page Walk Cache for hash, so what is needed is
+        * the same as flush_tlb_mm(), which doesn't really make sense
+        * with hash. So the only thing we could do is flush the
+        * entire LPID! Punt for now, as it's not being used.
+        */
+       WARN_ON_ONCE(1);
+}
+
 static inline void hash__local_flush_tlb_page(struct vm_area_struct *vma,
                                          unsigned long vmaddr)
 {
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 9b433a6..af06c6f 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -21,17 +21,20 @@ extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long sta
 extern void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
+extern void radix__local_flush_all_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                              int psize);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
+extern void radix__flush_all_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                        int psize);
 #else
 #define radix__flush_tlb_mm(mm)                radix__local_flush_tlb_mm(mm)
+#define radix__flush_all_mm(mm)                radix__local_flush_all_mm(mm)
 #define radix__flush_tlb_page(vma,addr)        radix__local_flush_tlb_page(vma,addr)
 #define radix__flush_tlb_page_psize(mm,addr,p) radix__local_flush_tlb_page_psize(mm,addr,p)
 #endif
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 72b925f..70760d0 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -57,6 +57,13 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma,
        return hash__local_flush_tlb_page(vma, vmaddr);
 }
 
+static inline void local_flush_all_mm(struct mm_struct *mm)
+{
+       if (radix_enabled())
+               return radix__local_flush_all_mm(mm);
+       return hash__local_flush_all_mm(mm);
+}
+
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
        if (radix_enabled())
@@ -79,9 +86,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
                return radix__flush_tlb_page(vma, vmaddr);
        return hash__flush_tlb_page(vma, vmaddr);
 }
+
+static inline void flush_all_mm(struct mm_struct *mm)
+{
+       if (radix_enabled())
+               return radix__flush_all_mm(mm);
+       return hash__flush_all_mm(mm);
+}
 #else
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)      local_flush_tlb_page(vma, addr)
+#define flush_all_mm(mm)               local_flush_all_mm(mm)
 #endif /* CONFIG_SMP */
 /*
  * flush the page walk cache for the address
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index b3e849c..5a1f46e 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -144,7 +144,7 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 
 #ifndef CONFIG_SMP
-static void radix__local_flush_all_mm(struct mm_struct *mm)
+void radix__local_flush_all_mm(struct mm_struct *mm)
 {
        unsigned long pid;
 
@@ -154,6 +154,7 @@ static void radix__local_flush_all_mm(struct mm_struct *mm)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
 }
+EXPORT_SYMBOL(radix__local_flush_all_mm);
 #endif /* CONFIG_SMP */
 
 void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
@@ -200,7 +201,7 @@ no_context:
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
-static void radix__flush_all_mm(struct mm_struct *mm)
+void radix__flush_all_mm(struct mm_struct *mm)
 {
        unsigned long pid;
 
@@ -216,6 +217,7 @@ static void radix__flush_all_mm(struct mm_struct *mm)
 no_context:
        preempt_enable();
 }
+EXPORT_SYMBOL(radix__flush_all_mm);
 
 void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
 {