powerpc/mm/radix: Change pte relax sequence to handle nest MMU hang
authorAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tue, 29 May 2018 14:28:41 +0000 (19:58 +0530)
committerMichael Ellerman <mpe@ellerman.id.au>
Sun, 3 Jun 2018 10:40:34 +0000 (20:40 +1000)
When relaxing access (read -> read_write update), pte needs to be marked invalid
to handle a nest MMU bug. We also need to do a tlb flush after the pte is
marked invalid before updating the pte with new access bits.

We also move tlb flush to platform specific __ptep_set_access_flags. This will
help us to get rid of unnecessary tlb flushes on BOOK3S 64 later. We don't do that
in this patch. This also helps in avoiding multiple tlbies with coprocessor
attached.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/32/pgtable.h
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/include/asm/pgtable.h
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable.c

index 39d3a42..02f5acd 100644 (file)
@@ -245,6 +245,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
        unsigned long clr = ~pte_val(entry) & _PAGE_RO;
 
        pte_update(ptep, clr, set);
+
+       flush_tlb_page(vma, address);
 }
 
 #define __HAVE_ARCH_PTE_SAME
index c2471ba..7c46a98 100644 (file)
@@ -266,6 +266,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
        unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
 
        pte_update(ptep, clr, set);
+
+       flush_tlb_page(vma, address);
 }
 
 static inline int pte_young(pte_t pte)
index 180161d..dd0c723 100644 (file)
@@ -304,6 +304,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
        unsigned long old = pte_val(*ptep);
        *ptep = __pte(old | bits);
 #endif
+
+       flush_tlb_page(vma, address);
 }
 
 #define __HAVE_ARCH_PTE_SAME
index ab7d2d9..14c79a7 100644 (file)
@@ -8,6 +8,7 @@
 #include <asm/processor.h>             /* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+#include <asm/tlbflush.h>
 
 struct mm_struct;
 
index 4a81504..82fed87 100644 (file)
@@ -52,7 +52,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                 */
                __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address, MMU_PAGE_2M);
-               flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
 }
index 2034cbc..0ddfe59 100644 (file)
@@ -1091,8 +1091,12 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
                                              _PAGE_RW | _PAGE_EXEC);
-
-       if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+       /*
+        * To avoid NMMU hang while relaxing access, we need mark
+        * the pte invalid in between.
+        */
+       if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
+           atomic_read(&mm->context.copros) > 0) {
                unsigned long old_pte, new_pte;
 
                old_pte = __radix_pte_update(ptep, ~0, 0);
@@ -1100,9 +1104,11 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                 * new value of pte
                 */
                new_pte = old_pte | set;
-               radix__flush_tlb_pte_p9_dd1(old_pte, mm, address);
+               radix__flush_tlb_page_psize(mm, address, psize);
                __radix_pte_update(ptep, 0, new_pte);
-       } else
+       } else {
                __radix_pte_update(ptep, 0, set);
+               radix__flush_tlb_page_psize(mm, address, psize);
+       }
        asm volatile("ptesync" : : : "memory");
 }
index 20cacd3..5281c2c 100644 (file)
@@ -224,7 +224,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                assert_pte_locked(vma->vm_mm, address);
                __ptep_set_access_flags(vma, ptep, entry,
                                        address, mmu_virtual_psize);
-               flush_tlb_page(vma, address);
        }
        return changed;
 }
@@ -263,7 +262,6 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                assert_spin_locked(&vma->vm_mm->page_table_lock);
 #endif
                __ptep_set_access_flags(vma, ptep, pte, addr, psize);
-               flush_hugetlb_page(vma, addr);
        }
        return changed;
 #endif