powerpc/book3s64/hash: Use the pte_t address from the caller
author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tue, 5 May 2020 07:17:12 +0000 (12:47 +0530)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 5 May 2020 11:20:14 +0000 (21:20 +1000)
Don't fetch the pte value using a lockless page table walk. Instead, use the
value passed in by the caller. hash_preload() is called with the PTL held, so
it is safe to use the pte_t address directly.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200505071729.54912-6-aneesh.kumar@linux.ibm.com
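
For context, a minimal before/after sketch of the pattern this patch applies
(not code from the tree: the preload body is elided, and only
find_current_mm_pte()/local_irq_save() correspond to the real interfaces
removed below):

/* Before: the callee re-walks the page table locklessly, so it must run
 * with IRQs disabled to keep the walk stable against concurrent teardown. */
static void hash_preload(struct mm_struct *mm, unsigned long ea,
                         bool is_exec, unsigned long trap)
{
        unsigned long flags;
        pte_t *ptep;

        local_irq_save(flags);
        ptep = find_current_mm_pte(mm->pgd, ea, NULL, NULL);
        if (!ptep)
                goto out_exit;
        /* ... insert the translation for *ptep into the hash table ... */
out_exit:
        local_irq_restore(flags);
}

/* After: the caller already located ptep under the PTL, so the pte value
 * is stable and can be dereferenced directly; no second walk is needed. */
static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                         bool is_exec, unsigned long trap)
{
        /* ... insert the translation for *ptep into the hash table ... */
}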
arch/powerpc/mm/book3s64/hash_utils.c

index 525eac4..3d727f7 100644
@@ -1546,14 +1546,11 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 }
 #endif
 
-static void hash_preload(struct mm_struct *mm, unsigned long ea,
+static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                         bool is_exec, unsigned long trap)
 {
-       int hugepage_shift;
        unsigned long vsid;
        pgd_t *pgdir;
-       pte_t *ptep;
-       unsigned long flags;
        int rc, ssize, update_flags = 0;
        unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
 
@@ -1575,30 +1572,18 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
        vsid = get_user_vsid(&mm->context, ea, ssize);
        if (!vsid)
                return;
-       /*
-        * Hash doesn't like irqs. Walking linux page table with irq disabled
-        * saves us from holding multiple locks.
-        */
-       local_irq_save(flags);
 
-       /*
-        * THP pages use update_mmu_cache_pmd. We don't do
-        * hash preload there. Hence can ignore THP here
-        */
-       ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
-       if (!ptep)
-               goto out_exit;
-
-       WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
        /* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
         * a 64K kernel), then we don't preload, hash_page() will take
         * care of it once we actually try to access the page.
         * That way we don't have to duplicate all of the logic for segment
         * page size demotion here
+        * Called with the PTL held, hence we can be sure the value won't
+        * change in between.
         */
        if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
-               goto out_exit;
+               return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
        /* Is that local to this CPU ? */
@@ -1623,8 +1608,6 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
                                   mm_ctx_user_psize(&mm->context),
                                   mm_ctx_user_psize(&mm->context),
                                   pte_val(*ptep));
-out_exit:
-       local_irq_restore(flags);
 }
 
 /*
@@ -1675,7 +1658,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                return;
        }
 
-       hash_preload(vma->vm_mm, address, is_exec, trap);
+       hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
 }
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
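
For reference, the caller-side locking that makes this safe: update_mmu_cache()
is invoked from the generic fault path while the PTL taken by
pte_offset_map_lock() is still held, so the ptep it forwards to hash_preload()
cannot change underneath us. A simplified sketch of that call sequence (the
rough shape of a fault handler in mm/memory.c around this kernel version;
checks and error paths omitted):

        pte_t *ptep;
        spinlock_t *ptl;

        ptep = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
        set_pte_at(vma->vm_mm, address, ptep, entry);
        /* PTL still held: *ptep is stable across this call */
        update_mmu_cache(vma, address, ptep);
        pte_unmap_unlock(ptep, ptl);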