mm: thp: add acquisition/release of a lock to guarantee consistent locking state
authorSung-hun Kim <sfoon.kim@samsung.com>
Fri, 1 Oct 2021 04:01:01 +0000 (13:01 +0900)
committerHoegeun Kwon <hoegeun.kwon@samsung.com>
Mon, 7 Feb 2022 08:01:41 +0000 (17:01 +0900)
arm64_wp_huge_pte should reacquire the lock before returning,
to preserve the locking semantics expected by its caller.
To guarantee this, add a new lock acquisition statement and a
new lock release statement at the proper positions.

Change-Id: I81fb8afc37f54bce83f353ca6b6894e70ef86934
Signed-off-by: Sung-hun Kim <sfoon.kim@samsung.com>
arch/arm64/mm/huge_memory.c
mm/memory.c

index 1073fde..4dbb11d 100644 (file)
@@ -470,8 +470,12 @@ vm_fault_t arm64_wp_huge_pte(struct vm_fault *vmf, pte_t orig_pte)
        pte_t *hpte_p;
 
        if (vma_is_anonymous(vmf->vma)) {
+               int ret;
+
                spin_unlock(vmf->ptl);
-               return arm64_do_huge_pte_wp_page(vmf, orig_pte);
+               ret = arm64_do_huge_pte_wp_page(vmf, orig_pte);
+               spin_lock(vmf->ptl);
+               return ret;
        }
 
        VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
index e1924c1..6797a78 100644 (file)
@@ -4635,8 +4635,14 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
                if (!pte_write(entry)) {
                        int ret = arch_do_wp_page(vmf, entry);
 
-                       if (!(ret & VM_FAULT_FALLBACK))
+                       if (!(ret & VM_FAULT_FALLBACK)) {
+                               /*
+                                * arch_do_wp_page returns
+                                * VM_FAULT value with spin lock acquisition.
+                                */
+                               spin_unlock(vmf->ptl);
                                return ret;
+                       }
                        return do_wp_page(vmf);
                }
                if (arch_huge_pte_set_accessed(vmf, entry))