diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 4e800bb..7cecd49 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -335,6 +335,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
        pud_t *pud;
        pmd_t *pmd, _pmd;
        pte_t *pte;
+       pte_t ptent;
        bool ret = true;
 
        mmap_assert_locked(mm);
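The new ptent local above pairs with the pte checks in the third hunk below: the pte is read once via ptep_get() and both tests look at that single snapshot. The old code dereferenced *pte twice, so a concurrent update between the two loads could make the checks disagree with each other. A minimal userspace illustration of the difference (demo names, not kernel APIs):

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic unsigned long entry;	/* stands in for a live pte */

	/* Racy shape: two loads may observe two different values. */
	static bool racy_check(void)
	{
		return atomic_load(&entry) == 0 ||
		       !(atomic_load(&entry) & 0x2);	/* hypothetical write bit */
	}

	/* Snapshot shape, like ptep_get(): both tests see one value. */
	static bool snapshot_check(void)
	{
		unsigned long v = atomic_load(&entry);

		return v == 0 || !(v & 0x2);
	}
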
@@ -349,20 +350,13 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
        if (!pud_present(*pud))
                goto out;
        pmd = pmd_offset(pud, address);
-       /*
-        * READ_ONCE must function as a barrier with narrower scope
-        * and it must be equivalent to:
-        *      _pmd = *pmd; barrier();
-        *
-        * This is to deal with the instability (as in
-        * pmd_trans_unstable) of the pmd.
-        */
-       _pmd = READ_ONCE(*pmd);
+again:
+       _pmd = pmdp_get_lockless(pmd);
        if (pmd_none(_pmd))
                goto out;
 
        ret = false;
-       if (!pmd_present(_pmd))
+       if (!pmd_present(_pmd) || pmd_devmap(_pmd))
                goto out;
 
        if (pmd_trans_huge(_pmd)) {
@@ -371,19 +365,20 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
                goto out;
        }
 
-       /*
-        * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
-        * and use the standard pte_offset_map() instead of parsing _pmd.
-        */
        pte = pte_offset_map(pmd, address);
+       if (!pte) {
+               ret = true;
+               goto again;
+       }
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.  PTE markers should be handled the same as none
         * ptes here.
         */
-       if (pte_none_mostly(*pte))
+       ptent = ptep_get(pte);
+       if (pte_none_mostly(ptent))
                ret = true;
-       if (!pte_write(*pte) && (reason & VM_UFFD_WP))
+       if (!pte_write(ptent) && (reason & VM_UFFD_WP))
                ret = true;
        pte_unmap(pte);
 
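Taken together, the two hunks above turn the walk into a retryable lockless probe: pmdp_get_lockless() yields a coherent pmd snapshot, pmd_devmap entries are filtered out since they carry no pte table, and a NULL return from pte_offset_map(), possible once page tables can be freed under a lockless walker, restarts the walk at again: rather than reporting a stale answer. Below is a compilable userspace analogue of the snapshot-then-revalidate-or-retry idea; all names in it are demo inventions, and real page-table walkers rely on RCU rather than this bare revalidation:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define ENTRIES 16

	static int backing[ENTRIES];
	static _Atomic(int *) level2 = backing;	/* a writer may swap this to NULL */

	static bool lookup(unsigned int idx, int *out)
	{
		int *tab;
	again:
		tab = atomic_load(&level2);	/* like pmdp_get_lockless() */
		if (!tab)
			return false;		/* like pmd_none(): nothing mapped */
		/*
		 * Like pte_offset_map() returning NULL: if the lower level
		 * changed after our snapshot, retry from the top instead of
		 * reading through a stale table.
		 */
		if (atomic_load(&level2) != tab)
			goto again;
		*out = tab[idx % ENTRIES];
		return true;
	}

	int main(void)
	{
		int v;

		if (lookup(3, &v))
			printf("entry 3 = %d\n", v);
		return 0;
	}
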
@@ -857,31 +852,26 @@ static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
        return false;
 }
 
-int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
+int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, struct list_head *unmaps)
 {
-       VMA_ITERATOR(vmi, mm, start);
-       struct vm_area_struct *vma;
-
-       for_each_vma_range(vmi, vma, end) {
-               struct userfaultfd_unmap_ctx *unmap_ctx;
-               struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
+       struct userfaultfd_unmap_ctx *unmap_ctx;
+       struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
 
-               if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
-                   has_unmap_ctx(ctx, unmaps, start, end))
-                       continue;
+       if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
+           has_unmap_ctx(ctx, unmaps, start, end))
+               return 0;
 
-               unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
-               if (!unmap_ctx)
-                       return -ENOMEM;
+       unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
+       if (!unmap_ctx)
+               return -ENOMEM;
 
-               userfaultfd_ctx_get(ctx);
-               atomic_inc(&ctx->mmap_changing);
-               unmap_ctx->ctx = ctx;
-               unmap_ctx->start = start;
-               unmap_ctx->end = end;
-               list_add_tail(&unmap_ctx->list, unmaps);
-       }
+       userfaultfd_ctx_get(ctx);
+       atomic_inc(&ctx->mmap_changing);
+       unmap_ctx->ctx = ctx;
+       unmap_ctx->start = start;
+       unmap_ctx->end = end;
+       list_add_tail(&unmap_ctx->list, unmaps);
 
        return 0;
 }
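
The last hunk inverts the iteration: userfaultfd_unmap_prep() used to build its own VMA_ITERATOR over [start, end), but the munmap path already walks those VMAs, so the per-VMA body is hoisted out and the caller now passes each vma in. The caller side then takes roughly the following shape (condensed and illustrative, not a verbatim copy of the munmap code):

	for_each_vma_range(vmi, next, end) {
		/* ... split/detach bookkeeping for next ... */
		if (unlikely(uf)) {
			int error = userfaultfd_unmap_prep(next, start, end, uf);

			if (error)
				goto userfaultfd_error;
		}
		/* ... */
	}

This drops one full range walk per munmap and lets the caller reuse the iterator state it already holds.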