s390/mm: try VMA lock-based page fault handling first
author Heiko Carstens <hca@linux.ibm.com>
Tue, 14 Mar 2023 13:28:08 +0000 (14:28 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 6 Apr 2023 03:03:02 +0000 (20:03 -0700)
Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

This is the s390 variant of "x86/mm: try VMA lock-based page fault handling
first".

Link: https://lkml.kernel.org/r/20230314132808.1266335-1-hca@linux.ibm.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/s390/Kconfig
arch/s390/mm/fault.c

index 9809c74..548b5b5 100644 (file)
@@ -120,6 +120,7 @@ config S390
        select ARCH_SUPPORTS_DEBUG_PAGEALLOC
        select ARCH_SUPPORTS_HUGETLBFS
        select ARCH_SUPPORTS_NUMA_BALANCING
+       select ARCH_SUPPORTS_PER_VMA_LOCK
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
index a2632fd..b65144c 100644 (file)
@@ -407,6 +407,30 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
                access = VM_WRITE;
        if (access == VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
+#ifdef CONFIG_PER_VMA_LOCK
+       if (!(flags & FAULT_FLAG_USER))
+               goto lock_mmap;
+       vma = lock_vma_under_rcu(mm, address);
+       if (!vma)
+               goto lock_mmap;
+       if (!(vma->vm_flags & access)) {
+               vma_end_read(vma);
+               goto lock_mmap;
+       }
+       fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+       vma_end_read(vma);
+       if (!(fault & VM_FAULT_RETRY)) {
+               count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+               goto out;
+       }
+       count_vm_vma_lock_event(VMA_LOCK_RETRY);
+       /* Quick path to respond to signals */
+       if (fault_signal_pending(fault, regs)) {
+               fault = VM_FAULT_SIGNAL;
+               goto out;
+       }
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
        mmap_read_lock(mm);
 
        gmap = NULL;