mm/vma: make vma_is_accessible() available for general use
author: Anshuman Khandual <anshuman.khandual@arm.com>
author date: Tue, 7 Apr 2020 03:03:47 +0000 (20:03 -0700)
committer: Marek Szyprowski <m.szyprowski@samsung.com>
commit date: Wed, 17 Jan 2024 17:15:53 +0000 (18:15 +0100)
Let's move the vma_is_accessible() helper to include/linux/mm.h, which makes it
available for general use.  While here, this replaces all remaining open
encodings for the VMA access check with vma_is_accessible().

Change-Id: I475ac8df7e31c848d3c84688f7fb9e3f27c832f5
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Guo Ren <guoren@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Guo Ren <guoren@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paulburton@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Will Deacon <will@kernel.org>
Link: http://lkml.kernel.org/r/1582520593-30704-3-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[backport of the commit 3122e80efc0faf4a2accba7a46c7ed795edbfded from mainline]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
12 files changed:
arch/csky/mm/fault.c
arch/m68k/mm/fault.c
arch/mips/mm/fault.c
arch/powerpc/mm/fault.c
arch/sh/mm/fault.c
arch/x86/mm/fault.c
include/linux/mm.h
kernel/sched/fair.c
mm/gup.c
mm/memory.c
mm/mempolicy.c
mm/mmap.c

index 562c7f7..890b193 100644 (file)
@@ -137,7 +137,7 @@ good_area:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
-               if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+               if (!vma_is_accessible(vma))
                        goto bad_area;
        }
 
index e9b1d75..d5131ec 100644 (file)
@@ -125,7 +125,7 @@ good_area:
                case 1:         /* read, present */
                        goto acc_err;
                case 0:         /* read, not present */
-                       if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+                       if (!vma_is_accessible(vma))
                                goto acc_err;
        }
 
index f589aa8..ce5d403 100644 (file)
@@ -142,7 +142,7 @@ good_area:
                                goto bad_area;
                        }
                } else {
-                       if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+                       if (!vma_is_accessible(vma))
                                goto bad_area;
                }
        }
index 881a026..fe19731 100644 (file)
@@ -318,7 +318,7 @@ static bool access_error(bool is_write, bool is_exec,
                return false;
        }
 
-       if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+       if (unlikely(!vma_is_accessible(vma)))
                return true;
        /*
         * We should ideally do the vma pkey access check here. But in the
index 5f51456..a8c4253 100644 (file)
@@ -355,7 +355,7 @@ static inline int access_error(int error_code, struct vm_area_struct *vma)
                return 1;
 
        /* read, not present: */
-       if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+       if (unlikely(!vma_is_accessible(vma)))
                return 1;
 
        return 0;
index c494c8c..13b5417 100644 (file)
@@ -1221,7 +1221,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
                return 1;
 
        /* read, not present: */
-       if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+       if (unlikely(!vma_is_accessible(vma)))
                return 1;
 
        return 0;
index 3285dae..c3d032a 100644 (file)
@@ -546,6 +546,12 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
        return !vma->vm_ops;
 }
 
+
+static inline bool vma_is_accessible(struct vm_area_struct *vma)
+{
+       return vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
+}
+
 #ifdef CONFIG_SHMEM
 /*
  * The vma_is_shmem is not inline because it is used only by slow
index 2f81e4a..b86203e 100644 (file)
@@ -2569,7 +2569,7 @@ static void task_numa_work(struct callback_head *work)
                 * Skip inaccessible VMAs to avoid any confusion between
                 * PROT_NONE and NUMA hinting ptes
                 */
-               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+               if (!vma_is_accessible(vma))
                        continue;
 
                do {
index 4a8e969..ba2c6c0 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1228,7 +1228,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
         * We want mlock to succeed for regions that have any permissions
         * other than PROT_NONE.
         */
-       if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+       if (vma_is_accessible(vma))
                gup_flags |= FOLL_FORCE;
 
        /*
index 6b71b84..b4e9146 100644 (file)
@@ -3774,11 +3774,6 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
        return VM_FAULT_FALLBACK;
 }
 
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
-{
-       return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
-}
-
 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 787c5fc..d16f1c8 100644 (file)
@@ -642,8 +642,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 
        if (flags & MPOL_MF_LAZY) {
                /* Similar to task_numa_work, skip inaccessible VMAs */
-               if (!is_vm_hugetlb_page(vma) &&
-                       (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
+               if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
                        !(vma->vm_flags & VM_MIXEDMAP))
                        change_prot_numa(vma, start, endvma);
                return 1;
index 514cc19..b6cda33 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2360,8 +2360,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                gap_addr = TASK_SIZE;
 
        next = vma->vm_next;
-       if (next && next->vm_start < gap_addr &&
-                       (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+       if (next && next->vm_start < gap_addr && vma_is_accessible(next)) {
                if (!(next->vm_flags & VM_GROWSUP))
                        return -ENOMEM;
                /* Check that both stack segments have the same anon_vma? */
@@ -2442,7 +2441,7 @@ int expand_downwards(struct vm_area_struct *vma,
        prev = vma->vm_prev;
        /* Check that both stack segments have the same anon_vma? */
        if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
-                       (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+                       vma_is_accessible(prev)) {
                if (address - prev->vm_end < stack_guard_gap)
                        return -ENOMEM;
        }