s390/mm: cleanup arch_get_unmapped_area() and friends
authorAlexander Gordeev <agordeev@linux.ibm.com>
Mon, 23 Mar 2020 08:38:37 +0000 (09:38 +0100)
committerVasily Gorbik <gor@linux.ibm.com>
Fri, 27 Mar 2020 09:22:46 +0000 (10:22 +0100)
Factor out check_asce_limit() function and fix a few style
defects in the arch_get_unmapped_area() family of functions.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
[heiko.carstens@de.ibm.com: small coding style changes]
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/include/asm/pgalloc.h
arch/s390/mm/hugetlbpage.c
arch/s390/mm/mmap.c

index 77606c4..f0d7457 100644 (file)
@@ -48,6 +48,20 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
 void crst_table_downgrade(struct mm_struct *);
 
+static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
+                                            unsigned long len)
+{
+       int rc;
+
+       if (addr + len > mm->context.asce_limit &&
+           addr + len <= TASK_SIZE) {
+               rc = crst_table_upgrade(mm, addr + len);
+               if (rc)
+                       return (unsigned long) rc;
+       }
+       return addr;
+}
+
 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        unsigned long *table = crst_table_alloc(mm);
index 5674710..f01dadd 100644 (file)
@@ -326,7 +326,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
-       int rc;
 
        if (len & ~huge_page_mask(h))
                return -EINVAL;
@@ -353,15 +352,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        else
                addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
-       if (addr & ~PAGE_MASK)
+       if (offset_in_page(addr))
                return addr;
 
 check_asce_limit:
-       if (addr + len > current->mm->context.asce_limit &&
-           addr + len <= TASK_SIZE) {
-               rc = crst_table_upgrade(mm, addr + len);
-               if (rc)
-                       return (unsigned long) rc;
-       }
-       return addr;
+       return check_asce_limit(mm, addr, len);
 }
index cbc718b..1b78f63 100644 (file)
@@ -72,14 +72,13 @@ static inline unsigned long mmap_base(unsigned long rnd,
        return PAGE_ALIGN(STACK_TOP - gap - rnd);
 }
 
-unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                                    unsigned long len, unsigned long pgoff,
+                                    unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
-       int rc;
 
        if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;
@@ -105,30 +104,20 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);
-       if (addr & ~PAGE_MASK)
+       if (offset_in_page(addr))
                return addr;
 
 check_asce_limit:
-       if (addr + len > current->mm->context.asce_limit &&
-           addr + len <= TASK_SIZE) {
-               rc = crst_table_upgrade(mm, addr + len);
-               if (rc)
-                       return (unsigned long) rc;
-       }
-
-       return addr;
+       return check_asce_limit(mm, addr, len);
 }
 
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-                         const unsigned long len, const unsigned long pgoff,
-                         const unsigned long flags)
+unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+                                            unsigned long len, unsigned long pgoff,
+                                            unsigned long flags)
 {
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
-       unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
-       int rc;
 
        /* requested length too big for entire address space */
        if (len > TASK_SIZE - mmap_min_addr)
@@ -163,25 +152,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
         * can happen with large stack limits and large mmap()
         * allocations.
         */
-       if (addr & ~PAGE_MASK) {
+       if (offset_in_page(addr)) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
-               if (addr & ~PAGE_MASK)
+               if (offset_in_page(addr))
                        return addr;
        }
 
 check_asce_limit:
-       if (addr + len > current->mm->context.asce_limit &&
-           addr + len <= TASK_SIZE) {
-               rc = crst_table_upgrade(mm, addr + len);
-               if (rc)
-                       return (unsigned long) rc;
-       }
-
-       return addr;
+       return check_asce_limit(mm, addr, len);
 }
 
 /*