Revert "mm: larger stack guard gap, between vmas" 17/155317/1 accepted/tizen/4.0/unified/20171013.101207 accepted/tizen/unified/20171013.193156 submit/tizen/20171013.022046 submit/tizen_4.0/20171013.022043 tizen_4.0.IoT.p1_release
author    Seung-Woo Kim <sw0312.kim@samsung.com>
          Fri, 13 Oct 2017 02:13:45 +0000 (11:13 +0900)
committer Seung-Woo Kim <sw0312.kim@samsung.com>
          Fri, 13 Oct 2017 02:15:37 +0000 (11:15 +0900)
This reverts commit f0e0e3b1f94f68b1efc9bc94c3d9eb2d7bca4f1f.

The CVE patch removes stack_guard_page_start() and
stack_guard_page_end(), but these helpers are still used by the
swap-modules kernel modules, so the swap-modules build fails.
Temporarily revert this CVE patch until swap-modules is updated.

Change-Id: Ib0d196146822ccea0a562bb5bc04f325e11bf046
Signed-off-by: Seung-Woo Kim <sw0312.kim@samsung.com>
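
For context, a minimal sketch of how an out-of-tree module such as
swap-modules might rely on the helpers restored by this revert. The
wrapper function and its name are assumptions for illustration only and
are not taken from the swap-modules source; only the
stack_guard_page_start()/stack_guard_page_end() signatures come from the
restored include/linux/mm.h code below.

    /*
     * Hypothetical illustration only: an external module skipping the
     * per-VMA stack guard page when inspecting a task's mappings.
     */
    #include <linux/mm.h>

    static unsigned long first_usable_addr(struct vm_area_struct *vma)
    {
            unsigned long start = vma->vm_start;

            /* Helper exists again once this revert is applied. */
            if (stack_guard_page_start(vma, start))
                    start += PAGE_SIZE;

            return start;
    }

Without the revert, any such call site fails to compile because the
helper is no longer defined in include/linux/mm.h.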
18 files changed:
Documentation/kernel-parameters.txt
arch/arc/mm/mmap.c
arch/arm/mm/mmap.c
arch/frv/mm/elf-fdpic.c
arch/mips/mm/mmap.c
arch/powerpc/mm/slice.c
arch/sh/mm/mmap.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/mm/hugetlbpage.c
arch/tile/mm/hugetlbpage.c
arch/x86/kernel/sys_x86_64.c
arch/x86/mm/hugetlbpage.c
arch/xtensa/kernel/syscall.c
fs/hugetlbfs/inode.c
fs/proc/task_mmu.c
include/linux/mm.h
mm/memory.c
mm/mmap.c

index 45329e9..15b24a2 100644 (file)
@@ -2893,13 +2893,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        spia_pedr=
        spia_peddr=
 
-       stack_guard_gap=        [MM]
-                       override the default stack gap protection. The value
-                       is in page units and it defines how many pages prior
-                       to (for stacks growing down) resp. after (for stacks
-                       growing up) the main stack are reserved for no other
-                       mapping. Default value is 256 pages.
-
        stacktrace      [FTRACE]
                        Enabled the stack tracer on boot up.
 
index cf4ae69..2e06d56 100644 (file)
@@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
index 984509e..5ef506c 100644 (file)
@@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
index efa59f1..836f147 100644 (file)
@@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
                addr = PAGE_ALIGN(addr);
                vma = find_vma(current->mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        goto success;
        }
 
index 0bb4295..7e5fe27 100644 (file)
@@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
index 887365a..7ce9cf3 100644 (file)
@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
-       return (!vma || (addr + len) <= vm_start_gap(vma));
+       return (!vma || (addr + len) <= vma->vm_start);
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
index 7df7d59..6777177 100644 (file)
@@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
index 79b981e..2daaaa6 100644 (file)
@@ -119,7 +119,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
index ce49370..d2b5944 100644 (file)
@@ -118,7 +118,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
index c75eac7..650ccff 100644 (file)
@@ -297,7 +297,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
index d050393..30277e2 100644 (file)
@@ -127,7 +127,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -166,7 +166,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
index 13849b1..7e73e8c 100644 (file)
@@ -154,7 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
index 1ff0b92..5d3f7a1 100644 (file)
@@ -86,7 +86,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (TASK_SIZE - len < addr)
                        return -ENOMEM;
-               if (!vmm || addr + len <= vm_start_gap(vmm))
+               if (!vmm || addr + len <= vmm->vm_start)
                        return addr;
                addr = vmm->vm_end;
                if (flags & MAP_SHARED)
index db7d89c..4e5f332 100644 (file)
@@ -169,7 +169,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
index 6d28391..0deb147 100644 (file)
@@ -346,7 +346,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
+       if (stack_guard_page_start(vma, start))
+               start += PAGE_SIZE;
        end = vma->vm_end;
+       if (stack_guard_page_end(vma, end))
+               end -= PAGE_SIZE;
 
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
index 72f83c6..2d03dca 100644 (file)
@@ -1074,6 +1074,34 @@ int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+                                            unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSDOWN) &&
+               (vma->vm_start == addr) &&
+               !vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+                                          unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSUP) &&
+               (vma->vm_end == addr) &&
+               !vma_growsup(vma->vm_next, addr);
+}
+
 extern pid_t
 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
 
@@ -1603,7 +1631,6 @@ unsigned long ra_submit(struct file_ra_state *ra,
                        struct address_space *mapping,
                        struct file *filp);
 
-extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -1632,30 +1659,6 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
        return vma;
 }
 
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
-{
-       unsigned long vm_start = vma->vm_start;
-
-       if (vma->vm_flags & VM_GROWSDOWN) {
-               vm_start -= stack_guard_gap;
-               if (vm_start > vma->vm_start)
-                       vm_start = 0;
-       }
-       return vm_start;
-}
-
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
-{
-       unsigned long vm_end = vma->vm_end;
-
-       if (vma->vm_flags & VM_GROWSUP) {
-               vm_end += stack_guard_gap;
-               if (vm_end < vma->vm_end)
-                       vm_end = -PAGE_SIZE;
-       }
-       return vm_end;
-}
-
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
index e2e4822..46f8407 100644 (file)
@@ -1661,6 +1661,12 @@ no_page_table:
        return page;
 }
 
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+       return stack_guard_page_start(vma, addr) ||
+              stack_guard_page_end(vma, addr+PAGE_SIZE);
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:       task_struct of target task
@@ -1828,6 +1834,11 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                int ret;
                                unsigned int fault_flags = 0;
 
+                               /* For mlock, just skip the stack guard page. */
+                               if (foll_flags & FOLL_MLOCK) {
+                                       if (stack_guard_page(vma, start))
+                                               goto next_page;
+                               }
                                if (foll_flags & FOLL_WRITE)
                                        fault_flags |= FAULT_FLAG_WRITE;
                                if (nonblocking)
@@ -3211,6 +3222,40 @@ out_release:
 }
 
 /*
+ * This is like a special single-page "expand_{down|up}wards()",
+ * except we must first make sure that 'address{-|+}PAGE_SIZE'
+ * doesn't hit another vma.
+ */
+static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+{
+       address &= PAGE_MASK;
+       if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+               struct vm_area_struct *prev = vma->vm_prev;
+
+               /*
+                * Is there a mapping abutting this one below?
+                *
+                * That's only ok if it's the same stack mapping
+                * that has gotten split..
+                */
+               if (prev && prev->vm_end == address)
+                       return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+
+               return expand_downwards(vma, address - PAGE_SIZE);
+       }
+       if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
+               struct vm_area_struct *next = vma->vm_next;
+
+               /* As VM_GROWSDOWN but s/below/above/ */
+               if (next && next->vm_start == address + PAGE_SIZE)
+                       return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
+
+               return expand_upwards(vma, address + PAGE_SIZE);
+       }
+       return 0;
+}
+
+/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -3229,6 +3274,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_flags & VM_SHARED)
                return VM_FAULT_SIGBUS;
 
+       /* Check if we need to add a guard page to the stack */
+       if (check_stack_guard_page(vma, address) < 0)
+               return VM_FAULT_SIGSEGV;
+
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE)) {
                entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
index a0da640..725e59f 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -266,7 +266,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *next;
        unsigned long min_brk;
        bool populate;
 
@@ -312,8 +311,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        }
 
        /* Check against existing mmap mappings. */
-       next = find_vma(mm, oldbrk);
-       if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
+       if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;
 
        /* Ok, looks good - let it rip. */
@@ -336,22 +334,10 @@ out:
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-       unsigned long max, prev_end, subtree_gap;
-
-       /*
-        * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
-        * allow two stack_guard_gaps between them here, and when choosing
-        * an unmapped area; whereas when expanding we only require one.
-        * That's a little inconsistent, but keeps the code here simpler.
-        */
-       max = vm_start_gap(vma);
-       if (vma->vm_prev) {
-               prev_end = vm_end_gap(vma->vm_prev);
-               if (max > prev_end)
-                       max -= prev_end;
-               else
-                       max = 0;
-       }
+       unsigned long max, subtree_gap;
+       max = vma->vm_start;
+       if (vma->vm_prev)
+               max -= vma->vm_prev->vm_end;
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -435,7 +421,7 @@ void validate_mm(struct mm_struct *mm)
                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                        anon_vma_interval_tree_verify(avc);
                vma_unlock_anon_vma(vma);
-               highest_address = vm_end_gap(vma);
+               highest_address = vma->vm_end;
                vma = vma->vm_next;
                i++;
        }
@@ -603,7 +589,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_next)
                vma_gap_update(vma->vm_next);
        else
-               mm->highest_vm_end = vm_end_gap(vma);
+               mm->highest_vm_end = vma->vm_end;
 
        /*
         * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -852,7 +838,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                        vma_gap_update(vma);
                if (end_changed) {
                        if (!next)
-                               mm->highest_vm_end = vm_end_gap(vma);
+                               mm->highest_vm_end = end;
                        else if (!adjust_next)
                                vma_gap_update(next);
                }
@@ -895,7 +881,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                else if (next)
                        vma_gap_update(next);
                else
-                       WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
+                       mm->highest_vm_end = end;
        }
        if (insert && file)
                uprobe_mmap(insert);
@@ -1698,7 +1684,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit left subtree if it looks promising */
-               gap_end = vm_start_gap(vma);
+               gap_end = vma->vm_start;
                if (gap_end >= low_limit && vma->vm_rb.rb_left) {
                        struct vm_area_struct *left =
                                rb_entry(vma->vm_rb.rb_left,
@@ -1709,7 +1695,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
                        }
                }
 
-               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
 check_current:
                /* Check if current node has a suitable gap */
                if (gap_start > high_limit)
@@ -1736,8 +1722,8 @@ check_current:
                        vma = rb_entry(rb_parent(prev),
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_left) {
-                               gap_start = vm_end_gap(vma->vm_prev);
-                               gap_end = vm_start_gap(vma);
+                               gap_start = vma->vm_prev->vm_end;
+                               gap_end = vma->vm_start;
                                goto check_current;
                        }
                }
@@ -1801,7 +1787,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
        while (true) {
                /* Visit right subtree if it looks promising */
-               gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
+               gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
                if (gap_start <= high_limit && vma->vm_rb.rb_right) {
                        struct vm_area_struct *right =
                                rb_entry(vma->vm_rb.rb_right,
@@ -1814,7 +1800,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
                /* Check if current node has a suitable gap */
-               gap_end = vm_start_gap(vma);
+               gap_end = vma->vm_start;
                if (gap_end < low_limit)
                        return -ENOMEM;
                if (gap_start <= high_limit && gap_end - gap_start >= length)
@@ -1840,7 +1826,7 @@ check_current:
                                       struct vm_area_struct, vm_rb);
                        if (prev == vma->vm_rb.rb_right) {
                                gap_start = vma->vm_prev ?
-                                       vm_end_gap(vma->vm_prev) : 0;
+                                       vma->vm_prev->vm_end : 0;
                                goto check_current;
                        }
                }
@@ -1878,7 +1864,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma, *prev;
+       struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
 
        if (len > TASK_SIZE - mmap_min_addr)
@@ -1889,10 +1875,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma_prev(mm, addr, &prev);
+               vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)) &&
-                   (!prev || addr >= vm_end_gap(prev)))
+                   (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -1924,7 +1909,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
 {
-       struct vm_area_struct *vma, *prev;
+       struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;
@@ -1939,10 +1924,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
-               vma = find_vma_prev(mm, addr, &prev);
+               vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)) &&
-                               (!prev || addr >= vm_end_gap(prev)))
+                               (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
 
@@ -2082,19 +2066,21 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma,
-                            unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start;
+       unsigned long new_start, actual_size;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       actual_size = size;
+       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+               actual_size -= PAGE_SIZE;
+       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
@@ -2135,40 +2121,32 @@ static int acct_stack_growth(struct vm_area_struct *vma,
  */
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
-       struct vm_area_struct *next;
-       unsigned long gap_addr;
-       int error = 0;
+       int error;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /* Guard against wrapping around to address 0. */
-       address &= PAGE_MASK;
-       address += PAGE_SIZE;
-       if (!address)
-               return -ENOMEM;
-
-       /* Enforce stack_guard_gap */
-       gap_addr = address + stack_guard_gap;
-       if (gap_addr < address)
-               return -ENOMEM;
-       next = vma->vm_next;
-       if (next && next->vm_start < gap_addr) {
-               if (!(next->vm_flags & VM_GROWSUP))
-                       return -ENOMEM;
-               /* Check that both stack segments have the same anon_vma? */
-       }
-
-       /* We must make sure the anon_vma is allocated. */
+       /*
+        * We must make sure the anon_vma is allocated
+        * so that the anon_vma locking is not a noop.
+        */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
+        * Also guard against wrapping around to address 0.
         */
-       vma_lock_anon_vma(vma);
+       if (address < PAGE_ALIGN(address+4))
+               address = PAGE_ALIGN(address+4);
+       else {
+               vma_unlock_anon_vma(vma);
+               return -ENOMEM;
+       }
+       error = 0;
 
        /* Somebody else might have raced and expanded it already */
        if (address > vma->vm_end) {
@@ -2199,7 +2177,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                if (vma->vm_next)
                                        vma_gap_update(vma->vm_next);
                                else
-                                       vma->vm_mm->highest_vm_end = vm_end_gap(vma);
+                                       vma->vm_mm->highest_vm_end = address;
                                spin_unlock(&vma->vm_mm->page_table_lock);
 
                                perf_event_mmap(vma);
@@ -2219,36 +2197,27 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 int expand_downwards(struct vm_area_struct *vma,
                                   unsigned long address)
 {
-       struct vm_area_struct *prev;
-       unsigned long gap_addr;
        int error;
 
+       /*
+        * We must make sure the anon_vma is allocated
+        * so that the anon_vma locking is not a noop.
+        */
+       if (unlikely(anon_vma_prepare(vma)))
+               return -ENOMEM;
+
        address &= PAGE_MASK;
        error = security_mmap_addr(address);
        if (error)
                return error;
 
-       /* Enforce stack_guard_gap */
-       gap_addr = address - stack_guard_gap;
-       if (gap_addr > address)
-               return -ENOMEM;
-       prev = vma->vm_prev;
-       if (prev && prev->vm_end > gap_addr) {
-               if (!(prev->vm_flags & VM_GROWSDOWN))
-                       return -ENOMEM;
-               /* Check that both stack segments have the same anon_vma? */
-       }
-
-       /* We must make sure the anon_vma is allocated. */
-       if (unlikely(anon_vma_prepare(vma)))
-               return -ENOMEM;
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
-       vma_lock_anon_vma(vma);
 
        /* Somebody else might have raced and expanded it already */
        if (address < vma->vm_start) {
@@ -2290,25 +2259,28 @@ int expand_downwards(struct vm_area_struct *vma,
        return error;
 }
 
-/* enforced gap between the expanding stack and other mappings. */
-unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
-
-static int __init cmdline_parse_stack_guard_gap(char *p)
-{
-       unsigned long val;
-       char *endptr;
-
-       val = simple_strtoul(p, &endptr, 10);
-       if (!*endptr)
-               stack_guard_gap = val << PAGE_SHIFT;
-
-       return 0;
-}
-__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
-
+/*
+ * Note how expand_stack() refuses to expand the stack all the way to
+ * abut the next virtual mapping, *unless* that mapping itself is also
+ * a stack mapping. We want to leave room for a guard page, after all
+ * (the guard page itself is not added here, that is done by the
+ * actual page faulting logic)
+ *
+ * This matches the behavior of the guard page logic (see mm/memory.c:
+ * check_stack_guard_page()), which only allows the guard page to be
+ * removed under these circumstances.
+ */
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *next;
+
+       address &= PAGE_MASK;
+       next = vma->vm_next;
+       if (next && next->vm_start == address + PAGE_SIZE) {
+               if (!(next->vm_flags & VM_GROWSUP))
+                       return -ENOMEM;
+       }
        return expand_upwards(vma, address);
 }
 
@@ -2330,6 +2302,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
+       struct vm_area_struct *prev;
+
+       address &= PAGE_MASK;
+       prev = vma->vm_prev;
+       if (prev && prev->vm_end == address) {
+               if (!(prev->vm_flags & VM_GROWSDOWN))
+                       return -ENOMEM;
+       }
        return expand_downwards(vma, address);
 }
 
@@ -2426,7 +2406,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
                vma->vm_prev = prev;
                vma_gap_update(vma);
        } else
-               mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
+               mm->highest_vm_end = prev ? prev->vm_end : 0;
        tail_vma->vm_next = NULL;
        if (mm->unmap_area == arch_unmap_area)
                addr = prev ? prev->vm_end : mm->mmap_base;