vmalloc: fix remap_vmalloc_range() bounds checks
author     Jann Horn <jannh@google.com>
           Tue, 21 Apr 2020 01:14:11 +0000 (18:14 -0700)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sat, 2 May 2020 15:23:10 +0000 (17:23 +0200)
commit bdebd6a2831b6fab69eb85cee74a8ba77f1a1cc2 upstream.

remap_vmalloc_range() has had various issues with the bounds checks it
promises to perform ("This function checks that addr is a valid
vmalloc'ed area, and that it is big enough to cover the vma") over time,
e.g.:

 - not detecting pgoff<<PAGE_SHIFT overflow

 - not detecting (pgoff<<PAGE_SHIFT)+usize overflow

 - not checking whether addr and addr+(pgoff<<PAGE_SHIFT) are the same
   vmalloc allocation

 - comparing a potentially wildly out-of-bounds pointer with the end of
   the vmalloc region

In particular, since commit fc9702273e2e ("bpf: Add mmap() support for
BPF_MAP_TYPE_ARRAY"), unprivileged users can cause kernel null pointer
dereferences by calling mmap() on a BPF map with a size that is bigger
than the distance from the start of the BPF map to the end of the
address space.

This could theoretically be used as a kernel ASLR bypass, by using
whether mmap() with a given offset oopses or returns an error code to
perform a binary search over the possible address range.
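
As a rough userspace model of the wrap-around problem (all addresses and
sizes below are made-up illustration values, not kernel code): the old
check compared pointers, and unsigned arithmetic silently wraps, so an
oversized length can slip past the bounds check, while a plain size
comparison cannot:

    #include <stdio.h>

    int main(void)
    {
            /* Pretend vmalloc area: 4 pages starting at kaddr. */
            unsigned long kaddr     = 0xffffc90000000000UL;
            unsigned long area_size = 4UL << 12;
            /* Request that runs past the end of the address space. */
            unsigned long size      = -kaddr + (1UL << 12);

            /* Old-style pointer comparison: the addition wraps, check "passes". */
            if (kaddr + size > kaddr + area_size)
                    printf("old check: rejected\n");
            else
                    printf("old check: accepted despite wrap-around\n");

            /* Size-based comparison (as in the fix): cannot wrap, rejects. */
            if (size > area_size)
                    printf("size check: rejected\n");
            else
                    printf("size check: accepted\n");
            return 0;
    }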

To allow remap_vmalloc_range_partial() to verify that addr and
addr+(pgoff<<PAGE_SHIFT) are in the same vmalloc region, pass the offset
to remap_vmalloc_range_partial() instead of adding it to the pointer in
remap_vmalloc_range().

In remap_vmalloc_range_partial(), fix the check against
get_vm_area_size() by using size comparisons instead of pointer
comparisons, and add checks for pgoff.
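
A minimal userspace sketch of that pattern (range_ok() and the constants
are invented for illustration; the kernel uses the check_shl_overflow()/
check_add_overflow() helpers from <linux/overflow.h>, which the compiler
builtin below approximates): derive off = pgoff << PAGE_SHIFT and
end = off + size with explicit overflow detection, then compare sizes
instead of pointers:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Model of the fixed bounds check: returns true if the request fits. */
    static bool range_ok(unsigned long pgoff, unsigned long size,
                         unsigned long area_size)
    {
            unsigned long off, end;

            /* pgoff << PAGE_SHIFT must not overflow ... */
            if (pgoff > (unsigned long)-1 >> PAGE_SHIFT)
                    return false;
            off = pgoff << PAGE_SHIFT;

            /* ... and neither may off + size. */
            if (__builtin_add_overflow(off, size, &end))
                    return false;

            /* Pure size comparison: no pointer arithmetic that can wrap. */
            return end <= area_size;
    }

    int main(void)
    {
            printf("%d\n", range_ok(1, 4096, 4UL << PAGE_SHIFT));   /* fits: 1 */
            printf("%d\n", range_ok(-1UL, 4096, 4UL << PAGE_SHIFT)); /* overflow: 0 */
            return 0;
    }

The two failure modes modelled here (shift overflow, add overflow) map
one-to-one onto the new -EINVAL returns in the mm/vmalloc.c hunk below.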

Fixes: 833423143c3a ("[PATCH] mm: introduce remap_vmalloc_range()")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: stable@vger.kernel.org
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Yonghong Song <yhs@fb.com>
Cc: Andrii Nakryiko <andriin@fb.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@chromium.org>
Link: http://lkml.kernel.org/r/20200415222312.236431-1-jannh@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/proc/vmcore.c
include/linux/vmalloc.h
mm/vmalloc.c

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 93d13f4..8e80127 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -459,7 +459,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
-                                               kaddr, tsz))
+                                               kaddr, 0, tsz))
                        goto fail;
                size -= tsz;
                start += tsz;
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index d868a73..72c9c75 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -89,7 +89,7 @@ extern void vunmap(const void *addr);
 
 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
                                       unsigned long uaddr, void *kaddr,
-                                      unsigned long size);
+                                      unsigned long pgoff, unsigned long size);
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                        unsigned long pgoff);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 153deec..c74a087 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,6 +31,7 @@
 #include <linux/compiler.h>
 #include <linux/llist.h>
 #include <linux/bitops.h>
+#include <linux/overflow.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
@@ -2173,6 +2174,7 @@ finished:
  *     @vma:           vma to cover
  *     @uaddr:         target user address to start at
  *     @kaddr:         virtual address of vmalloc kernel memory
+ *     @pgoff:         offset from @kaddr to start at
  *     @size:          size of map area
  *
  *     Returns:        0 for success, -Exxx on failure
@@ -2185,9 +2187,15 @@ finished:
  *     Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
-                               void *kaddr, unsigned long size)
+                               void *kaddr, unsigned long pgoff,
+                               unsigned long size)
 {
        struct vm_struct *area;
+       unsigned long off;
+       unsigned long end_index;
+
+       if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
+               return -EINVAL;
 
        size = PAGE_ALIGN(size);
 
@@ -2201,8 +2209,10 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
        if (!(area->flags & VM_USERMAP))
                return -EINVAL;
 
-       if (kaddr + size > area->addr + get_vm_area_size(area))
+       if (check_add_overflow(size, off, &end_index) ||
+           end_index > get_vm_area_size(area))
                return -EINVAL;
+       kaddr += off;
 
        do {
                struct page *page = vmalloc_to_page(kaddr);
@@ -2241,7 +2251,7 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                unsigned long pgoff)
 {
        return remap_vmalloc_range_partial(vma, vma->vm_start,
-                                          addr + (pgoff << PAGE_SHIFT),
+                                          addr, pgoff,
                                           vma->vm_end - vma->vm_start);
 }
 EXPORT_SYMBOL(remap_vmalloc_range);
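
For context, here is a hypothetical caller of the repaired interface
(my_init()/my_mmap()/my_buf are invented for this sketch and are not part
of the patch): a driver that allocates a buffer with vmalloc_user() and
exposes it through its ->mmap() handler, relying on remap_vmalloc_range()
for the bounds checking described above:

    /* Hypothetical driver snippet, not part of this patch. */
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    #define MY_BUF_PAGES 16

    static void *my_buf;

    static int my_init(void)
    {
            /*
             * vmalloc_user() zeroes the buffer and sets VM_USERMAP, which
             * remap_vmalloc_range_partial() insists on before mapping.
             */
            my_buf = vmalloc_user(MY_BUF_PAGES * PAGE_SIZE);
            return my_buf ? 0 : -ENOMEM;
    }

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /*
             * With this fix, remap_vmalloc_range() validates vma->vm_pgoff
             * and the vma size against the vmalloc area backing my_buf, so
             * an oversized or overflowing request gets -EINVAL instead of
             * walking past the end of the allocation.
             */
            return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
    }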