kasan: fix crashes on access to memory mapped by vm_map_ram()
author Andrey Ryabinin <aryabinin@virtuozzo.com>
Wed, 18 Dec 2019 04:51:38 +0000 (20:51 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Dec 2019 04:59:59 +0000 (20:59 -0800)
With CONFIG_KASAN_VMALLOC=y, any use of memory obtained via vm_map_ram()
will crash because there is no shadow backing that memory.
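
As an illustration of the failure mode (not part of the patch; the pages
array and count are assumed to come from the caller):

	/*
	 * Illustrative sketch only: with CONFIG_KASAN_VMALLOC=y, the
	 * instrumented access below faulted because no shadow had been
	 * allocated for the area returned by vm_map_ram().
	 */
	void *mem = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);

	if (mem) {
		memset(mem, 0, count * PAGE_SIZE); /* KASAN check hits the missing shadow */
		vm_unmap_ram(mem, count);
	}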

Instead of sprinkling additional kasan_populate_vmalloc() calls all over
the vmalloc code, move the call into alloc_vmap_area(). This fixes
vm_map_ram() and simplifies the code a bit.
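
In rough outline, the resulting split of responsibilities (a condensed
restatement of the diff below, not extra code in the patch) is:

	/*
	 * alloc_vmap_area()                  -> kasan_populate_vmalloc(addr, size)
	 *     map shadow for the new area; it stays poisoned (KASAN_VMALLOC_INVALID)
	 * vm_map_ram(), __get_vm_area_node() -> kasan_unpoison_vmalloc(start, size)
	 *     mark the range actually handed out as valid
	 * vm_unmap_ram(), __vunmap()          -> kasan_poison_vmalloc(start, size)
	 *     mark it invalid again when the mapping is torn down
	 */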

[aryabinin@virtuozzo.com: v2]
Link: http://lkml.kernel.org/r/20191205095942.1761-1-aryabinin@virtuozzo.com
Link:
Fixes: 3c5c3cfb9ef4 ("kasan: support backing vmalloc space with real shadow memory")
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Alexander Potapenko <glider@google.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/kasan.h
mm/kasan/common.c
mm/vmalloc.c

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 4f404c565db1baa7688e3a4527b9523da009c64d..e18fe54969e97ede27c426277d84601d0c6aec6b 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -205,20 +205,23 @@ static inline void *kasan_reset_tag(const void *addr)
 #endif /* CONFIG_KASAN_SW_TAGS */
 
 #ifdef CONFIG_KASAN_VMALLOC
-int kasan_populate_vmalloc(unsigned long requested_size,
-                          struct vm_struct *area);
-void kasan_poison_vmalloc(void *start, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+void kasan_poison_vmalloc(const void *start, unsigned long size);
+void kasan_unpoison_vmalloc(const void *start, unsigned long size);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end);
 #else
-static inline int kasan_populate_vmalloc(unsigned long requested_size,
-                                        struct vm_struct *area)
+static inline int kasan_populate_vmalloc(unsigned long start,
+                                       unsigned long size)
 {
        return 0;
 }
 
-static inline void kasan_poison_vmalloc(void *start, unsigned long size) {}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{ }
 static inline void kasan_release_vmalloc(unsigned long start,
                                         unsigned long end,
                                         unsigned long free_region_start,
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 2fa710bb6358a7d8bf2ed483c3adf324b8386e55..e04e73603dfc9fe76e01148d0bdb643499b38703 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -778,15 +778,17 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        return 0;
 }
 
-int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 {
        unsigned long shadow_start, shadow_end;
        int ret;
 
-       shadow_start = (unsigned long)kasan_mem_to_shadow(area->addr);
+       if (!is_vmalloc_or_module_addr((void *)addr))
+               return 0;
+
+       shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
        shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
-       shadow_end = (unsigned long)kasan_mem_to_shadow(area->addr +
-                                                       area->size);
+       shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
        shadow_end = ALIGN(shadow_end, PAGE_SIZE);
 
        ret = apply_to_page_range(&init_mm, shadow_start,
@@ -797,10 +799,6 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
 
        flush_cache_vmap(shadow_start, shadow_end);
 
-       kasan_unpoison_shadow(area->addr, requested_size);
-
-       area->flags |= VM_KASAN;
-
        /*
         * We need to be careful about inter-cpu effects here. Consider:
         *
@@ -843,12 +841,23 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
  * Poison the shadow for a vmalloc region. Called as part of the
  * freeing process at the time the region is freed.
  */
-void kasan_poison_vmalloc(void *start, unsigned long size)
+void kasan_poison_vmalloc(const void *start, unsigned long size)
 {
+       if (!is_vmalloc_or_module_addr(start))
+               return;
+
        size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
 }
 
+void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{
+       if (!is_vmalloc_or_module_addr(start))
+               return;
+
+       kasan_unpoison_shadow(start, size);
+}
+
 static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                        void *unused)
 {
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4d3b3d60d8939b65f65bfcaad9c4d4c497f83a7a..6e865cea846c87a51951151d50b65ffc95175be8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1061,6 +1061,26 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
        return nva_start_addr;
 }
 
+/*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
+{
+       /*
+        * Remove from the busy tree/list.
+        */
+       spin_lock(&vmap_area_lock);
+       unlink_va(va, &vmap_area_root);
+       spin_unlock(&vmap_area_lock);
+
+       /*
+        * Insert/Merge it back to the free tree/list.
+        */
+       spin_lock(&free_vmap_area_lock);
+       merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
+       spin_unlock(&free_vmap_area_lock);
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -1073,6 +1093,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        struct vmap_area *va, *pva;
        unsigned long addr;
        int purged = 0;
+       int ret;
 
        BUG_ON(!size);
        BUG_ON(offset_in_page(size));
@@ -1139,6 +1160,7 @@ retry:
        va->va_end = addr + size;
        va->vm = NULL;
 
+
        spin_lock(&vmap_area_lock);
        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
        spin_unlock(&vmap_area_lock);
@@ -1147,6 +1169,12 @@ retry:
        BUG_ON(va->va_start < vstart);
        BUG_ON(va->va_end > vend);
 
+       ret = kasan_populate_vmalloc(addr, size);
+       if (ret) {
+               free_vmap_area(va);
+               return ERR_PTR(ret);
+       }
+
        return va;
 
 overflow:
@@ -1185,26 +1213,6 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
 
-/*
- * Free a region of KVA allocated by alloc_vmap_area
- */
-static void free_vmap_area(struct vmap_area *va)
-{
-       /*
-        * Remove from the busy tree/list.
-        */
-       spin_lock(&vmap_area_lock);
-       unlink_va(va, &vmap_area_root);
-       spin_unlock(&vmap_area_lock);
-
-       /*
-        * Insert/Merge it back to the free tree/list.
-        */
-       spin_lock(&free_vmap_area_lock);
-       merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
-       spin_unlock(&free_vmap_area_lock);
-}
-
 /*
  * Clear the pagetable entries of a given vmap_area
  */
@@ -1771,6 +1779,8 @@ void vm_unmap_ram(const void *mem, unsigned int count)
        BUG_ON(addr > VMALLOC_END);
        BUG_ON(!PAGE_ALIGNED(addr));
 
+       kasan_poison_vmalloc(mem, size);
+
        if (likely(count <= VMAP_MAX_ALLOC)) {
                debug_check_no_locks_freed(mem, size);
                vb_free(mem, size);
@@ -1821,6 +1831,9 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
                addr = va->va_start;
                mem = (void *)addr;
        }
+
+       kasan_unpoison_vmalloc(mem, size);
+
        if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
                vm_unmap_ram(mem, count);
                return NULL;
@@ -2075,6 +2088,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 {
        struct vmap_area *va;
        struct vm_struct *area;
+       unsigned long requested_size = size;
 
        BUG_ON(in_interrupt());
        size = PAGE_ALIGN(size);
@@ -2098,23 +2112,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                return NULL;
        }
 
-       setup_vmalloc_vm(area, va, flags, caller);
+       kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
 
-       /*
-        * For KASAN, if we are in vmalloc space, we need to cover the shadow
-        * area with real memory. If we come here through VM_ALLOC, this is
-        * done by a higher level function that has access to the true size,
-        * which might not be a full page.
-        *
-        * We assume module space comes via VM_ALLOC path.
-        */
-       if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
-               if (kasan_populate_vmalloc(area->size, area)) {
-                       unmap_vmap_area(va);
-                       kfree(area);
-                       return NULL;
-               }
-       }
+       setup_vmalloc_vm(area, va, flags, caller);
 
        return area;
 }
@@ -2293,8 +2293,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
        debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
        debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-       if (area->flags & VM_KASAN)
-               kasan_poison_vmalloc(area->addr, area->size);
+       kasan_poison_vmalloc(area->addr, area->size);
 
        vm_remove_mappings(area, deallocate_pages);
 
@@ -2539,7 +2538,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!size || (size >> PAGE_SHIFT) > totalram_pages())
                goto fail;
 
-       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+       area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
                                vm_flags, start, end, node, gfp_mask, caller);
        if (!area)
                goto fail;
@@ -2548,11 +2547,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!addr)
                return NULL;
 
-       if (is_vmalloc_or_module_addr(area->addr)) {
-               if (kasan_populate_vmalloc(real_size, area))
-                       return NULL;
-       }
-
        /*
         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
         * flag. It means that vm_struct is not fully initialized.
@@ -3437,7 +3431,8 @@ retry:
        /* populate the shadow space outside of the lock */
        for (area = 0; area < nr_vms; area++) {
                /* assume success here */
-               kasan_populate_vmalloc(sizes[area], vms[area]);
+               kasan_populate_vmalloc(vas[area]->va_start, sizes[area]);
+               kasan_unpoison_vmalloc((void *)vms[area]->addr, sizes[area]);
        }
 
        kfree(vas);