mm: fix flood of spurious kasan reports [1/1]
author Tao Zeng <tao.zeng@amlogic.com>
Tue, 25 Dec 2018 09:43:02 +0000 (17:43 +0800)
committer Jianxin Pan <jianxin.pan@amlogic.com>
Wed, 26 Dec 2018 01:56:28 +0000 (17:56 -0800)
PD#SWPL-3583

Problem:
After merging change
http://scgit.amlogic.com:8080/#/c/55018/
too many spurious kasan reports occur when booting the kernel.

Solution:
Fix the kasan poison address range for large allocations whose wasted
tail pages have been freed: end the redzone at ptr + PAGE_ALIGN(size),
the last page the object actually keeps, instead of deriving it from
the compound page order.
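
For reference, a minimal user-space sketch of the range arithmetic the
fix changes. The address and size are made up for illustration, and the
macros mirror the kernel ones for a 4K-page config where
KASAN_SHADOW_SCALE_SIZE is 8:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define KASAN_SHADOW_SCALE_SIZE	8UL

#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)
#define PAGE_ALIGN(x)		round_up((x), PAGE_SIZE)

int main(void)
{
	unsigned long ptr  = 0x40000000UL; /* pretend kmalloc_order() block */
	unsigned long size = 66000UL;	   /* request that needs order 5 */
	unsigned int order = 5;		   /* 32 pages = 131072 bytes */

	unsigned long redzone_start = round_up(ptr + size,
					       KASAN_SHADOW_SCALE_SIZE);
	/* old end: the whole order-5 block, including the wasted tail
	 * pages that were already handed back to the buddy allocator */
	unsigned long old_end = ptr + (PAGE_SIZE << order);
	/* fixed end (kasan_kmalloc_save): last page the object keeps */
	unsigned long new_end = ptr + PAGE_ALIGN(size);

	printf("redzone: old %lu bytes, fixed %lu bytes\n",
	       old_end - redzone_start, new_end - redzone_start);
	return 0;
}

Only 3632 of the former 65072 redzone bytes remain; the other pages are
free memory whose shadow stays under the page allocator's control.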

Verify:
P212

Change-Id: I576a7edb7b4e70f9c6c782639f433ad45bafba1d
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
include/linux/kasan.h
mm/kasan/kasan.c
mm/slab_common.c
mm/slub.c

index b37afd1..ecd57cf 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,6 +54,9 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+void kasan_kmalloc_save(const void *ptr, size_t size, gfp_t flags);
+#endif
 void kasan_kfree_large(const void *ptr);
 void kasan_poison_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
@@ -106,6 +109,10 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
                                const void *object) {}
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+static inline void kasan_kmalloc_save(const void *ptr, size_t size,
+                                     gfp_t flags) {}
+#endif
 static inline void kasan_kfree_large(const void *ptr) {}
 static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
index 7d78b5f..8622541 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -632,6 +632,30 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
                KASAN_PAGE_REDZONE);
 }
 
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+void kasan_kmalloc_save(const void *ptr, size_t size, gfp_t flags)
+{
+       struct page *page;
+       unsigned long redzone_start;
+       unsigned long redzone_end;
+
+       if (gfpflags_allow_blocking(flags))
+               quarantine_reduce();
+
+       if (unlikely(ptr == NULL))
+               return;
+
+       page = virt_to_page(ptr);
+       redzone_start = round_up((unsigned long)(ptr + size),
+                               KASAN_SHADOW_SCALE_SIZE);
+       redzone_end = (unsigned long)ptr + PAGE_ALIGN(size);
+
+       kasan_unpoison_shadow(ptr, size);
+       kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+               KASAN_PAGE_REDZONE);
+}
+#endif
+
 void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
        struct page *page;
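
The new helper is kasan_kmalloc_large() with a single difference: the
stock helper (assuming otherwise unmodified v4.9 sources) derives the
redzone end from the page's compound order,

	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

For a block trimmed by aml_slub_alloc_large() that end address is wrong
twice over. If the head page still looks compound, the range covers
tail pages already given back to the buddy allocator, so their shadow
is stamped KASAN_PAGE_REDZONE while the allocator may be handing them
out again. If it does not (compound_order() reads 0), redzone_end can
fall below redzone_start for any size above one page, and the unsigned
length redzone_end - redzone_start wraps around to a huge poison range.
Either way, booting floods the log with bogus reports; ending the
redzone at ptr + PAGE_ALIGN(size) keeps the poison inside the pages the
object actually owns.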
index 07b8fc8..45b23eb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1091,19 +1091,32 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
        void *ret;
        struct page *page;
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+       int saved = 0;
+#endif
 
        flags |= __GFP_COMP;
 #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
-       if (size < (PAGE_SIZE * (1 << order)))
+       if (size < (PAGE_SIZE * (1 << order))) {
                page = aml_slub_alloc_large(size, flags, order);
-       else
+               saved = 1;
+       } else {
                page = alloc_pages(flags, order);
+       }
 #else
        page = alloc_pages(flags, order);
 #endif
        ret = page ? page_address(page) : NULL;
        kmemleak_alloc(ret, size, 1, flags);
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+       /* only need to poison the pages actually used */
+       if (saved && ret)
+               kasan_kmalloc_save(ret, size, flags);
+       else
+               kasan_kmalloc_large(ret, size, flags);
+#else
        kasan_kmalloc_large(ret, size, flags);
+#endif
        return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
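
The saved flag is what routes a trimmed block to kasan_kmalloc_save()
while untouched full-order blocks keep using kasan_kmalloc_large(). A
hypothetical test-module snippet (names are mine, not part of the
patch) that would exercise both paths on a 4K-page SLUB kernel:

#include <linux/module.h>
#include <linux/slab.h>

static int __init kmalloc_save_test_init(void)
{
	/* 66000 bytes needs an order-5 block but fills only 17 of its
	 * 32 pages: takes the aml_slub_alloc_large() +
	 * kasan_kmalloc_save() path */
	void *a = kmalloc(66000, GFP_KERNEL);
	/* 131072 bytes fills the order-5 block exactly: takes the
	 * plain alloc_pages() + kasan_kmalloc_large() path */
	void *b = kmalloc(131072, GFP_KERNEL);

	kfree(a);	/* PageOwnerPriv1 set: aml_slub_free_large() */
	kfree(b);	/* plain compound page: __free_pages() */
	return 0;
}
module_init(kmalloc_save_test_init);

MODULE_LICENSE("GPL");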
index 71ae980..59ec527 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3734,6 +3734,7 @@ static void aml_slub_free_large(struct page *page, const void *obj)
                        __func__, page_address(page), nr_pages, obj);
                for (i = 0; i < nr_pages; i++)  {
+                       kasan_free_pages(page, 0);
                        __free_pages(page, 0);
                        page++;
                }
        }
@@ -3900,14 +3901,17 @@ void kfree(const void *x)
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                BUG_ON(!PageCompound(page));
-               kfree_hook(x);
        #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+               kmemleak_free(x);
-               if (unlikely(PageOwnerPriv1(page)))
-                       aml_slub_free_large(page, x);
-               else
+               if (unlikely(PageOwnerPriv1(page))) {
+                       aml_slub_free_large(page, x);
+               } else {
+                       kasan_kfree_large(x);
                        __free_pages(page, compound_order(page));
+               }
                return;
        #else
+               kfree_hook(x);
                __free_pages(page, compound_order(page));
                return;
        #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
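
The kfree() change open-codes kfree_hook(), which in this kernel
generation (again assuming stock v4.9 sources) is just:

	static inline void kfree_hook(const void *x)
	{
		kmemleak_free(x);
		kasan_kfree_large(x);
	}

kasan_kfree_large() poisons PAGE_SIZE << compound_order(page) bytes, so
calling the hook unchanged on a trimmed block would again stamp pages
that aml_slub_alloc_large() already returned to the buddy allocator.
The patch therefore keeps kmemleak_free() on both branches, runs
kasan_kfree_large() only for genuine full compound blocks, and lets the
per-page kasan_free_pages() calls in aml_slub_free_large() poison
exactly the pages that were still live. In both places the shadow is
poisoned before the pages go back to the allocator, so it is never
touched after another CPU could have reallocated them.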