From f79f520dc86a49487b124f17347cbb17a98abbf9 Mon Sep 17 00:00:00 2001
From: Tao Zeng
Date: Tue, 25 Dec 2018 17:43:02 +0800
Subject: [PATCH] mm: fix too many wrong kasan reports [1/1]

PD#SWPL-3583

Problem:
After merging change http://scgit.amlogic.com:8080/#/c/55018/,
too many false KASAN reports occur when booting the kernel.

Solution:
Fix the KASAN poison address range when freeing wasted memory.

Verify:
P212

Change-Id: I576a7edb7b4e70f9c6c782639f433ad45bafba1d
Signed-off-by: Tao Zeng
---
 include/linux/kasan.h |  9 +++++++++
 mm/kasan/kasan.c      | 25 +++++++++++++++++++++++++
 mm/slab_common.c      | 16 ++++++++++++++--
 mm/slub.c             |  8 ++++++--
 4 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b37afd1..ecd57cf 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,6 +54,9 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+void kasan_kmalloc_save(const void *ptr, size_t size, gfp_t flags);
+#endif
 void kasan_kfree_large(const void *ptr);
 void kasan_poison_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
@@ -106,6 +109,12 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
 				const void *object) {}
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+static inline void kasan_kmalloc_save(const void *ptr, size_t size,
+				      gfp_t flags)
+{
+}
+#endif
 static inline void kasan_kfree_large(const void *ptr) {}
 static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 7d78b5f..8622541 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -632,6 +632,31 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 			KASAN_PAGE_REDZONE);
 }
 
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+void kasan_kmalloc_save(const void *ptr, size_t size, gfp_t flags)
+{
+	struct page *page;
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (gfpflags_allow_blocking(flags))
+		quarantine_reduce();
+
+	if (unlikely(ptr == NULL))
+		return;
+
+	page = virt_to_page(ptr);
+	redzone_start = round_up((unsigned long)(ptr + size),
+				 KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = (unsigned long)ptr + PAGE_ALIGN(size);
+
+	kasan_unpoison_shadow(ptr, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+			    KASAN_PAGE_REDZONE);
+}
+
+#endif
+
 void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
 	struct page *page;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 07b8fc8..45b23eb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1091,19 +1091,31 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
 	void *ret;
 	struct page *page;
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+	int saved = 0;
+#endif
 
 	flags |= __GFP_COMP;
 #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
-	if (size < (PAGE_SIZE * (1 << order)))
+	if (size < (PAGE_SIZE * (1 << order))) {
 		page = aml_slub_alloc_large(size, flags, order);
-	else
+		saved = 1;
+	} else
 		page = alloc_pages(flags, order);
 #else
 	page = alloc_pages(flags, order);
 #endif
 	ret = page ? page_address(page) : NULL;
 	kmemleak_alloc(ret, size, 1, flags);
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+	/* only need to poison used pages */
+	if (saved && ret)
+		kasan_kmalloc_save(ret, size, flags);
+	else
+		kasan_kmalloc_large(ret, size, flags);
+#else
 	kasan_kmalloc_large(ret, size, flags);
+#endif
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_order);
diff --git a/mm/slub.c b/mm/slub.c
index 71ae980..59ec527 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3734,6 +3734,7 @@ static void aml_slub_free_large(struct page *page, const void *obj)
 			 __func__, page_address(page), nr_pages, obj);
 		for (i = 0; i < nr_pages; i++) {
 			__free_pages(page, 0);
+			kasan_free_pages(page, 0);
 			page++;
 		}
 	}
@@ -3900,14 +3901,17 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
-		kfree_hook(x);
 #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+		kmemleak_free(x);
 		if (unlikely(PageOwnerPriv1(page)))
 			aml_slub_free_large(page, x);
-		else
+		else {
 			__free_pages(page, compound_order(page));
+			kasan_kfree_large(x);
+		}
 		return;
 #else
+		kfree_hook(x);
 		__free_pages(page, compound_order(page));
 		return;
 #endif /* CONFIG_AMLOGIC_MEMORY_EXTEND */
-- 
2.7.4
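
For context on the arithmetic the patch changes: kasan_kmalloc_large() ends its
redzone at ptr + (PAGE_SIZE << compound_order(page)), i.e. at the end of the
whole 2^order block, while the new kasan_kmalloc_save() ends it at
ptr + PAGE_ALIGN(size). The sketch below is a minimal user-space illustration
of the difference, assuming 4 KiB pages and KASAN's shadow scale of 8; the
address, size, and order are invented example values, not taken from the patch.

#include <stdio.h>

#define PAGE_SIZE               4096UL
#define KASAN_SHADOW_SCALE_SIZE 8UL

/* power-of-two rounding, same semantics as the kernel helpers */
#define round_up(x, y)  (((x) + (y) - 1) & ~((y) - 1))
#define PAGE_ALIGN(x)   round_up((x), PAGE_SIZE)

int main(void)
{
	unsigned long ptr  = 0x100000UL; /* pretend page_address() result */
	unsigned long size = 9000;       /* needs 3 pages, but order-2 = 4 pages */
	unsigned int order = 2;

	unsigned long redzone_start =
		round_up(ptr + size, KASAN_SHADOW_SCALE_SIZE);

	/* old path: poison to the end of the whole order-2 block,
	 * including the trailing page that the memory-extend path has
	 * already freed back as "wasted" memory (per the commit message)
	 */
	unsigned long end_large = ptr + (PAGE_SIZE << order);

	/* new path: stop at the page-aligned request size, so the freed
	 * page stays unpoisoned for its next legitimate owner
	 */
	unsigned long end_save = ptr + PAGE_ALIGN(size);

	printf("kasan_kmalloc_large: poison [%#lx, %#lx)\n",
	       redzone_start, end_large);
	printf("kasan_kmalloc_save:  poison [%#lx, %#lx)\n",
	       redzone_start, end_save);
	return 0;
}

With these numbers the old range is [ptr+0x2328, ptr+0x4000) and the new one is
[ptr+0x2328, ptr+0x3000): the page at ptr+0x3000 is no longer marked as
redzone, and it is the reuse of such already-freed trailing pages that was
triggering the false reports the patch describes.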