From: Tao Zeng
Date: Wed, 28 Aug 2019 07:25:40 +0000 (+0800)
Subject: mm: fix wrong kasan report [1/1]
X-Git-Tag: hardkernel-4.9.236-104~717
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=becb83999e19d2055458f08a2b7a44bd1170853e;p=platform%2Fkernel%2Flinux-amlogic.git

mm: fix wrong kasan report [1/1]

PD#SWPL-13281

Problem:
Two kinds of false KASAN reports appear after merging the change that
saves wasted slab pages:
1. slab-out-of-bounds, caused by krealloc() setting shadow memory out
   of range, because the tail of the compound page had already been
   freed;
2. use-after-free, caused by calling kasan_free_pages() after a page
   was freed. This function is already called in free_page(), so the
   shadow memory was marked twice.

Solution:
1. Keep the shadow range within bounds when the tail of a page has
   been freed and the object is krealloc()'d again.
2. Remove the redundant call to kasan_free_pages().

Verify:
X301

Change-Id: Ib5bdcbb618a783920009bb97d112c361888b0d7c
Signed-off-by: Tao Zeng
---

diff --git a/drivers/amlogic/memory_ext/Kconfig b/drivers/amlogic/memory_ext/Kconfig
index 8bb640e..0da0422 100644
--- a/drivers/amlogic/memory_ext/Kconfig
+++ b/drivers/amlogic/memory_ext/Kconfig
@@ -55,7 +55,6 @@ config AMLOGIC_KASAN32
 config AMLOGIC_VMAP
 	bool "Amlogic kernel stack"
 	depends on AMLOGIC_MEMORY_EXTEND
-	depends on !KASAN
 	default y
 	help
 	  This config is used to enable amlogic kernel stack
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 35408ec..55cd682 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -641,6 +641,11 @@ void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 	redzone_start = round_up((unsigned long)(ptr + size),
 				 KASAN_SHADOW_SCALE_SIZE);
 	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+#ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+	if (PageOwnerPriv1(page)) { /* end of this page was freed */
+		redzone_end = (unsigned long)ptr + PAGE_ALIGN(size);
+	}
+#endif
 
 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b453904..9c45a55 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -931,7 +931,17 @@ done_merging:
 		}
 	}
 
+#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN)
+	/*
+	 * always put freed page to tail of buddy system, in
+	 * order to increase probability of use-after-free
+	 * for KASAN check.
+	 */
+	list_add_tail(&page->lru,
+		      &zone->free_area[order].free_list[migratetype]);
+#else
 	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+#endif
 out:
 	zone->free_area[order].nr_free++;
 #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
@@ -2622,10 +2632,19 @@ void free_hot_cold_page(struct page *page, bool cold)
 	}
 
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
+#if defined(CONFIG_AMLOGIC_MEMORY_EXTEND) && defined(CONFIG_KASAN)
+	/*
+	 * always put freed page to tail of buddy system, in
+	 * order to increase probability of use-after-free
+	 * for KASAN check.
+	 */
+	list_add_tail(&page->lru, &pcp->lists[migratetype]);
+#else
 	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
 	else
 		list_add_tail(&page->lru, &pcp->lists[migratetype]);
+#endif
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/mm/slub.c b/mm/slub.c
index 59ec527..7dfc2a4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3734,7 +3734,6 @@ static void aml_slub_free_large(struct page *page, const void *obj)
 			 __func__, page_address(page), nr_pages, obj);
 		for (i = 0; i < nr_pages; i++) {
 			__free_pages(page, 0);
-			kasan_free_pages(page, 0);
 			page++;
 		}
 	}
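
Below is a minimal userspace sketch of the redzone arithmetic that the
kasan.c hunk above fixes; it is an illustration, not kernel code. The
constants PAGE_SIZE and KASAN_SHADOW_SCALE_SIZE, the helpers
round_up_to() and page_align(), and the tail_freed flag (which stands
in for the kernel's PageOwnerPriv1(page) test) are all assumptions made
for this example.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE		4096UL
#define KASAN_SHADOW_SCALE_SIZE	8UL

/* round x up to a power-of-two alignment a */
static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

static unsigned long page_align(unsigned long x)
{
	return round_up_to(x, PAGE_SIZE);
}

int main(void)
{
	unsigned long ptr = 0x100000;	/* hypothetical object start */
	unsigned int order = 2;		/* 4-page compound allocation */
	unsigned long size = 5000;	/* krealloc()'d size, ~2 pages */
	bool tail_freed = true;		/* stands in for PageOwnerPriv1() */

	/* everything past the object, up to the end of the compound
	 * page, is poisoned as a redzone */
	unsigned long redzone_start =
		round_up_to(ptr + size, KASAN_SHADOW_SCALE_SIZE);
	unsigned long redzone_end = ptr + (PAGE_SIZE << order);

	/*
	 * The fix: if the tail pages were already returned to the buddy
	 * allocator, the redzone must stop at the page-aligned end of
	 * the live object; otherwise the shadow write covers pages the
	 * allocation no longer owns, producing the false
	 * slab-out-of-bounds report described above.
	 */
	if (tail_freed)
		redzone_end = ptr + page_align(size);

	printf("poison shadow for [%#lx, %#lx)\n", redzone_start, redzone_end);
	return 0;
}

The page_alloc.c hunks serve a different purpose: queueing freed pages
at the tail of the free lists delays their reuse, which widens the
window in which KASAN can catch a genuine use-after-free before the
page is handed out again.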