kasan: rebuild address layout after vmalloc increase [1/1]
authorTao Zeng <tao.zeng@amlogic.com>
Tue, 7 May 2019 02:51:42 +0000 (10:51 +0800)
committerJianxin Pan <jianxin.pan@amlogic.com>
Thu, 9 May 2019 09:46:38 +0000 (02:46 -0700)
PD#SWPL-8132

Problem:
In Jira TV-5143, the final fix increased the vmalloc address space
by 128MB, because binder wastes too much vmalloc space and this is
hard to fix on the kernel side.
With the enlarged vmalloc address space, the old KASAN32 address
space layout no longer fits. The memory layout must be rebuilt so
that KASAN can run correctly again.

Solution:
1, rebuild the address space layout for KASAN
2, make KASAN compatible with the vmap stack config
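Layout sanity check (arithmetic sketch only, not part of the patch):
with the standard KASAN mapping
    shadow = (addr >> 3) + KASAN_SHADOW_OFFSET
and the new offset 0x99000000,
    (0xB8000000 >> 3) + 0x99000000 = 0xB0000000  (KASAN_SHADOW_START)
    (0xFFFFFFFF >> 3) + 0x99000000 = 0xB8FFFFFF  (< KASAN_SHADOW_END)
so the 144MB (0x09000000) shadow window at 0xB0000000 covers every
kernel address from 0xB8000000 upward, including the new vmalloc range.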

Verify:
p212

Change-Id: I2ce8a840df0ce1fcda61ebeb14a64b1d609719ca
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
12 files changed:
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/include/asm/kasan.h
arch/arm/include/asm/memory.h
arch/arm/kernel/unwind.c
arch/arm/mm/kasan_init.c
arch/arm64/include/asm/kasan.h
arch/arm64/mm/kasan_init.c
drivers/amlogic/memory_ext/Kconfig
drivers/amlogic/memory_ext/vmap_stack.c
include/linux/amlogic/vmap_stack.h
mm/kasan/report.c

index d341592..d74f489 100644 (file)
@@ -1433,6 +1433,7 @@ choice
                bool "3G/1G user/kernel split"
        config VMSPLIT_3G_OPT
                bool "3G/1G user/kernel split (for full 1G low memory)"
+               depends on !AMLOGIC_KASAN32
        config VMSPLIT_2G
                bool "2G/2G user/kernel split"
        config VMSPLIT_1G
index 62732af..4a1ada2 100644 (file)
@@ -53,7 +53,7 @@ LD            += -EL
 endif
 
 ifeq ($(CONFIG_KASAN),y)
-KASAN_SHADOW_OFFSET := 0xA0000000
+KASAN_SHADOW_OFFSET := 0x99000000
 endif
 
 #
index 7b86cb0..1bff547 100644 (file)
  *  0x00000000 +--------+
  *             |        |
  *             |        |
- *             |        |  User space memory,         2944MB
+ *             |        |  User space memory,         2816MB
  *             |        |
  *             |        |
- *  0xb8000000 +--------+
- *             |        |  Kasan shaddow memory,       128MB
- *  0xc0000000 +--------+
- *             |        |  Vmalloc address,            240MB
- *             |        |
+ *  0xb0000000 +--------+
+ *             |        |  Kasan shadow memory,        144MB
+ *  0xb9000000 +--------+
+ *             |        |  Vmalloc address,            356MB
  *  0xCF400000 +--------+
  *  0xCF600000 +--------+  PKmap, for kmap               2MB
- *  0xD0000000 +--------+  Module and pkmap,            10MB
+ *  0xD0000000 +--------+  Module,                      10MB
  *             |        |
  *             |        |  Kernel linear mapped space, 762MB
+ *             |        |
  *  0xFFa00000 +--------+
  *  0xFFFc0000 +--------+  static map,                   2MB
  *  0xFFF00000 +--------+  Fixed map, for kmap_atomic,   3MB
  *  0xFFFF0000 +--------+  High vector,                  4KB
  *
  */
-#define KADDR_SIZE             (SZ_1G)
-#define KASAN_SHADOW_SIZE      (KADDR_SIZE >> 3)
+#define KASAN_SHADOW_SIZE      (0x09000000)
 #define KASAN_SHADOW_START     (TASK_SIZE)
 #define KASAN_SHADOW_END       (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
 
  *     shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
  *
  */
-#define KASAN_SHADOW_OFFSET    (KASAN_SHADOW_START - (VMALLOC_START >> 3))
+#define KASAN_SHADOW_OFFSET    0x99000000UL
 struct map_desc;
 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
 void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
 void create_mapping(struct map_desc *md);
+#ifdef CONFIG_AMLOGIC_VMAP
+void clear_pgds(unsigned long start, unsigned long end);
+#endif
 #else
 static inline void kasan_init(void) { }
 static inline void kasan_copy_shadow(pgd_t *pgdir) { }
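The constants above can be cross-checked at build time; a minimal
sketch (hypothetical helper, not part of this patch; assumes TASK_SIZE
is 0xB0000000 as set in memory.h, and <linux/bug.h> for BUILD_BUG_ON):

static inline void kasan32_layout_check(void)
{
	/* the shadow window must end exactly where vmalloc begins */
	BUILD_BUG_ON(KASAN_SHADOW_END != 0xB9000000UL);
	/* the mapping must send 0xB8000000 to the window start */
	BUILD_BUG_ON(((0xB8000000UL >> 3) + KASAN_SHADOW_OFFSET)
		     != KASAN_SHADOW_START);
}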
index f033759..1e81f57 100644 (file)
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
-#define TASK_SIZE              (UL(CONFIG_PAGE_OFFSET) - UL(SZ_64M))
-#elif defined(CONFIG_AMLOGIC_KASAN32)
+#ifdef CONFIG_AMLOGIC_KASAN32
 /*
- * reserve 128MB address space for kasan
- * for this memory layout implementation, PAGE_OFFSET should be 0xD0000000
+ * With AMLOGIC_KASAN32 enabled, PAGE_OFFSET is set to 0xD0000000.
+ * The shadow memory starts at 0xB0000000, so vmalloc can start at
+ * 0xB9000000, for a total of 368MB of vmalloc address space.
  */
-#define VMALLOC_START          (UL(CONFIG_PAGE_OFFSET) - UL(SZ_256M))
-#define TASK_SIZE              (VMALLOC_START - UL(SZ_128M))
+#define VMALLOC_START          (UL(0xB9000000))
+#define TASK_SIZE              (UL(0xB0000000))
 #define KMEM_END               (0xffa00000UL)
+#else /* CONFIG_AMLOGIC_KASAN32 */
+#define TASK_SIZE              (UL(CONFIG_PAGE_OFFSET) - UL(SZ_64M))
+#endif
 #else
 /*
  * TASK_SIZE - the maximum size of a user space task.
 
 #ifdef CONFIG_AMLOGIC_VMAP
 #ifndef CONFIG_THUMB2_KERNEL
+#ifdef CONFIG_AMLOGIC_KASAN32
+#define MODULES_VADDR          (PAGE_OFFSET - SZ_16M + SZ_4M + SZ_2M)
+#else
 #define MODULES_VADDR          (PAGE_OFFSET - SZ_64M)
+#endif /* CONFIG_AMLOGIC_KASAN32 */
 #else
 #define MODULES_VADDR          (PAGE_OFFSET - SZ_8M)
 #endif
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
 #ifndef CONFIG_THUMB2_KERNEL
-#ifdef CONFIG_AMLOGIC_KASAN32
 /*
  * to fix module link problem
  */
-#define MODULES_VADDR          (PAGE_OFFSET - SZ_16M + SZ_4M + SZ_2M)
-#else
 #define MODULES_VADDR          (PAGE_OFFSET - SZ_16M)
-#endif
 #else
 /* smaller range for Thumb-2 symbols relocation (2^24)*/
 #define MODULES_VADDR          (PAGE_OFFSET - SZ_8M)
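For reference, the KASAN32 MODULES_VADDR arithmetic matches the 10MB
module window shown in the kasan.h diagram:

	/* 0xD0000000 - 16MB + 4MB + 2MB = 0xCF600000          */
	/* modules occupy [0xCF600000, 0xD0000000), i.e. 10MB  */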
index 4682dd2..7ad8026 100644 (file)
@@ -246,21 +246,8 @@ static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
 }
 
 /* Before popping a register, check whether it is feasible or not */
-#ifdef CONFIG_AMLOGIC_KASAN32
-/*
- * If enabled KASAN and unwind_frame is called under IRQ routine,
- * an value-less kasan report will trigger. Because IRQ is using
- * thread context and don't initialized shadow memory when irq_svc
- * saving irq context. Since it's hard to guess reserved memory for
- * shadow in stack by compiler, so we just tell compiler do not
- * sanitize for this function
- */
-int __no_sanitize_address unwind_pop_register(struct unwind_ctrl_block *ctrl,
-                               unsigned long **vsp, unsigned int reg)
-#else
 static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
                                unsigned long **vsp, unsigned int reg)
-#endif
 {
        if (unlikely(ctrl->check_each_pop))
                if (*vsp >= (unsigned long *)ctrl->sp_high)
index cd16c6f..d5416b3 100644 (file)
@@ -120,14 +120,18 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
        return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
 }
 
-static void __init clear_pmds(unsigned long start,
-                       unsigned long end)
+#ifdef CONFIG_AMLOGIC_VMAP
+void __init clear_pgds(unsigned long start, unsigned long end)
+#else
+static void __init clear_pgds(unsigned long start, unsigned long end)
+#endif
 {
        /*
         * Remove references to kasan page tables from
         * swapper_pg_dir. pmd_clear() can't be used
         * here because it's nop on 2,3-level pagetable setups
         */
+       pr_debug("%s, clear %lx %lx\n", __func__, start, end);
        for (; start < end; start += PGDIR_SIZE)
                pmd_clear(pmd_off_k(start));
 }
@@ -168,7 +172,7 @@ void __init kasan_init(void)
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
        dsb(ishst);
        cpu_switch_mm(tmp_pg_dir, &init_mm);
-       clear_pmds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+       clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
        for_each_memblock(memory, reg) {
                mem_size += reg->size;
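Note: clear_pmds() is renamed to clear_pgds() and made non-static under
CONFIG_AMLOGIC_VMAP so that the vmap-stack code can reuse it to carve
the stack area out of the zero-shadow mapping; its later use (see the
thread_stack_cache_init() hunk below) has the shape:

	/* clear_pgds(kasan_mem_to_shadow(start),  */
	/*            kasan_mem_to_shadow(end));   */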
index 71ad0f9..c609dcd 100644 (file)
@@ -31,6 +31,9 @@
 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
+#ifdef CONFIG_AMLOGIC_VMAP
+void clear_pgds(unsigned long start, unsigned long end);
+#endif
 
 #else
 static inline void kasan_init(void) { }
index 201d918..01967b4 100644 (file)
@@ -122,8 +122,13 @@ void __init kasan_copy_shadow(pgd_t *pgdir)
        } while (pgd++, pgd_new++, pgd != pgd_end);
 }
 
+#ifdef CONFIG_AMLOGIC_VMAP
+void __init clear_pgds(unsigned long start,
+                       unsigned long end)
+#else
 static void __init clear_pgds(unsigned long start,
                        unsigned long end)
+#endif
 {
        /*
         * Remove references to kasan page tables from
index f40e0e3..0da0422 100644 (file)
@@ -55,8 +55,6 @@ config AMLOGIC_KASAN32
 config AMLOGIC_VMAP
        bool "Amlogic kernel stack"
        depends on AMLOGIC_MEMORY_EXTEND
-       depends on !KASAN
-       depends on !AMLOGIC_KASAN32
        default y
        help
                This config is used to enable amlogic kernel stack
index d199f76..dae386a 100644 (file)
 #include <linux/memcontrol.h>
 #include <linux/amlogic/vmap_stack.h>
 #include <linux/highmem.h>
+#include <linux/delay.h>
+#ifdef CONFIG_KASAN
+#include <linux/kasan.h>
+#endif
 #include <asm/tlbflush.h>
 #include <asm/stacktrace.h>
 
@@ -455,16 +459,18 @@ static void check_sp_fault_again(struct pt_regs *regs)
                 * will fault when we copy back context, so handle
                 * it first
                 */
-               E("fault again, sp:%lx, addr:%lx\n", sp, addr);
+               D("fault again, sp:%lx, addr:%lx\n", sp, addr);
                page = get_vmap_cached_page(&cache);
                WARN_ON(!page);
                vmap_mmu_set(page, addr, 1);
                update_vmap_stack(1);
+       #ifndef CONFIG_KASAN
                if ((THREAD_SIZE_ORDER > 1) && stack_floor_page(addr)) {
                        E("task:%d %s, stack near overflow, addr:%lx\n",
                                current->pid, current->comm, addr);
                        show_fault_stack(addr, regs);
                }
+       #endif
 
                /* cache is not enough */
                if (cache <= (VMAP_CACHE_PAGE / 2))
@@ -683,6 +689,40 @@ void aml_account_task_stack(struct task_struct *tsk, int account)
        }
 }
 
+#ifdef CONFIG_KASAN
+DEFINE_MUTEX(stack_shadow_lock);
+static void check_and_map_stack_shadow(unsigned long addr)
+{
+       unsigned long shadow;
+       struct page *page, *pages[2] = {};
+       int ret;
+
+       shadow = (unsigned long)kasan_mem_to_shadow((void *)addr);
+       page   = check_pte_exist(shadow);
+       if (page) {
+               WARN(page_address(page) == (void *)kasan_zero_page,
+                    "bad pte, page:%p, %lx, addr:%lx\n",
+                    page_address(page), page_to_pfn(page), addr);
+               return;
+       }
+       shadow = shadow & PAGE_MASK;
+       page   = alloc_page(GFP_KERNEL | __GFP_HIGHMEM |
+                           __GFP_ZERO | __GFP_REPEAT);
+       if (!page) {
+               WARN(!page,
+                    "alloc page for addr:%lx, shadow:%lx fail\n",
+                    addr, shadow);
+               return;
+       }
+       pages[0] = page;
+       ret = map_kernel_range_noflush(shadow, PAGE_SIZE, PAGE_KERNEL, pages);
+       if (ret < 0) {
+               pr_err("%s, map shadow:%lx failed:%d\n", __func__, shadow, ret);
+               __free_page(page);
+       }
+}
+#endif
+
 void *aml_stack_alloc(int node, struct task_struct *tsk)
 {
        unsigned long bitmap_no, raw_start;
@@ -722,6 +762,12 @@ void *aml_stack_alloc(int node, struct task_struct *tsk)
        map_addr = addr + STACK_TOP_PAGE_OFF;
        vmap_mmu_set(page, map_addr, 1);
        update_vmap_stack(1);
+#ifdef CONFIG_KASAN
+       /* two thread stacks can share one shadow page, so take a lock */
+       mutex_lock(&stack_shadow_lock);
+       check_and_map_stack_shadow(addr);
+       mutex_unlock(&stack_shadow_lock);
+#endif
 
        D("bit idx:%5ld, start:%5ld, addr:%lx, page:%lx\n",
                bitmap_no, raw_start, addr, page_to_pfn(page));
@@ -747,6 +793,9 @@ void aml_stack_free(struct task_struct *tsk)
                page = vmalloc_to_page((const void *)addr);
                if (!page)
                        break;
+       #ifdef CONFIG_KASAN
+               kasan_unpoison_shadow((void *)addr, PAGE_SIZE);
+       #endif
                vmap_mmu_set(page, addr, 0);
                /* supplement for stack page cache first */
                spin_lock_irqsave(&avmap->page_lock, flags);
@@ -813,6 +862,9 @@ void __init thread_stack_cache_init(void)
 {
        int i;
        struct page *page;
+#ifdef CONFIG_KASAN
+       unsigned long align, size;
+#endif
 
        page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, VMAP_CACHE_PAGE_ORDER);
        if (!page)
@@ -832,11 +884,31 @@ void __init thread_stack_cache_init(void)
        }
        pr_info("%s, vmap:%p, bitmap:%p, cache page:%lx\n",
                __func__, avmap, avmap->bitmap, page_to_pfn(page));
+#ifdef CONFIG_KASAN
+       align = PGDIR_SIZE << KASAN_SHADOW_SCALE_SHIFT;
+       size  = VM_STACK_AREA_SIZE;
+       size  = ALIGN(size, align);
+       avmap->root_vm = __get_vm_area_node(size, align, VM_NO_GUARD,
+                                           VMALLOC_START, VMALLOC_END,
+                                           NUMA_NO_NODE, GFP_KERNEL,
+                                           __builtin_return_address(0));
+       WARN(!avmap->root_vm, "alloc vmap area %lx failed\n", size);
+       if (avmap->root_vm) {
+               unsigned long s, e;
+
+               s = (unsigned long)kasan_mem_to_shadow(avmap->root_vm->addr);
+               e = (unsigned long)avmap->root_vm->addr + size;
+               e = (unsigned long)kasan_mem_to_shadow((void *)e);
+               pr_info("%s, s:%lx, e:%lx, size:%lx\n", __func__, s, e, size);
+               clear_pgds(s, e);
+       }
+#else
        avmap->root_vm = __get_vm_area_node(VM_STACK_AREA_SIZE,
                                            VMAP_ALIGN,
                                            0, VMAP_ADDR_START, VMAP_ADDR_END,
                                            NUMA_NO_NODE, GFP_KERNEL,
                                            __builtin_return_address(0));
+#endif
        if (!avmap->root_vm) {
                __free_pages(page, VMAP_CACHE_PAGE_ORDER);
                kfree(avmap->bitmap);
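The PGDIR-based alignment in the KASAN branch keeps the shadow of the
stack area on page-directory boundaries so clear_pgds() can drop whole
entries. A numeric sketch, assuming the non-LPAE ARM32 values
(PGDIR_SIZE = 2MB; VM_STACK_AREA_SIZE = 48MB per vmap_stack.h):

	/* align       = 2MB << 3          = 16MB                 */
	/* size        = ALIGN(48MB, 16MB) = 48MB                 */
	/* shadow span = 48MB >> 3         = 6MB (3 pgd entries)  */
	/* after clear_pgds(s, e), check_and_map_stack_shadow()   */
	/* installs one real shadow page per stack instead of     */
	/* the shared kasan_zero_page                             */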
index a8aa35f..9b1f868 100644 (file)
 #else
 /* currently support max 6144 tasks on 32bit */
 #define VM_STACK_AREA_SIZE             (SZ_64M - SZ_16M)
+#ifdef CONFIG_AMLOGIC_KASAN32          /* use a different range if KASAN is on */
+#define VMAP_ADDR_START                        VMALLOC_START
+#define VMAP_ADDR_END                  (VMALLOC_START + VM_STACK_AREA_SIZE)
+#else
 #define VMAP_ADDR_START                        MODULES_VADDR
 #define VMAP_ADDR_END                  MODULES_END
+#endif /* CONFIG_AMLOGIC_KASAN32 */
 #define VMAP_ALIGN                     SZ_64M
 #endif
 
 #define VMAP_PAGE_FLAG                 (__GFP_ZERO | __GFP_HIGH |\
                                         __GFP_ATOMIC | __GFP_REPEAT)
 
+#ifdef CONFIG_KASAN
+#define VMAP_CACHE_PAGE_ORDER          7
+#else
 #define VMAP_CACHE_PAGE_ORDER          5
+#endif
 #define VMAP_CACHE_PAGE                        (1 << VMAP_CACHE_PAGE_ORDER)
-#define CACHE_MAINTAIN_DELAY           (HZ)
+#define CACHE_MAINTAIN_DELAY           (HZ / 2)
 
 struct aml_vmap {
        spinlock_t vmap_lock;
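With KASAN enabled the vmap page cache is enlarged and serviced more
often, presumably because each faulted stack page may also need a
shadow page; the resulting values:

	/* VMAP_CACHE_PAGE: 1 << 7 = 128 pages (was 1 << 5 = 32)  */
	/* cache maintain delay: HZ / 2 jiffies (was HZ)          */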
index a324797..21f67cb 100644 (file)
@@ -397,7 +397,11 @@ static inline bool kasan_report_enabled(void)
 void kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip)
 {
+#ifdef CONFIG_AMLOGIC_MODIFY
+       struct kasan_access_info info = {};
+#else
        struct kasan_access_info info;
+#endif
 
        if (likely(!kasan_report_enabled()))
                return;