binder: revert dynamic vmalloc for binder [1/1]
author Tao Zeng <tao.zeng@amlogic.com>
Tue, 30 Apr 2019 07:29:17 +0000 (15:29 +0800)
committer Jianxin Pan <jianxin.pan@amlogic.com>
Mon, 6 May 2019 02:58:14 +0000 (19:58 -0700)
PD#TV-5143

Problem:
The kernel uses dynamic vmalloc for binder. This has data-synchronization
problems and can cause bad-object errors during binder transactions.

Solution:
1. Revert dynamic vmalloc for binder.
2. Increase the vmalloc area by 128MB (see the sketch below).
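
The 128MB figure can be sanity-checked from the two arm32 hunks below,
assuming VMALLOC_END and VMALLOC_OFFSET themselves are left untouched:
  PAGE_OFFSET (VMSPLIT_3G_OPT): 0xB8000000 - 0xB0000000 = 0x08000000 = 128MB less lowmem
  vmalloc_min reserve:          (368 << 20) - (240 << 20) = 128MB more vmalloc space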

Verify:
P212

Change-Id: I20198b18d171fde0314868af394a6881979b3605
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
arch/arm/Kconfig
arch/arm/configs/meson64_a32_defconfig
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h

index da92f87..d341592 100644
@@ -1444,7 +1444,7 @@ config PAGE_OFFSET
        default PHYS_OFFSET if !MMU
        default 0x40000000 if VMSPLIT_1G
        default 0x80000000 if VMSPLIT_2G
-       default 0xB0000000 if VMSPLIT_3G_OPT
+       default 0xB8000000 if VMSPLIT_3G_OPT
        default 0xD0000000 if AMLOGIC_KASAN32
        default 0xC0000000
 
index 9e89e68..9fb0bc0 100644
@@ -43,6 +43,7 @@ CONFIG_PCI=y
 CONFIG_PCIE_DW_PLAT=y
 CONFIG_SMP=y
 CONFIG_SCHED_MC=y
+CONFIG_VMSPLIT_3G_OPT=y
 CONFIG_NR_CPUS=8
 CONFIG_PREEMPT=y
 CONFIG_HZ_250=y
index c7e41d4..8b20f8b 100644
@@ -1129,8 +1129,13 @@ void __init debug_ll_io_init(void)
 #endif
 
 #ifndef CONFIG_AMLOGIC_KASAN32
+#ifdef CONFIG_AMLOGIC_MODIFY
+static void * __initdata vmalloc_min =
+       (void *)(VMALLOC_END - (368 << 20) - VMALLOC_OFFSET);
+#else
 static void * __initdata vmalloc_min =
        (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
+#endif
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
index d516a9f..d00d52c 100644
@@ -78,9 +78,7 @@ ENDPROC(cpu_v7_do_idle)
 ENTRY(cpu_v7_dcache_clean_area)
        ALT_SMP(W(nop))                 @ MP extensions imply L1 PTW
        ALT_UP_B(1f)
-#ifndef CONFIG_AMLOGIC_MODIFY
        ret     lr
-#endif
 1:     dcache_line_size r2, r3
 2:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, r2
index dfb9a24..bec6c0a 100644
@@ -185,152 +185,6 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
        return buffer;
 }
 
-#ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-static unsigned long check_range(struct binder_alloc *alloc, void *end)
-{
-       unsigned long size;
-
-       size = (unsigned long)end - (unsigned long)alloc->buffer;
-       if (size > PAGE_SIZE && size > alloc->mapped_size) {
-               pr_debug("%s, %d, base:%p, end:%p, size:%ld, task:%d %s\n",
-                       __func__, __LINE__, alloc->buffer, end, size,
-                       current->pid, current->comm);
-               return size;
-       }
-       return 0;
-}
-
-static int move_vma_mapping(void *old, void *new_addr, unsigned long size)
-{
-       struct page *page, *pages[2] = {};
-       unsigned long addr, end, new, moved = 0;
-       int ret;
-
-       addr = (unsigned long)old;
-       end  = (unsigned long)old + size;
-       new  = (unsigned long)new_addr;
-       for (; addr < end; addr += PAGE_SIZE) {
-               page = vmalloc_to_page((void *)addr);
-               if (!page) {
-                       new += PAGE_SIZE;
-                       continue;
-               }
-               /* Make sure cache same for other address */
-               cpu_ca8_dcache_clean_area((void *)addr, PAGE_SIZE);
-               pages[0] = page;
-               ret = map_kernel_range_noflush(new, PAGE_SIZE,
-                                              PAGE_KERNEL, pages);
-               pr_debug("%s, addr:%lx->%lx, old:%p, new:%p, page:%lx %p %p\n",
-                       __func__, addr, new, old, new_addr,
-                       page_to_pfn(page), pages[0], pages[1]);
-               flush_cache_vmap(addr, addr + PAGE_SIZE);
-               new += PAGE_SIZE;
-               moved++;
-               if (ret < 0)
-                       return ret;
-       }
-       return moved;
-}
-
-static void free_back_buffer(struct binder_alloc *alloc)
-{
-       int i;
-       void *p;
-
-       for (i = 0; i < MAX_BUFFER; i++) {
-               p = alloc->back_buffer[i];
-               if (p && p != alloc->buffer) {
-                       vfree(p);
-                       pr_debug("free alloc %p, buffer:%p@%d\n", alloc, p, i);
-               }
-       }
-}
-
-static unsigned long get_new_size(unsigned long curr_size,
-                          unsigned long max_size, int next_step)
-{
-       int order, step;
-       long next_size;
-
-       order = get_order(max_size);
-       step  = (order + MAX_BUFFER / 2) / MAX_BUFFER;
-       next_step += 1;
-       next_size = (1 << (next_step * step + PAGE_SHIFT));
-       if (next_size <= curr_size)
-               return curr_size + PAGE_SIZE * 4;
-       return next_size;
-}
-
-static int try_to_replace_vmap(struct binder_alloc *alloc,
-                              unsigned long new_sz,
-                              void **start, void **endp)
-{
-       unsigned long max_size, diff, size;
-       unsigned long *old_buffer;
-       struct binder_buffer *buffer;
-       struct vm_struct *area;
-       int ret, i;
-
-       for (i = 0; i < MAX_BUFFER; i++) {
-               if (!alloc->back_buffer[i])
-                       break;
-       }
-
-       if (i == MAX_BUFFER) {
-               pr_info("max buffer:%d, new_sz:%lx, buffer:%p %p %p %p\n",
-                       i, new_sz, alloc->back_buffer[0],
-                       alloc->back_buffer[1], alloc->back_buffer[2],
-                       alloc->back_buffer[3]);
-               dump_stack();
-               return -ENOMEM;
-       }
-
-       max_size = alloc->vma->vm_end - alloc->vma->vm_start;
-       size     = get_new_size(new_sz, max_size, i);
-       if (size >= max_size)
-               size = max_size;
-       area = get_vm_area(size, VM_ALLOC | VM_NO_GUARD);
-       if (area == NULL) {
-               pr_err("%s, get vmalloc size:%lx failed\n", __func__, size);
-               return -ENOMEM;
-       }
-
-       ret = move_vma_mapping(alloc->buffer, area->addr, alloc->mapped_size);
-       pr_debug("%s, move %p:%p, ret:%d, vm size:%x:%lx, want:%lx, alloc:%p\n",
-                __func__, alloc->buffer, area->addr,
-                ret, alloc->mapped_size, size, new_sz, alloc);
-       if (ret < 0) {
-               free_vm_area(area);
-               return -ENOMEM;
-       }
-
-       old_buffer    = alloc->buffer;
-       alloc->buffer = area->addr;
-       diff          = (unsigned long)old_buffer -
-                       (unsigned long)alloc->buffer;
-       pr_debug("old:%p, new:%p, size:%ld, mapped:%d, alloc:%p\n",
-               old_buffer, alloc->buffer, size, alloc->mapped_size, alloc);
-       alloc->user_buffer_offset = alloc->vma->vm_start -
-                                   (uintptr_t)alloc->buffer;
-       list_for_each_entry(buffer, &alloc->buffers, entry) {
-               void *tmp;
-
-               tmp = buffer->data;
-               buffer->data = buffer->data - diff;
-               pr_debug("replace:%p, new:%p, diff:%lx\n",
-                        tmp, buffer->data, diff);
-       }
-       *start = *start - diff;
-       *endp  = *endp  - diff;
-       alloc->mapped_size = size;
-       alloc->back_buffer[i] = area->addr;
-       pr_debug("replace %i, alloc:%p, size:%lx:%lx, start:%p, end:%p\n",
-               i, alloc, new_sz, size, *start, *endp);
-
-       return 0;
-}
-#endif
-
 static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end)
 {
@@ -340,9 +194,6 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;
-#ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-       unsigned long map_size;
-#endif
 
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: %s pages %pK-%pK\n", alloc->pid,
@@ -378,14 +229,6 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                goto err_no_vma;
        }
 
-#ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-       /* try to replace vmalloc */
-       map_size = check_range(alloc, end);
-       if (map_size) {
-               if (try_to_replace_vmap(alloc, map_size, &start, &end))
-                       return -ENOMEM;
-       }
-#endif
        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
@@ -827,20 +670,13 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                goto err_already_mapped;
        }
 
-#ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-       area = get_vm_area(PAGE_SIZE, VM_ALLOC | VM_NO_GUARD);
-#else
        area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
-#endif
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
-#ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-       alloc->first_addr = area->addr;
-#endif
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);
@@ -865,9 +701,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;
-#ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-       alloc->mapped_size = PAGE_SIZE;
-#endif
 
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
@@ -959,11 +792,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
-       #ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-               free_back_buffer(alloc);
-               if (alloc->first_addr && alloc->first_addr != alloc->buffer)
-                       vfree(alloc->first_addr);
-       #endif
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
index 2bda6b1..9ef64e5 100644
@@ -99,22 +99,11 @@ struct binder_lru_page {
  * calls. The address space is used for both user-visible buffers and for
  * struct binder_buffer objects used to track the user buffers
  */
-#if defined(CONFIG_AMLOGIC_MODIFY) && defined(CONFIG_ARM)
-#define CONFIG_AMLOGIC_BINDER_VMALLOC
-#define MAX_BUFFER     4
-extern void cpu_ca8_dcache_clean_area(void *addr, int size);
-#endif
-
 struct binder_alloc {
        struct mutex mutex;
        struct vm_area_struct *vma;
        struct mm_struct *vma_vm_mm;
        void *buffer;
-#ifdef CONFIG_AMLOGIC_BINDER_VMALLOC
-       size_t mapped_size;
-       void *back_buffer[MAX_BUFFER];
-       void *first_addr;
-#endif /* CONFIG_AMLOGIC_BINDER_VMALLOC */
        ptrdiff_t user_buffer_offset;
        struct list_head buffers;
        struct rb_root free_buffers;