* Interrupt handling.
*/
.macro irq_handler
+#ifdef CONFIG_AMLOGIC_VMAP
+ mov r8, sp /* back up sp */
+ mov r0, sp
+ bl irq_stack_entry /* switch IRQ stack */
+ mov sp, r0
+#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
arch_irq_handler_default
#endif
9997:
+#ifdef CONFIG_AMLOGIC_VMAP
+ mov sp, r8 /* switch stack back to task stack */
+#endif
.endm
.macro pabt_helper
#define SPFIX(code...)
#endif
+#ifdef CONFIG_AMLOGIC_VMAP
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1, vmap=0
+#else
.macro svc_entry, stack_hole=0, trace=1, uaccess=1
+#endif
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
+#ifdef CONFIG_AMLOGIC_VMAP
+ .if \vmap
+ /* keep using the abort-mode stack: the task stack may be unmapped */
+ str sp, [r0, #TI_VMAP_BACK_SP]
+ sub sp, r0, #(SVC_REGS_SIZE + \stack_hole - 4)
+ .else
+ sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+ .endif
+#else /* !CONFIG_AMLOGIC_VMAP */
sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+#endif /* CONFIG_AMLOGIC_VMAP */
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
+#ifdef CONFIG_AMLOGIC_VMAP
+ .if \vmap
+ ldr r2, [sp, #(TI_VMAP_BACK_SP + SVC_REGS_SIZE - 4)]
+ .else
add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+ .endif
+#else
+ add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+#endif
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
@
stmia r7, {r2 - r6}
+#ifdef CONFIG_AMLOGIC_VMAP
+ .if \vmap
+ /*
+ * get fault task thread info
+ */
+ ldr r0, [sp, #(SVC_REGS_SIZE + TI_VMAP_BACK_SP)]
+ mrc p15, 0, r1, c6, c0, 0 @ get FAR
+ bl pmd_check
+ mov tsk, r0
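+ /*
+ * round sp down to THREAD_SIZE to get the stack base, then point
+ * tsk at the thread_info kept at the top of the vmap stack
+ */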
+ mov tsk, tsk, lsr #(THREAD_SIZE_ORDER + PAGE_SHIFT)
+ mov tsk, tsk, lsl #(THREAD_SIZE_ORDER + PAGE_SHIFT)
+ add tsk, tsk, #TI_THREAD_SIZE
+ sub tsk, tsk, #TI_THREAD_INFO_SIZE
+
+ /*
+ * copy some important member of thread_info from current
+ * task to vmap stack
+ */
+ ldr r0, [tsk, #TI_FLAGS]
+ ldr r1, [tsk, #TI_PREEMPT]
+ str r0, [sp, #(SVC_REGS_SIZE + TI_VMAP_RESERVE_LEN + TI_FLAGS)]
+ str r1, [sp, #(SVC_REGS_SIZE + TI_VMAP_RESERVE_LEN + TI_PREEMPT)]
+
+ ldr r0, [tsk, #TI_ADDR_LIMIT]
+ ldr r1, [tsk, #TI_TASK]
+ str r0, [sp, #(SVC_REGS_SIZE + TI_VMAP_RESERVE_LEN + TI_ADDR_LIMIT)]
+ str r1, [sp, #(SVC_REGS_SIZE + TI_VMAP_RESERVE_LEN + TI_TASK)]
+
+ ldr r0, [tsk, #TI_CPU]
+ ldr r1, [tsk, #TI_CPU_DOMAIN]
+ str r0, [sp, #(SVC_REGS_SIZE + TI_VMAP_RESERVE_LEN + TI_CPU)]
+ str r1, [sp, #(SVC_REGS_SIZE + TI_VMAP_RESERVE_LEN + TI_CPU_DOMAIN)]
+ .else
+ get_thread_info tsk
+ .endif
+#else
get_thread_info tsk
+#endif
ldr r0, [tsk, #TI_ADDR_LIMIT]
mov r1, #TASK_SIZE
str r1, [tsk, #TI_ADDR_LIMIT]
.align 5
__dabt_svc:
+#ifdef CONFIG_AMLOGIC_VMAP
+ svc_entry uaccess=0, vmap=1
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ mov r2, sp
+ uaccess_disable ip @ disable userspace access
+ bl handle_vmap_fault
+ cmp r0, #0
+ bne .L__dabt_svc_next
+ /* handled by vmap fault handler */
+ svc_exit r5, vmap=1 @ return from exception
+.L__dabt_svc_next:
+ /* re-build context for normal abort handler */
+ ldr r0, [sp, #(SVC_REGS_SIZE + TI_VMAP_BACK_SP)]
+ sub r0, r0, #SVC_REGS_SIZE
+ mov r1, sp
+ mov r2, #SVC_REGS_SIZE
+ bl memcpy /* copy back sp */
+ mov sp, r0
+#else
svc_entry uaccess=0
+#endif
mov r2, sp
dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
static atomic_t vmap_stack_size;
static struct aml_vmap *avmap;
+#ifdef CONFIG_ARM64
DEFINE_PER_CPU(unsigned long [THREAD_SIZE/sizeof(long)], vmap_stack)
__aligned(16);
+#else
+static unsigned long irq_stack1[(THREAD_SIZE/sizeof(long))]
+ __aligned(THREAD_SIZE);
+static void *irq_stack[NR_CPUS] = {
+ irq_stack1, /* only the 1st IRQ stack is static; others are allocated later */
+};
+static unsigned long vmap_stack1[(THREAD_SIZE/sizeof(long))]
+ __aligned(THREAD_SIZE);
+static void *vmap_stack[NR_CPUS] = {
+ vmap_stack1, /* only the 1st vmap stack is static; others are allocated later */
+};
+#endif
void update_vmap_stack(int diff)
{
}
EXPORT_SYMBOL(get_vmap_stack_size);
-static int is_vmap_addr(unsigned long addr)
+#ifdef CONFIG_ARM64
+bool on_vmap_stack(unsigned long sp, int cpu)
+{
+ /* variable names the same as kernel/stacktrace.c */
+ unsigned long low = (unsigned long)per_cpu(vmap_stack, cpu);
+ unsigned long high = low + THREAD_START_SP;
+
+ return (low <= sp && sp <= high);
+}
+#endif
+
+#ifdef CONFIG_ARM
+void notrace __setup_vmap_stack(unsigned long cpu)
+{
+ void *stack;
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define TAG "r"
+#else
+#define TAG "I"
+#endif
+ stack = vmap_stack[cpu];
+ if (!stack) {
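+ /*
+ * assumes kmalloc(THREAD_SIZE) returns THREAD_SIZE-aligned memory
+ * (power-of-two slab); on_irq_stack() relies on this alignment
+ */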
+ stack = kmalloc(THREAD_SIZE, GFP_ATOMIC | __GFP_ZERO);
+ WARN_ON(!stack);
+ vmap_stack[cpu] = stack;
+ irq_stack[cpu] = kmalloc(THREAD_SIZE, GFP_ATOMIC | __GFP_ZERO);
+ WARN_ON(!irq_stack[cpu]);
+ }
+
+ pr_info("cpu %ld, vmap stack:[%lx-%lx]\n",
+ cpu, (unsigned long)stack,
+ (unsigned long)stack + THREAD_START_SP);
+ pr_info("cpu %ld, irq stack:[%lx-%lx]\n",
+ cpu, (unsigned long)irq_stack[cpu],
+ (unsigned long)irq_stack[cpu] + THREAD_START_SP);
+ stack += THREAD_SIZE;
+ stack -= sizeof(struct thread_info);
+ /*
+ * reserve 24 bytes for r0, lr, spsr, sp_svc and an 8-byte gap
+ */
+ stack -= 24;
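+ /* enter ABT mode with IRQ/FIQ masked, install the new sp, return to SVC */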
+ asm volatile (
+ "msr cpsr_c, %1 \n"
+ "mov sp, %0 \n"
+ "msr cpsr_c, %2 \n"
+ :
+ : "r" (stack),
+ TAG(PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ TAG(PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ : "memory", "cc"
+ );
+}
+
+int on_irq_stack(unsigned long sp, int cpu)
+{
+ unsigned long sp_irq;
+
+ sp_irq = (unsigned long)irq_stack[cpu];
+ if ((sp & ~(THREAD_SIZE - 1)) == (sp_irq & ~(THREAD_SIZE - 1)))
+ return 1;
+ return 0;
+}
+
+unsigned long notrace irq_stack_entry(unsigned long sp_irq)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (!on_irq_stack(sp_irq, cpu)) {
+ unsigned long sp = (unsigned long)irq_stack[cpu];
+ void *src, *dst;
+
+ /*
+ * copy some data to irq stack
+ */
+ src = current_thread_info();
+ dst = (void *)(sp + THREAD_INFO_OFFSET);
+ memcpy(dst, src, offsetof(struct thread_info, cpu_context));
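+ /* park sp_irq 8 bytes below the copied thread_info */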
+ sp_irq = (unsigned long)dst - 8;
+ }
+ return sp_irq;
+}
+
+unsigned long notrace pmd_check(unsigned long addr, unsigned long far)
+{
+ unsigned int index;
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
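+ /*
+ * sync the kernel page table entry for a vmap address into the
+ * current pgd, mirroring the translation-fault fixup in
+ * arch/arm/mm/fault.c
+ */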
+ if (addr < TASK_SIZE)
+ return addr;
+
+ index = pgd_index(addr);
+
+ pgd = cpu_get_pgd() + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (pgd_none(*pgd_k))
+ goto bad_area;
+ if (!pgd_present(*pgd))
+ set_pgd(pgd, *pgd_k);
+
+ pud = pud_offset(pgd, addr);
+ pud_k = pud_offset(pgd_k, addr);
+
+ if (pud_none(*pud_k))
+ goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+
+#ifdef CONFIG_ARM_LPAE
+ /*
+ * Only one hardware entry per PMD with LPAE.
+ */
+ index = 0;
+#else
+ /*
+ * On ARM one Linux PGD entry contains two hardware entries (see page
+ * tables layout in pgtable.h). We normally guarantee that we always
+ * fill both L1 entries. But create_mapping() doesn't follow this rule.
+ * It can create individual L1 entries, so here we have to run the
+ * pmd_none() check on the entry that really corresponds to the address,
+ * not just on the first entry of the pair.
+ */
+ index = (addr >> SECTION_SHIFT) & 1;
+#endif
+ if (pmd_none(pmd_k[index]))
+ goto bad_area;
+
+ copy_pmd(pmd, pmd_k);
+bad_area:
+ return addr;
+}
+#endif
+
+int is_vmap_addr(unsigned long addr)
{
unsigned long start, end;
return page;
}
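+/*
+ * walk the init_mm page tables and return the page mapped at @addr,
+ * or NULL if no pte is present
+ */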
+static struct page *check_pte_exist(unsigned long addr)
+{
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ mm = &init_mm;
+
+ pgd = pgd_offset(mm, addr);
+
+ if (pgd_none(*pgd))
+ return NULL;
+
+ if (pgd_bad(*pgd))
+ return NULL;
+
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud))
+ return NULL;
+
+ if (pud_bad(*pud))
+ return NULL;
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
+ return NULL;
+
+ if (pmd_bad(*pmd))
+ return NULL;
+
+ pte = pte_offset_kernel(pmd, addr);
+ if (pte_none(*pte))
+ return NULL;
+#ifdef CONFIG_ARM64
+ return pte_page(*pte);
+#elif defined(CONFIG_ARM)
+ return pte_page(*pte);
+#else
+ return NULL; /* not supported */
+#endif
+}
+
static int vmap_mmu_set(struct page *page, unsigned long addr, int set)
{
pgd_t *pgd = NULL;
goto nomem;
}
- pte = pte_offset_map(pmd, addr);
+ pte = pte_offset_kernel(pmd, addr);
if (set)
set_pte_at(&init_mm, addr, pte, mk_pte(page, PAGE_KERNEL));
else
pte_clear(&init_mm, addr, pte);
- pte_unmap(pte);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+#ifdef CONFIG_ARM64
D("add:%lx, pgd:%p %llx, pmd:%p %llx, pte:%p %llx\n",
addr, pgd, pgd_val(*pgd), pmd, pmd_val(*pmd),
pte, pte_val(*pte));
+#elif defined(CONFIG_ARM)
+ D("add:%lx, pgd:%p %x, pmd:%p %x, pte:%p %x\n",
+ addr, pgd, (unsigned int)pgd_val(*pgd),
+ pmd, (unsigned int)pmd_val(*pmd),
+ pte, pte_val(*pte));
+#endif
return 0;
nomem:
E("allocation page talbe failed, G:%p, U:%p, M:%p, T:%p",
static int stack_floor_page(unsigned long addr)
{
+ unsigned long pos;
+
+ pos = addr & (THREAD_SIZE - 1);
/*
* stack address must align to THREAD_SIZE
*/
- return ((addr & (THREAD_SIZE - 1)) < PAGE_SIZE);
+ if (THREAD_SIZE_ORDER > 1)
+ return pos < PAGE_SIZE;
+ else
+ return pos < (PAGE_SIZE / 4);
}
static int check_addr_up_flow(unsigned long addr)
* range (aligned to THREAD_SIZE) but the next page of this
* addr is not mapped
*/
- if (stack_floor_page(addr) &&
- !vmalloc_to_page((const void *)(addr + PAGE_SIZE)))
+ if (stack_floor_page(addr) && !check_pte_exist(addr + PAGE_SIZE))
return 1;
return 0;
}
-#if DEBUG
-static void dump_backtrace_entry(unsigned long ip, unsigned long fp)
+static void dump_backtrace_entry(unsigned long ip, unsigned long fp,
+ unsigned long sp)
{
unsigned long fp_size = 0;
+#ifdef CONFIG_ARM64
if (fp >= VMALLOC_START) {
fp_size = *((unsigned long *)fp) - fp;
/* fp cross IRQ or vmap stack */
}
pr_info("[%016lx+%4ld][<%p>] %pS\n",
fp, fp_size, (void *) ip, (void *) ip);
+#elif defined(CONFIG_ARM)
+ if (fp >= TASK_SIZE) {
+ fp_size = fp - sp + 4;
+ /* fp cross IRQ or vmap stack */
+ if (fp_size >= THREAD_SIZE)
+ fp_size = 0;
+ }
+ pr_info("[%08lx+%4ld][<%p>] %pS\n",
+ fp, fp_size, (void *) ip, (void *) ip);
+#endif
}
-static void show_fault_stack(unsigned long addr, struct pt_regs *regs)
+static noinline void show_fault_stack(unsigned long addr, struct pt_regs *regs)
{
struct stackframe frame;
+#ifdef CONFIG_ARM64
frame.fp = regs->regs[29];
frame.sp = addr;
frame.pc = (unsigned long)regs->regs[30];
+#elif defined(CONFIG_ARM)
+ frame.fp = regs->ARM_fp;
+ frame.sp = regs->ARM_sp;
+ frame.pc = (unsigned long)regs->uregs[15];
+#endif
- pr_info("Call trace:\n");
+ pr_info("Addr:%lx, Call trace:\n", addr);
+#ifdef CONFIG_ARM64
pr_info("[%016lx+%4ld][<%p>] %pS\n",
addr, frame.fp - addr, (void *)regs->pc, (void *) regs->pc);
+#elif defined(CONFIG_ARM)
+ pr_info("[%08lx+%4ld][<%p>] %pS\n",
+ addr, frame.fp - addr, (void *)regs->uregs[15],
+ (void *) regs->uregs[15]);
+#endif
while (1) {
int ret;
- dump_backtrace_entry(frame.pc, frame.fp);
+ dump_backtrace_entry(frame.pc, frame.fp, frame.sp);
+ #ifdef CONFIG_ARM64
ret = unwind_frame(current, &frame);
+ #elif defined(CONFIG_ARM)
+ ret = unwind_frame(&frame);
+ #endif
if (ret < 0)
break;
}
}
+
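+/*
+ * if restoring the saved context would itself touch an unmapped
+ * vmap page (the copy spans from sp - sizeof(pt_regs) across a page
+ * boundary), map that page first so the copy back cannot fault
+ */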
+static void check_sp_fault_again(struct pt_regs *regs)
+{
+ unsigned long sp = 0, addr;
+ struct page *page;
+ int cache;
+
+#ifdef CONFIG_ARM
+ sp = regs->ARM_sp;
+#elif defined(CONFIG_ARM64)
+ sp = regs->sp;
#endif
+ addr = sp - sizeof(*regs);
+
+ if (sp && ((addr & PAGE_MASK) != (sp & PAGE_MASK))) {
+ /*
+ * will fault when we copy back context, so handle
+ * it first
+ */
+ E("fault again, sp:%lx, addr:%lx\n", sp, addr);
+ page = get_vmap_cached_page(&cache);
+ WARN_ON(!page);
+ vmap_mmu_set(page, addr, 1);
+ update_vmap_stack(1);
+ if ((THREAD_SIZE_ORDER > 1) && stack_floor_page(addr)) {
+ E("task:%d %s, stack near overflow, addr:%lx\n",
+ current->pid, current->comm, addr);
+ show_fault_stack(addr, regs);
+ }
+
+ /* cache is not enough */
+ if (cache <= (VMAP_CACHE_PAGE / 2))
+ mod_delayed_work(system_highpri_wq, &avmap->mwork, 0);
+
+ D("map page:%5lx for addr:%lx\n", page_to_pfn(page), addr);
+ #if DEBUG
+ show_fault_stack(addr, regs);
+ #endif
+ }
+}
/*
* IRQs must *NEVER* be enabled in this handler
struct page *page;
int cache = 0;
- if (!is_vmap_addr(addr))
+ if (!is_vmap_addr(addr)) {
+ check_sp_fault_again(regs);
return -EINVAL;
+ }
D("addr:%lx, esr:%x, task:%5d %s\n",
addr, esr, current->pid, current->comm);
+#ifdef CONFIG_ARM64
D("pc:%pf, %llx, lr:%pf, %llx, sp:%llx, %lx\n",
(void *)regs->pc, regs->pc,
(void *)regs->regs[30], regs->regs[30], regs->sp,
current_stack_pointer);
+#elif defined(CONFIG_ARM)
+ D("pc:%pf, %lx, lr:%pf, %lx, sp:%lx, %lx\n",
+ (void *)regs->uregs[15], regs->uregs[15],
+ (void *)regs->uregs[14], regs->uregs[14], regs->uregs[13],
+ current_stack_pointer);
+#endif
if (check_addr_up_flow(addr)) {
E("address %lx out of range\n", addr);
+ #ifdef CONFIG_ARM64
E("PC is:%llx, %pf, LR is:%llx %pf\n",
regs->pc, (void *)regs->pc,
regs->regs[30], (void *)regs->regs[30]);
+ #elif defined(CONFIG_ARM)
+ E("PC is:%lx, %pf, LR is:%lx %pf\n",
+ regs->uregs[15], (void *)regs->uregs[15],
+ regs->uregs[14], (void *)regs->uregs[14]);
+ #endif
E("task:%d %s, stack:%p, %lx\n",
current->pid, current->comm, current->stack,
current_stack_pointer);
- dump_stack();
+ show_fault_stack(addr, regs);
+ check_sp_fault_again(regs);
return -ERANGE;
}
+#ifdef CONFIG_ARM
+ page = check_pte_exist(addr);
+ if (page) {
+ D("task:%d %s, page:%lx mapped for addr:%lx\n",
+ current->pid, current->comm, page_to_pfn(page), addr);
+ check_sp_fault_again(regs);
+ return -EINVAL;
+ }
+#endif
+
/*
* allocate a new page for vmap
*/
WARN_ON(!page);
vmap_mmu_set(page, addr, 1);
update_vmap_stack(1);
- if ((THREAD_SIZE_ORDER > 1) && stack_floor_page(addr)) {
+ if ((THREAD_SIZE_ORDER > 1) && stack_floor_page(addr)) {
E("task:%d %s, stack near overflow, addr:%lx\n",
current->pid, current->comm, addr);
- dump_stack();
+ show_fault_stack(addr, regs);
}
/* cache is not enough */
#if DEBUG
show_fault_stack(addr, regs);
#endif
-
return 0;
}
EXPORT_SYMBOL(handle_vmap_fault);
unsigned long stack = (unsigned long)task_stack_page(tsk);
struct page *first_page;
+ if (unlikely(!is_vmap_addr(stack))) {
+ /* stack was allocated with kmalloc */
+ first_page = virt_to_page((void *)stack);
+ mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
+ THREAD_SIZE / 1024 * account);
+
+ memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
+ account * (THREAD_SIZE / 1024));
+ update_vmap_stack(account * (THREAD_SIZE / PAGE_SIZE));
+ return;
+ }
stack += STACK_TOP_PAGE_OFF;
first_page = vmalloc_to_page((void *)stack);
mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
avmap->start_bit = bitmap_no + 1; /* next idle address space */
if (bitmap_no >= MAX_TASKS) {
spin_unlock_irqrestore(&avmap->vmap_lock, flags);
- E("BITMAP FULL!!!\n");
- return NULL;
+ /*
+ * if the vmap address space is full, fall back to getting
+ * the task stack from kmalloc
+ */
+ addr = (unsigned long)kmalloc(THREAD_SIZE, GFP_KERNEL);
+ E("BITMAP FULL, kmalloc task stack:%lx\n", addr);
+ return (void *)addr;
}
bitmap_set(avmap->bitmap, bitmap_no, 1);
spin_unlock_irqrestore(&avmap->vmap_lock, flags);
- page = alloc_page(THREADINFO_GFP | __GFP_ZERO);
+ page = alloc_page(THREADINFO_GFP | __GFP_ZERO | __GFP_HIGHMEM);
if (!page) {
spin_lock_irqsave(&avmap->vmap_lock, flags);
bitmap_clear(avmap->bitmap, bitmap_no, 1);
map_addr = addr + STACK_TOP_PAGE_OFF;
vmap_mmu_set(page, map_addr, 1);
update_vmap_stack(1);
+
D("bit idx:%5ld, start:%5ld, addr:%lx, page:%lx\n",
bitmap_no, raw_start, addr, page_to_pfn(page));
struct page *page;
unsigned long flags;
+ if (unlikely(!is_vmap_addr(stack))) {
+ /* stack was allocated with kmalloc */
+ kfree((void *)stack);
+ return;
+ }
+
addr = stack + STACK_TOP_PAGE_OFF;
for (; addr >= stack; addr -= PAGE_SIZE) {
page = vmalloc_to_page((const void *)addr);
INIT_LIST_HEAD(&head);
for (i = 0; i < VMAP_CACHE_PAGE - cnt; i++) {
- page = alloc_page(GFP_KERNEL | __GFP_HIGH);
+ page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
if (!page) {
E("get page failed, allocated:%d, cnt:%d\n", i, cnt);
break;
void __init thread_stack_cache_init(void)
{
int i;
- unsigned long addr;
struct page *page;
- page = alloc_pages(GFP_KERNEL, VMAP_CACHE_PAGE_ORDER);
+ page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, VMAP_CACHE_PAGE_ORDER);
if (!page)
return;
pr_info("%s, vmap:%p, bitmap:%p, cache page:%lx\n",
__func__, avmap, avmap->bitmap, page_to_pfn(page));
avmap->root_vm = __get_vm_area_node(VM_STACK_AREA_SIZE,
- VM_STACK_AREA_SIZE,
- 0, VMALLOC_START, VMALLOC_END,
+ VMAP_ALIGN,
+ 0, VMAP_ADDR_START, VMAP_ADDR_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
if (!avmap->root_vm) {
avmap->cached_pages = VMAP_CACHE_PAGE;
INIT_DELAYED_WORK(&avmap->mwork, page_cache_maintain_work);
+#ifdef CONFIG_ARM64
for_each_possible_cpu(i) {
+ unsigned long addr;
addr = (unsigned long)per_cpu_ptr(vmap_stack, i);
pr_info("cpu %d, vmap_stack:[%lx-%lx]\n",
i, addr, addr + THREAD_START_SP);
pr_info("cpu %d, irq_stack: [%lx-%lx]\n",
i, addr, addr + THREAD_START_SP);
}
+#endif
register_shrinker(&vmap_shrinker);
}