1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2012 Regents of the University of California
6 #include <asm/asm-offsets.h>
8 #include <linux/init.h>
9 #include <linux/linkage.h>
10 #include <asm/thread_info.h>
12 #include <asm/pgtable.h>
14 #include <asm/cpu_ops_sbi.h>
15 #include <asm/hwcap.h>
16 #include <asm/image.h>
17 #include "efi-header.S"
/*
 * NOTE(review): this is a non-contiguous excerpt — most lines of the image
 * header (including the MZ nop, jump, load-offset .dword values and the
 * reserved fields) are elided here. Verify edits against the full header
 * layout in asm/image.h before changing anything.
 */
22 * Image header expected by Linux boot-loaders. The image header data
23 * structure is described in asm/image.h.
24 * Do not modify it without modifying the structure and all bootloaders
25 * that expect this header format!!
29 * This instruction decodes to "MZ" ASCII required by UEFI.
34 /* jump to start kernel */
40 #ifdef CONFIG_RISCV_M_MODE
41 /* Image load offset (0MB) from start of RAM for M-mode */
44 #if __riscv_xlen == 64
45 /* Image load offset(2MB) from start of RAM */
48 /* Image load offset(4MB) from start of RAM */
52 /* Effective size of kernel image */
/* Header version number, then magic values used by bootloaders to identify a RISC-V kernel image */
55 .word RISCV_HEADER_VERSION
58 .ascii RISCV_IMAGE_MAGIC
60 .ascii RISCV_IMAGE_MAGIC2
/* Offset from _start to the PE/COFF header consumed by UEFI (see efi-header.S) */
62 .word pe_head_start - _start
/*
 * relocate_enable_mmu: turn on paging and jump from physical to virtual
 * addresses.  NOTE(review): excerpt only — the satp mode/ASID composition,
 * the csrw satp writes, the sfence.vma instructions and the stvec setup
 * between these lines are elided; do not edit without the full routine.
 */
72 .global relocate_enable_mmu
74 /* Relocate return address */
77 REG_L a1, KERNEL_MAP_VIRT_ADDR(a1)
82 /* Point stvec to virtual address of instruction after satp write */
87 /* Compute satp for kernel page tables, but don't load it yet */
88 srl a2, a0, PAGE_SHIFT /* a2 = PPN of the page-table base passed in a0 */
93 * Load trampoline page directory, which will cause us to trap to
94 * stvec if VA != PA, or simply fall through if VA == PA. We need a
95 * full fence here because setup_vm() just wrote these PTEs and we need
96 * to ensure the new translations are in use.
98 la a0, trampoline_pg_dir
100 srl a0, a0, PAGE_SHIFT /* PPN of trampoline_pg_dir, to be merged into satp */
106 /* Set trap vector to spin forever to help debug */
107 la a0, .Lsecondary_park
110 /* Reload the global pointer */
113 la gp, __global_pointer$
117 * Switch to kernel page tables. A full fence is necessary in order to
118 * avoid using the trampoline translations, which are only correct for
119 * the first superpage. Fetching the fence is guaranteed to work
120 * because that first superpage is translated the same way.
126 #endif /* CONFIG_MMU */
/*
 * secondary_start_sbi: entry point for non-boot harts started via the SBI
 * HSM extension.  NOTE(review): excerpt — the CSR writes that mask
 * interrupts, the FPU-disable sequence, and the loads that fetch the boot
 * task/stack pointers from the a1 boot-data block are elided here.
 */
128 .global secondary_start_sbi
130 /* Mask all interrupts */
134 /* Load the global pointer */
137 la gp, __global_pointer$
141 * Disable FPU to detect illegal usage of
142 * floating point in kernel space
147 /* Set trap vector to spin forever to help debug */
148 la a3, .Lsecondary_park
151 /* a0 contains the hartid & a1 contains boot data */
152 li a2, SBI_HART_BOOT_TASK_PTR_OFFSET /* offset of the boot task pointer within the boot data */
156 li a3, SBI_HART_BOOT_STACK_PTR_OFFSET /* offset of the boot stack pointer within the boot data */
161 .Lsecondary_start_common:
164 /* Enable virtual memory and relocate to virtual address */
165 la a0, swapper_pg_dir /* kernel page tables — relocate_enable_mmu takes the pgdir in a0 */
167 call relocate_enable_mmu
169 call setup_trap_vector
171 #endif /* CONFIG_SMP */
/*
 * setup_trap_vector: install the real exception handler and clear the
 * scratch CSR; followed by .Lsecondary_park, the parking loop for harts
 * we cannot run.  NOTE(review): the csrw to the trap-vector CSR and the
 * park loop's wfi/branch are elided in this excerpt.
 */
175 /* Set trap vector to exception handler */
176 la a0, handle_exception
180 * Set sup0 scratch register to 0, indicating to exception vector that
181 * we are presently executing in kernel.
183 csrw CSR_SCRATCH, zero
188 /* We lack SMP support or have too many harts, so park this hart */
195 /* Mask all interrupts */
/*
 * _start_kernel: primary C-free boot path.  Flow (as far as this excerpt
 * shows): optional M-mode setup (icache flush, register reset, permissive
 * PMP), hartid sanity check against CONFIG_NR_CPUS, hart lottery to elect
 * the boot hart, BSS clear, saving hartid/DTB, then relocate_enable_mmu
 * and the jump into start_kernel.  NOTE(review): heavily elided excerpt —
 * many instructions between these lines are missing; verify against the
 * full file before editing.
 */
199 #ifdef CONFIG_RISCV_M_MODE
200 /* flush the instruction cache */
203 /* Reset all registers except ra, a0, a1 */
207 * Setup a PMP to permit access to all of memory. Some machines may
208 * not implement PMPs, so we set up a quick trap handler to just skip
209 * touching the PMPs on any trap.
215 csrw CSR_PMPADDR0, a0
216 li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X) /* one NAPOT region, RWX — wide open on purpose at early boot */
222 * The hartid in a0 is expected later on, and we have no firmware
226 #endif /* CONFIG_RISCV_M_MODE */
228 /* Load the global pointer */
231 la gp, __global_pointer$
235 * Disable FPU to detect illegal usage of
236 * floating point in kernel space
/* Harts with id >= CONFIG_NR_CPUS cannot be managed — park them */
242 li t0, CONFIG_NR_CPUS
243 blt a0, t0, .Lgood_cores
244 tail .Lsecondary_park
248 #ifndef CONFIG_XIP_KERNEL
249 /* Pick one hart to run the main boot sequence */
252 amoadd.w a3, a2, (a3) /* atomic fetch-add on the lottery counter: only the hart that reads 0 wins */
253 bnez a3, .Lsecondary_start
256 /* hart_lottery in flash contains a magic number */
261 amoswap.w t0, t1, (a2)
262 /* first time here if hart_lottery in RAM is not set */
263 beq t0, t1, .Lsecondary_start
265 la sp, _end + THREAD_SIZE /* temporary stack just past the image for the XIP copy phase */
270 /* Restore a0 copy */
274 #ifndef CONFIG_XIP_KERNEL
275 /* Clear BSS for flat non-ELF images */
278 ble a4, a3, clear_bss_done
/* a3 = cursor, a4 = end; advance one pointer-sized word per iteration */
281 add a3, a3, RISCV_SZPTR
282 blt a3, a4, clear_bss
285 /* Save hart ID and DTB physical address */
289 la a2, boot_cpu_hartid
293 /* Initialize page tables and relocate to virtual addresses */
294 la sp, init_thread_union + THREAD_SIZE
296 #ifdef CONFIG_BUILTIN_DTB
300 #endif /* CONFIG_BUILTIN_DTB */
305 call relocate_enable_mmu
306 #endif /* CONFIG_MMU */
308 call setup_trap_vector
309 /* Restore C environment */
311 sw zero, TASK_TI_CPU(tp) /* boot hart is CPU 0: thread_info->cpu = 0 */
312 la sp, init_thread_union + THREAD_SIZE
315 call kasan_early_init
317 /* Start the kernel */
323 /* Set trap vector to spin forever to help debug */
324 la a3, .Lsecondary_park
/* Legacy (non-SBI-HSM) bring-up: spin until the boot hart publishes our stack/task pointers */
328 la a1, __cpu_up_stack_pointer
330 la a2, __cpu_up_task_pointer
336 * This hart didn't win the lottery, so we wait for the winning hart to
337 * get far enough along the boot process that it should continue.
340 /* FIXME: We should WFI to save some energy here. */
343 beqz sp, .Lwait_for_cpu_up
344 beqz tp, .Lwait_for_cpu_up
347 tail .Lsecondary_start_common
/*
 * reset_regs (M-mode only): clear architectural register state left by
 * firmware; FP registers are reset only when F/D support is present.
 * NOTE(review): almost all of the body (the GPR and FP register clears)
 * is elided in this excerpt.
 */
352 #ifdef CONFIG_RISCV_M_MODE
/* Skip the FP reset entirely when the hart reports neither F nor D */
386 andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
387 beqz t0, .Lreset_regs_done
424 /* note that the caller must clear SR_FS */
425 #endif /* CONFIG_FPU */
429 #endif /* CONFIG_RISCV_M_MODE */
432 /* Empty zero page */