RISC-V: Add support for ordered booting method using HSM
authorAtish Patra <atish.patra@wdc.com>
Wed, 18 Mar 2020 01:11:43 +0000 (18:11 -0700)
committerPalmer Dabbelt <palmerdabbelt@google.com>
Tue, 31 Mar 2020 18:27:50 +0000 (11:27 -0700)
Currently, all harts have to jump to Linux in RISC-V. This complicates the
multi-stage boot process, as every transient stage also has to ensure that
all harts enter that stage and jump to Linux afterwards. It also obstructs
a clean kexec implementation.

The SBI HSM extension provides an alternate solution where only a single
hart needs to boot and enter Linux. The booting hart can then bring up the
secondary harts one by one.
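
At its core, the new boot path is one HSM HART_START ecall per secondary
hart. A condensed sketch of the call added below in cpu_ops_sbi.c (the
helper name here is only illustrative):

  #include <asm/sbi.h>

  /* Ask the SBI firmware to start 'hartid' at physical address 'saddr';
   * 'priv' is an opaque value handed to the started hart in a1. */
  static int hsm_hart_start(unsigned long hartid, unsigned long saddr,
                            unsigned long priv)
  {
          struct sbiret ret;

          ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
                          hartid, saddr, priv, 0, 0, 0);
          return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
  }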

Add SBI HSM based cpu_ops that implement an ordered booting method in
RISC-V. This change is also backward compatible with older firmware that
does not implement the HSM extension: if a newer kernel runs on such
firmware, it will continue to use the default spinwait booting method.
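
In condensed form, the detection and fallback added to cpu_set_ops()
below looks like this:

  #if IS_ENABLED(CONFIG_RISCV_SBI)
          /* Prefer HSM-based ordered booting when the firmware supports it. */
          if (sbi_probe_extension(SBI_EXT_HSM) > 0)
                  cpu_ops[cpuid] = &cpu_ops_sbi;
          else
  #endif
                  /* Older firmware: keep the default spinwait method. */
                  cpu_ops[cpuid] = &cpu_ops_spinwait;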

Signed-off-by: Atish Patra <atish.patra@wdc.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
arch/riscv/kernel/Makefile
arch/riscv/kernel/cpu_ops.c
arch/riscv/kernel/cpu_ops_sbi.c [new file with mode: 0644]
arch/riscv/kernel/head.S
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/traps.c

index 43d49ea..674a23c 100644 (file)
@@ -46,5 +46,8 @@ obj-$(CONFIG_PERF_EVENTS)     += perf_event.o
 obj-$(CONFIG_PERF_EVENTS)      += perf_callchain.o
 obj-$(CONFIG_HAVE_PERF_REGS)   += perf_regs.o
 obj-$(CONFIG_RISCV_SBI)                += sbi.o
+ifeq ($(CONFIG_RISCV_SBI), y)
+obj-$(CONFIG_SMP) += cpu_ops_sbi.o
+endif
 
 clean:
index 6270590..c4c33bf 100644 (file)
@@ -18,6 +18,7 @@ const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
 void *__cpu_up_stack_pointer[NR_CPUS];
 void *__cpu_up_task_pointer[NR_CPUS];
 
+extern const struct cpu_operations cpu_ops_sbi;
 extern const struct cpu_operations cpu_ops_spinwait;
 
 void cpu_update_secondary_bootdata(unsigned int cpuid,
@@ -34,5 +35,12 @@ void cpu_update_secondary_bootdata(unsigned int cpuid,
 
 void __init cpu_set_ops(int cpuid)
 {
-       cpu_ops[cpuid] = &cpu_ops_spinwait;
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+       if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
+               if (!cpuid)
+                       pr_info("SBI v0.2 HSM extension detected\n");
+               cpu_ops[cpuid] = &cpu_ops_sbi;
+       } else
+#endif
+               cpu_ops[cpuid] = &cpu_ops_spinwait;
 }
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
new file mode 100644 (file)
index 0000000..66f3cde
--- /dev/null
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HSM extension and cpu_ops implementation.
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/cpu_ops.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+extern char secondary_start_sbi[];
+const struct cpu_operations cpu_ops_sbi;
+
+static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
+                             unsigned long priv)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
+                       hartid, saddr, priv, 0, 0, 0);
+       if (ret.error)
+               return sbi_err_map_linux_errno(ret.error);
+       else
+               return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int sbi_hsm_hart_stop(void)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);
+
+       if (ret.error)
+               return sbi_err_map_linux_errno(ret.error);
+       else
+               return 0;
+}
+
+static int sbi_hsm_hart_get_status(unsigned long hartid)
+{
+       struct sbiret ret;
+
+       ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
+                       hartid, 0, 0, 0, 0, 0);
+       if (ret.error)
+               return sbi_err_map_linux_errno(ret.error);
+       else
+               return ret.value;
+}
+#endif
+
+static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+{
+       int rc;
+       unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
+       int hartid = cpuid_to_hartid_map(cpuid);
+
+       cpu_update_secondary_bootdata(cpuid, tidle);
+       rc = sbi_hsm_hart_start(hartid, boot_addr, 0);
+
+       return rc;
+}
+
+static int sbi_cpu_prepare(unsigned int cpuid)
+{
+       if (!cpu_ops_sbi.cpu_start) {
+               pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
+               return -ENODEV;
+       }
+       return 0;
+}
+
+const struct cpu_operations cpu_ops_sbi = {
+       .name           = "sbi",
+       .cpu_prepare    = sbi_cpu_prepare,
+       .cpu_start      = sbi_cpu_start,
+};
index 1735073..e5115d5 100644 (file)
@@ -99,11 +99,37 @@ relocate:
        ret
 #endif /* CONFIG_MMU */
 #ifdef CONFIG_SMP
+       .global secondary_start_sbi
+secondary_start_sbi:
+       /* Mask all interrupts */
+       csrw CSR_IE, zero
+       csrw CSR_IP, zero
+
+       /* Load the global pointer */
+       .option push
+       .option norelax
+               la gp, __global_pointer$
+       .option pop
+
+       /*
+        * Disable FPU to detect illegal usage of
+        * floating point in kernel space
+        */
+       li t0, SR_FS
+       csrc CSR_STATUS, t0
+
        /* Set trap vector to spin forever to help debug */
        la a3, .Lsecondary_park
        csrw CSR_TVEC, a3
 
        slli a3, a0, LGREG
+       la a4, __cpu_up_stack_pointer
+       la a5, __cpu_up_task_pointer
+       add a4, a3, a4
+       add a5, a3, a5
+       REG_L sp, (a4)
+       REG_L tp, (a5)
+
        .global secondary_start_common
 secondary_start_common:
 
index e89396a..4e99227 100644 (file)
@@ -143,7 +143,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /*
  * C entry point for a secondary processor.
  */
-asmlinkage __visible void __init smp_callin(void)
+asmlinkage __visible void smp_callin(void)
 {
        struct mm_struct *mm = &init_mm;
 
index a4d1363..23a57b9 100644 (file)
@@ -148,7 +148,7 @@ int is_valid_bugaddr(unsigned long pc)
 }
 #endif /* CONFIG_GENERIC_BUG */
 
-void __init trap_init(void)
+void trap_init(void)
 {
        /*
         * Set sup0 scratch register to 0, indicating to exception vector