riscv: kernel: Support hibernation resume for JH7110
author Sia Jee Heng <jeeheng.sia@starfivetech.com>
Sun, 6 Nov 2022 11:24:44 +0000 (19:24 +0800)
committer mason.huo <mason.huo@starfivetech.com>
Thu, 5 Jan 2023 05:24:40 +0000 (13:24 +0800)
Further expand hibernation support so that the hibernated image can be
restored from disk.
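
Roughly, the restore path added by this patch is:

  swsusp_arch_resume()
    -> build temporary page tables (resume_pg_dir) covering the linear
       map via temp_page_mapping()
    -> relocate_restore_code() copies core_restore_code to a safe page
    -> restore_image() switches to the relocated copy loop:
         core_restore_code walks restore_pblist, copies each saved page
         back to its original location, then jumps to
         __hibernate_cpu_resume(), which switches to the saved satp and
         restores the CPU context captured at suspend time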

Signed-off-by: Sia Jee Heng <jeeheng.sia@starfivetech.com>
arch/riscv/include/asm/sections.h
arch/riscv/include/asm/suspend.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/hibernate-asm.S [new file with mode: 0644]
arch/riscv/kernel/hibernate.c
arch/riscv/mm/init.c

diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 32336e8..ad1588d 100644
@@ -13,6 +13,7 @@ extern char _start_kernel[];
 extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
+extern phys_addr_t end_linear_map;
 
 static inline bool is_va_kernel_text(uintptr_t va)
 {
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 9a98e0d..de8b4ea 100644
@@ -43,6 +43,7 @@ int swsusp_arch_suspend(void);
 int swsusp_arch_resume(void);
 int arch_hibernation_header_save(void *addr, unsigned int max_size);
 int arch_hibernation_header_restore(void *addr);
+int __hibernate_cpu_resume(unsigned long context);
 
 /* Used to resume on the CPU we hibernated on */
 int hibernate_resume_nonboot_cpu_disable(void);
@@ -50,4 +51,8 @@ int hibernate_resume_nonboot_cpu_disable(void);
 /* Used to save and restore the csr */
 void suspend_save_csrs(struct suspend_context *context);
 void suspend_restore_csrs(struct suspend_context *context);
+
+asmlinkage void restore_image(unsigned long resume_satp, unsigned long satp_temp,
+                               unsigned long cpu_resume, unsigned long resume_context);
+asmlinkage int core_restore_code(void);
 #endif
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 6016033..297a6b6 100644
@@ -48,7 +48,7 @@ obj-$(CONFIG_MODULES)         += module.o
 obj-$(CONFIG_MODULE_SECTIONS)  += module-sections.o
 
 obj-$(CONFIG_CPU_PM)           += suspend_entry.o suspend.o
-obj-$(CONFIG_HIBERNATION)      += hibernate.o
+obj-$(CONFIG_HIBERNATION)      += hibernate.o hibernate-asm.o
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += mcount-dyn.o
 
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 9ec6731..6758453 100644
@@ -8,6 +8,7 @@
 
 #include <linux/kbuild.h>
 #include <linux/sched.h>
+#include <linux/suspend.h>
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
 #include <asm/suspend.h>
@@ -115,6 +116,10 @@ void asm_offsets(void)
 
        OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
 
+       OFFSET(HIBERN_PBE_ADDR, pbe, address);
+       OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
+       OFFSET(HIBERN_PBE_NEXT, pbe, next);
+
        /*
         * THREAD_{F,X}* might be larger than a S-type offset can handle, but
         * these are used in performance-sensitive assembly so we can't resort
diff --git a/arch/riscv/kernel/hibernate-asm.S b/arch/riscv/kernel/hibernate-asm.S
new file mode 100644
index 0000000..60a9891
--- /dev/null
+++ b/arch/riscv/kernel/hibernate-asm.S
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hibernation support specific to RISC-V
+ *
+ * Copyright (C) 2022 Shanghai StarFive Technology Co., Ltd.
+ *
+ * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+       .text
+       .altmacro
+       .option norelax
+
+ENTRY(__hibernate_cpu_resume)
+       /* Load the global pointer */
+       .option push
+       .option norelax
+       la gp, __global_pointer$
+       .option pop
+
+       /* Switch back to the root page table saved at suspend time */
+       csrw    CSR_SATP, s0
+       sfence.vma
+
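+       /* a0 = pointer to the saved struct suspend_context */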
+       ld      a0, hibernate_cpu_context
+
+       /* Restore CSRs */
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+       csrw    CSR_EPC, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+       csrw    CSR_STATUS, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+       csrw    CSR_TVAL, t0
+       REG_L   t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+       csrw    CSR_CAUSE, t0
+
+       /* Restore registers (except A0 and T0-T6) */
+       REG_L   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+       REG_L   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+       REG_L   gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+       REG_L   tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+
+       REG_L   s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+       REG_L   s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+       REG_L   a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+       REG_L   a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+       REG_L   a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+       REG_L   a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+       REG_L   a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+       REG_L   a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+       REG_L   a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+       REG_L   s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+       REG_L   s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+       REG_L   s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+       REG_L   s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+       REG_L   s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+       REG_L   s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+       REG_L   s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+       REG_L   s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+       REG_L   s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+       REG_L   s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+
+       /*
+        * Return zero: the restored ra takes us back to where
+        * __cpu_suspend_enter() was called, and a zero return there
+        * selects the "resumed from image" path.
+        */
+       add     a0, zero, zero
+
+       ret
+END(__hibernate_cpu_resume)
+
+/*
+ * a0: satp of the saved page tables
+ * a1: satp of the temporary page tables
+ * a2: address of __hibernate_cpu_resume
+ * a3: address of the saved cpu context (hibernate_cpu_context)
+ */
+ENTRY(restore_image)
+       mv      s0, a0
+       mv      s1, a1
+       mv      s2, a2
+       mv      s3, a3
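+       /* s4 = head of restore_pblist, a1 = relocated copy of core_restore_code */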
+       ld      s4, restore_pblist
+       ld      a1, relocated_restore_code
+
+       jalr    a1
+END(restore_image)
+
+ENTRY(core_restore_code)
+       /* Switch to the temporary page table */
+       csrw    CSR_SATP, s1
+       sfence.vma
+       beqz    s4, done
+loop:
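+       /* a1 = source page (pbe->address), a0 = destination (pbe->orig_address) */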
+       ld      a1, HIBERN_PBE_ADDR(s4)
+       ld      a0, HIBERN_PBE_ORIG(s4)
+
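+       /* a4 = a0 + 4K: end of the destination page, used as the copy loop bound */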
+       lui     a4, 0x1
+       add     a4, a4, a0
+copy:  ld      a5, 0(a1)
+       addi    a0, a0, 8
+       addi    a1, a1, 8
+       sd      a5, -8(a0)
+       bne     a4, a0, copy
+
+       ld      s4, HIBERN_PBE_NEXT(s4)
+       bnez    s4, loop
+done:
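+       /* All pages restored: pass the saved context (s3) and jump to __hibernate_cpu_resume (s2) */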
+       mv      a0, s3
+       jalr    s2
+END(core_restore_code)
diff --git a/arch/riscv/kernel/hibernate.c b/arch/riscv/kernel/hibernate.c
index 603c435..7a27d94 100644
@@ -9,6 +9,7 @@
  */
 
 #include <linux/cpu.h>
+#include <linux/memblock.h>
 #include <linux/pm.h>
 #include <linux/sched.h>
 #include <linux/suspend.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/set_memory.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
 
+#include <soc/sifive/sifive_l2_cache.h>
+
 /*
  * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
 static int sleep_cpu = -EINVAL;
 
 /* CPU context to be saved */
-struct suspend_context context = { 0 };
+struct suspend_context *hibernate_cpu_context;
+
+unsigned long relocated_restore_code;
+
+/* Pointer to the temporary resume page tables */
+pgd_t *resume_pg_dir;
 
 /*
  * Values that may not change over hibernate/resume. We put the build number
@@ -45,7 +54,9 @@ struct arch_hibernate_hdr_invariants {
 /* These values need to be known across a hibernate/restore. */
 static struct arch_hibernate_hdr {
        struct arch_hibernate_hdr_invariants invariants;
-       u64             hartid;
+       unsigned long   hartid;
+       unsigned long   saved_satp;
+       unsigned long   restore_cpu_addr;
 } resume_hdr;
 
 static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
@@ -54,7 +65,6 @@ static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
        memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
 }
 
-
 int pfn_is_nosave(unsigned long pfn)
 {
        unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
@@ -83,8 +93,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
        arch_hdr_invariants(&hdr->invariants);
 
        hdr->hartid = cpuid_to_hartid_map(sleep_cpu);
-
-       pr_debug("Hibernating on CPU %x hartid %llx\n", sleep_cpu, hdr->hartid);
+       hdr->saved_satp = csr_read(CSR_SATP);
+       hdr->restore_cpu_addr = (unsigned long) __hibernate_cpu_resume;
 
        return 0;
 }
@@ -94,7 +104,7 @@ int arch_hibernation_header_restore(void *addr)
 {
        struct arch_hibernate_hdr_invariants invariants;
        struct arch_hibernate_hdr *hdr = addr;
-       int ret;
+       int ret = 0;
 
        arch_hdr_invariants(&invariants);
 
@@ -109,18 +119,16 @@ int arch_hibernation_header_restore(void *addr)
                sleep_cpu = -EINVAL;
                return -EINVAL;
        }
-
-       pr_debug("Hibernated on CPU %x hartid %llx\n", sleep_cpu, hdr->hartid);
-
+#ifdef CONFIG_SMP
        ret = bringup_hibernate_cpu(sleep_cpu);
        if (ret) {
                sleep_cpu = -EINVAL;
                return ret;
        }
-
+#endif
        resume_hdr = *hdr;
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(arch_hibernation_header_restore);
 
@@ -128,30 +136,110 @@ int swsusp_arch_suspend(void)
 {
        int ret = 0;
 
-       if (__cpu_suspend_enter(&context)) {
+       if (__cpu_suspend_enter(hibernate_cpu_context)) {
                sleep_cpu = smp_processor_id();
-               suspend_save_csrs(&context);
+               suspend_save_csrs(hibernate_cpu_context);
                ret = swsusp_save();
        } else {
-               local_flush_icache_all();
+               suspend_restore_csrs(hibernate_cpu_context);
+               flush_tlb_all();
+
+               /* Invalidate the icache */
+               flush_icache_all();
 
                /*
                 * Tell the hibernation core that we've just restored
                 * the memory
                 */
                in_suspend = 0;
-
                sleep_cpu = -EINVAL;
        }
 
        return ret;
 }
 
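+/*
+ * Map a single page at @va into the temporary resume page table @pgdp,
+ * allocating any missing intermediate tables from hibernation-safe pages
+ * (pgd -> pmd -> pte walk).
+ */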
+void temp_page_mapping(pgd_t *pgdp, unsigned long va, pgprot_t prot)
+{
+       uintptr_t pgd_idx = pgd_index(va);
+       phys_addr_t pmd_phys;
+       phys_addr_t pte_phys;
+       uintptr_t pmd_idx;
+       uintptr_t pte_idx;
+       pmd_t *pmdp;
+       pte_t *ptep;
+
+       if (pgd_val(pgdp[pgd_idx]) == 0) {
+               pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+               if (!pmdp)
+                       return;
+
+               memset(pmdp, 0, PAGE_SIZE);
+               pmd_phys = __pa(pmdp);
+               pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pmd_phys), PAGE_TABLE);
+       } else {
+               pmd_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
+               pmdp = (pmd_t *) __va(pmd_phys);
+       }
+
+       pmd_idx = pmd_index(va);
+
+       if (pmd_none(pmdp[pmd_idx])) {
+               ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+               if (!ptep)
+                       return;
+
+               memset(ptep, 0, PAGE_SIZE);
+               pte_phys = __pa(ptep);
+               pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
+       } else {
+               pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
+               ptep = (pte_t *) __va(pte_phys);
+       }
+
+       pte_idx = pte_index(va);
+
+       ptep[pte_idx] = pfn_pte(PFN_DOWN(__pa(va)), prot);
+}
+
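+/*
+ * Copy core_restore_code to a hibernation-safe page so it is not overwritten
+ * while the image is being restored, and make that page executable in both
+ * the kernel and the temporary page tables.
+ */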
+unsigned long relocate_restore_code(void)
+{
+       void *page = (void *)get_safe_page(GFP_ATOMIC);
+
+       if (!page)
+               return -ENOMEM;
+
+       memcpy(page, core_restore_code, PAGE_SIZE);
+
+       /* Make the page containing the relocated code executable */
+       set_memory_x((unsigned long)page, 1);
+
+       temp_page_mapping(resume_pg_dir, (unsigned long)page, PAGE_KERNEL_READ_EXEC);
+
+       return (unsigned long)page;
+}
+
 int swsusp_arch_resume(void)
 {
+       unsigned long addr;
+
+       resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+       if (!resume_pg_dir)
+               return -ENOMEM;
+
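+       /*
+        * Map the whole linear region into the temporary tables so the copy
+        * loop can reach every page while the image is being restored.
+        */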
+       for (addr = PAGE_OFFSET; addr <= (unsigned long)__va(end_linear_map); addr += PAGE_SIZE)
+               temp_page_mapping(resume_pg_dir, addr, PAGE_KERNEL);
+
+       relocated_restore_code = relocate_restore_code();
+       temp_page_mapping(resume_pg_dir, (unsigned long)resume_hdr.restore_cpu_addr,
+                               PAGE_KERNEL_READ_EXEC);
+
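+       /* Run the relocated copy loop, then resume via __hibernate_cpu_resume() */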
+       restore_image(resume_hdr.saved_satp, (PFN_DOWN(__pa(resume_pg_dir)) | SATP_MODE),
+                       resume_hdr.restore_cpu_addr, (unsigned long)hibernate_cpu_context);
+
        return 0;
 }
 
+#ifdef CONFIG_SMP
 int hibernate_resume_nonboot_cpu_disable(void)
 {
        if (sleep_cpu < 0) {
@@ -161,4 +249,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
 
        return freeze_secondary_cpus(sleep_cpu);
 }
+#endif
+
+static int __init riscv_hibernate_init(void)
+{
+       hibernate_cpu_context = kcalloc(1, sizeof(struct suspend_context), GFP_KERNEL);
+
+       if (WARN_ON(!hibernate_cpu_context))
+               return -ENOMEM;
+
+       return 0;
+}
+
+early_initcall(riscv_hibernate_init);
 
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c0cddf0..ce07555 100644
@@ -48,6 +48,8 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                        __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
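+/* Physical end of the linear mapping, recorded for hibernation resume */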
+phys_addr_t end_linear_map;
+
 extern char _start[];
 #define DTB_EARLY_BASE_VA      PGDIR_SIZE
 void *_dtb_early_va __initdata;
@@ -726,7 +728,7 @@ static void __init setup_vm_final(void)
                        start = __pa(PAGE_OFFSET);
                if (end >= __pa(PAGE_OFFSET) + memory_limit)
                        end = __pa(PAGE_OFFSET) + memory_limit;
-
+               end_linear_map = end;
                map_size = best_map_size(start, end - start);
                for (pa = start; pa < end; pa += map_size) {
                        va = (uintptr_t)__va(pa);