// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *	https://lkml.org/lkml/2010/6/18/4
 *	https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *	https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/pgalloc.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen when KASLR is in use.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}

void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
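
/*
 * Page-table helpers for the resume path. The temporary tables built below
 * are allocated with get_safe_page(), i.e. from pages that the hibernate
 * core will not overwrite while copying the image back.
 */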

static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
			      unsigned long dst_addr,
			      pgprot_t pgprot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
	if (pgd_none(READ_ONCE(*pgdp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		pgd_populate(&init_mm, pgdp, pudp);
	}

	p4dp = p4d_offset(pgdp, dst_addr);
	if (p4d_none(READ_ONCE(*p4dp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, p4dp, pudp);
	}

	pudp = pud_offset(p4dp, dst_addr);
	if (pud_none(READ_ONCE(*pudp))) {
		pmdp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, pudp, pmdp);
	}

	pmdp = pmd_offset(pudp, dst_addr);
	if (pmd_none(READ_ONCE(*pmdp))) {
		ptep = (void *)get_safe_page(GFP_ATOMIC);
		if (!ptep)
			return -ENOMEM;
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, dst_addr);
	set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));

	return 0;
}

/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr)
{
	void *page = (void *)get_safe_page(GFP_ATOMIC);
	pgd_t *trans_pgd;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	__flush_icache_range((unsigned long)page, (unsigned long)page + length);

	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
	if (!trans_pgd)
		return -ENOMEM;

	rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
				PAGE_KERNEL_EXEC);
	if (rc)
		return rc;

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}
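
/*
 * Clean a range of kernel VAs to the Point of Coherency: the hibernate exit
 * code and the EL2 vectors run with the MMU off, so anything they depend on
 * must be cleaned from the D-cache first.
 */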
#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))

#ifdef CONFIG_ARM64_MTE

static DEFINE_XARRAY(mte_pages);
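
/*
 * mte_pages maps a pfn to a buffer holding that page's Memory Tagging
 * Extension tags: tags are saved here before the snapshot is written and
 * copied back into memory once the image has been restored.
 */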

static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		mte_free_tag_storage(ret);
		return -EEXIST;
	}

	return 0;
}

static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}

static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			if (!test_bit(PG_mte_tagged, &page->flags))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}

static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else	/* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
	return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif	/* CONFIG_ARM64_MTE */
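
/*
 * swsusp_arch_suspend() runs on both sides of hibernation:
 * __cpu_suspend_enter() returns non-zero on the initial pass, where
 * swsusp_save() builds the image, and zero when the saved context is
 * re-entered after the image has been restored, where only cache
 * maintenance and state clean-up are needed.
 */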

int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret)
			return ret;

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
			dcache_clean_range(__hyp_text_start, __hyp_text_end);
		}

		swsusp_mte_restore_tags();

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory.
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}
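
/*
 * The helpers below clone the kernel's linear-map page tables into safe
 * pages, clearing the read-only attributes so the restore code can write
 * to any page while running from this temporary copy.
 */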

static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will remove the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check the pfn should
		 * be mapped.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}

static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
		    unsigned long end)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;
	unsigned long addr = start;

	dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_ptep)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
	dst_ptep = pte_offset_kernel(dst_pmdp, start);

	src_ptep = pte_offset_kernel(src_pmdp, start);
	do {
		_copy_pte(dst_ptep, src_ptep, addr);
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}

static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmdp,
				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}

static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);

	src_pudp = pud_offset(src_p4dp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}

static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	unsigned long next;
	unsigned long addr = start;

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(READ_ONCE(*src_p4dp)))
			continue;
		if (copy_pud(dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

	return 0;
}

static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}

static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
				 unsigned long end)
{
	int rc;
	pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(trans_pgd, start, end);
	if (!rc)
		*dst_pgdp = trans_pgd;

	return rc;
}

/*
 * Set up, then resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;	/* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;	/* offset */

		__hyp_set_vectors(el2_vectors);
	}

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

	return 0;
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}