/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernate low-level support
 *
 * Copyright (C) 2016 ARM Ltd.
 * Author: James Morse <james.morse@arm.com>
 */
#include <linux/linkage.h>
#include <linux/errno.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/virt.h>
/*
 * Resume from hibernate
 *
 * Loads temporary page tables then restores the memory image.
 * Finally branches to cpu_resume() to restore the state saved by
 * swsusp_arch_suspend().
 *
 * Because this code has to be copied to a 'safe' page, it can't call out to
 * other functions by PC-relative address. Also remember that it may be
 * mid-way through over-writing other functions. For this reason it contains
 * code from caches_clean_inval_pou() and uses the copy_page() macro.
 *
 * This 'safe' page is mapped via ttbr0, and executed from there. This function
 * switches to a copy of the linear map in ttbr1, performs the restore, then
 * switches ttbr1 to the original kernel's swapper_pg_dir.
 *
 * All of memory gets written to, including code. We need to clean the kernel
 * text to the Point of Coherence (PoC) before secondary cores can be booted.
 * Because the kernel modules and executable pages mapped to user space are
 * also written as data, we clean all pages we touch to the Point of
 * Unification (PoU).
 *
 * x0: physical address of temporary page tables
 * x1: physical address of swapper page tables
 * x2: address of cpu_resume
 * x3: linear map address of restore_pblist in the current kernel
 * x4: physical address of __hyp_stub_vectors, or 0
 * x5: physical address of a zero page that remains zero after resume
 */
.pushsection	".hibernate_exit.text", "ax"
SYM_CODE_START(swsusp_arch_suspend_exit)
	/*
	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
	 * with a break-before-make via the zero page
	 */
	break_before_make_ttbr_switch	x5, x0, x6, x8
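
	/*
	 * Stash the remaining arguments: copy_page() and the clean loop below
	 * only use x0-x10 and x19, so these registers survive until the end.
	 * x30 holds cpu_resume, so the final ret branches straight there.
	 */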
	mov	x21, x1
	mov	x30, x2
	mov	x24, x4
	mov	x25, x5

	/* walk the restore_pblist and use copy_page() to over-write memory */
	mov	x19, x3
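
	/*
	 * Each pbe records where the page originally lived (HIBERN_PBE_ORIG,
	 * the restore destination) and where its copy was stashed at suspend
	 * time (HIBERN_PBE_ADDR, the source).
	 */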
1:	ldr	x10, [x19, #HIBERN_PBE_ORIG]
	mov	x0, x10
	ldr	x1, [x19, #HIBERN_PBE_ADDR]

	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
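	/*
	 * x10 still holds the destination page; x1 becomes its end address so
	 * the clean loop below knows where to stop.
	 */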
	add	x1, x10, #PAGE_SIZE
	/* Clean the copied page to PoU - based on caches_clean_inval_pou() */
	raw_dcache_line_size x2, x3
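	/* x2 = D-cache line size; round the page start down to a line boundary */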
	sub	x3, x2, #1
	bic	x4, x10, x3
2:	/* clean D line / unified line */
alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
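	/* advance one cache line at a time until the end of the page in x1 */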
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	2b

	ldr	x19, [x19, #HIBERN_PBE_NEXT]
	cbnz	x19, 1b
	dsb	ish		/* wait for PoU cleaning to finish */

	/* switch to the restored kernel's page tables */
	break_before_make_ttbr_switch	x25, x21, x6, x8
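
	/*
	 * Kernel text was overwritten along with everything else, so discard
	 * any stale instruction cache lines before leaving this safe page.
	 */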
	ic	ialluis
	dsb	ish
	isb

	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
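	/*
	 * The hvc traps to EL2 so it can be re-initialised from the
	 * __hyp_stub_vectors address passed in x4; either way, the final ret
	 * branches to cpu_resume, whose address was stashed in x30 above.
	 */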
	hvc	#0
3:	ret
SYM_CODE_END(swsusp_arch_suspend_exit)
.popsection