/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#define pr_fmt(fmt) "kexec: " fmt

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>

#include <asm/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#ifdef CONFIG_KEXEC_FILE
static struct kexec_file_ops *kexec_file_loaders[] = {
		&kexec_bzImage64_ops,
};
#endif
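
/*
 * Free the page-table pages allocated by init_transition_pgtable() for
 * the transition mapping. Called on the error path there and from
 * machine_kexec_cleanup().
 */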
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.p4d);
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}
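
/*
 * Install a mapping of relocate_kernel's kernel virtual address onto the
 * control page in the identity-mapped table built by init_pgtable().
 * This lets relocate_kernel keep executing at the same virtual address
 * after %cr3 is switched to that table, since the control page holds a
 * copy of its code.
 */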
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			goto err;
		image->arch.p4d = p4d;
		set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}
	p4d = p4d_offset(pgd, vaddr);
	if (!p4d_present(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(p4d, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}
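
/*
 * Allocator callback handed to kernel_ident_mapping_init(). Page-table
 * pages come from the image's control pages, which are allocated so as
 * not to collide with the destination ranges of the loaded segments.
 */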
static void *alloc_pgt_page(void *data)
{
	struct kimage *image = (struct kimage *)data;
	struct page *page;
	void *p = NULL;

	page = kimage_alloc_control_pages(image, 0);
	if (page) {
		p = page_address(page);
		clear_page(p);
	}

	return p;
}
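
/*
 * Build an identity-mapped page table covering all RAM the current
 * kernel has mapped, plus every segment's destination range, so that
 * relocate_kernel can copy pages with paging still enabled.
 */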
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= image,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long mstart, mend;
	pgd_t *level4p;
	int result;
	int i;

	level4p = (pgd_t *)__va(start_pgtable);
	clear_page(level4p);
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);
		if (result)
			return result;
	}

	/*
	 * Segments' memory ranges could be outside 0 ~ max_pfn, for
	 * example when jumping back to the original kernel from a
	 * kexec'ed kernel, or when the first kernel was booted with a
	 * user-supplied mem map and the second kernel is loaded outside
	 * that range.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);
		if (result)
			return result;
	}

	return init_transition_pgtable(image, level4p);
}
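
/*
 * set_idt()/set_gdt() reload IDTR/GDTR directly via lidtq/lgdtq.
 * machine_kexec() uses them below with a null base to leave both
 * tables deliberately invalid before jumping into the control page.
 */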
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}
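
/*
 * Force-reload the data segment registers with __KERNEL_DS so their
 * hidden descriptor caches hold valid entries before the GDT is
 * zapped; see the comment in machine_kexec() below.
 */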
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}
#ifdef CONFIG_KEXEC_FILE
/* Update purgatory as needed after various image segments have been prepared */
static int arch_update_purgatory(struct kimage *image)
{
	int ret = 0;

	if (!image->file_mode)
		return 0;

	/* Setup copying of backup region */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_dest",
				&image->arch.backup_load_addr,
				sizeof(image->arch.backup_load_addr), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_src",
				&image->arch.backup_src_start,
				sizeof(image->arch.backup_src_start), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_sz",
				&image->arch.backup_src_sz,
				sizeof(image->arch.backup_src_sz), 0);
		if (ret)
			return ret;
	}

	return ret;
}
#else /* !CONFIG_KEXEC_FILE */
static inline int arch_update_purgatory(struct kimage *image)
{
	return 0;
}
#endif /* CONFIG_KEXEC_FILE */
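
/*
 * Called at image load time: builds the identity-mapped page table in
 * the control pages and, for file-mode crash images, patches the
 * purgatory backup-region symbols.
 */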
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	/* update purgatory as needed */
	result = arch_update_purgatory(image);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel. kexec/kdump
		 * paths already have calls to disable_IO_APIC() in one
		 * form or another; the kexec jump path needs one too.
		 */
		disable_IO_APIC();
#endif
	}
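
	/*
	 * Copy relocate_kernel's code into the control page: the pages
	 * holding the original code may themselves be overwritten while
	 * segments are copied to their destinations, so we must run
	 * from a page that is guaranteed to be preserved.
	 */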
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);
	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part. Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory. At no other time is the descriptor
	 * table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}
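
/*
 * Export arch-specific fields into vmcoreinfo so userspace dump tools
 * (e.g. makedumpfile, crash) can translate addresses in the crash image.
 */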
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_NUMBER(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
			      kaslr_offset());
	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
}
/* arch-dependent functionality related to kexec file-based syscall */

#ifdef CONFIG_KEXEC_FILE
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int i, ret = -ENOEXEC;
	struct kexec_file_ops *fops;

	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
		fops = kexec_file_loaders[i];
		if (!fops || !fops->probe)
			continue;

		ret = fops->probe(buf, buf_len);
		if (!ret) {
			image->fops = fops;
			return ret;
		}
	}

	return ret;
}
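
/*
 * Hand the kernel/initrd/cmdline buffers to the loader selected by the
 * probe above; stale crash ELF headers from a previous load are freed
 * first.
 */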
void *arch_kexec_kernel_image_load(struct kimage *image)
{
	vfree(image->arch.elf_headers);
	image->arch.elf_headers = NULL;

	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}
#ifdef CONFIG_KEXEC_VERIFY_SIG
int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
				 unsigned long kernel_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(kernel, kernel_len);
}
#endif
/*
 * Apply purgatory relocations.
 *
 * ehdr: Pointer to elf headers
 * sechdrs: Pointer to section headers.
 * relsec: section index of SHT_RELA section.
 *
 * TODO: Some of the code belongs to generic code. Move that in kexec.c.
 */
int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
				     Elf64_Shdr *sechdrs, unsigned int relsec)
{
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
	Elf64_Shdr *section, *symtabsec;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;

	/*
	 * ->sh_offset has been modified to keep the pointer to section
	 * contents in memory
	 */
	rel = (void *)sechdrs[relsec].sh_offset;

	/* Section to which relocations apply */
	section = &sechdrs[sechdrs[relsec].sh_info];

	pr_debug("Applying relocation section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);

	/* Associated symbol table */
	symtabsec = &sechdrs[sechdrs[relsec].sh_link];

	/* String table */
	if (symtabsec->sh_link >= ehdr->e_shnum) {
		/* Invalid strtab section number */
		pr_err("Invalid string table section index %d\n",
		       symtabsec->sh_link);
		return -ENOEXEC;
	}

	strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;

	/* section header string table */
	shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {

		/*
		 * rel[i].r_offset contains the byte offset from the
		 * beginning of the section to the storage unit affected.
		 *
		 * This is the location to update (->sh_offset): the
		 * temporary buffer where the section is currently loaded.
		 * It will finally be loaded to a different address,
		 * pointed to by ->sh_addr; kexec takes care of moving it
		 * (kexec_load_segment()).
		 */
		location = (void *)(section->sh_offset + rel[i].r_offset);

		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;

		/*
		 * rel[i].r_info contains the symbol-table index against
		 * which the relocation must be made and the type of
		 * relocation to apply; the ELF64_R_SYM() and
		 * ELF64_R_TYPE() macros extract these, respectively.
		 */
		sym = (Elf64_Sym *)symtabsec->sh_offset +
		      ELF64_R_SYM(rel[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n",
			 name, sym->st_info, sym->st_shndx, sym->st_value,
			 sym->st_size);
		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
		else if (sym->st_shndx >= ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
			sec_base = sechdrs[sym->st_shndx].sh_addr;

		value = sym->st_value;
		value += sec_base;
		value += rel[i].r_addend;
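
		/*
		 * Apply the relocation. Only the small set of types that
		 * purgatory is expected to contain is handled: direct
		 * 64/32-bit stores (with overflow checks on the 32-bit
		 * forms) and 32-bit PC-relative.
		 */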
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)location = value;
			break;
		case R_X86_64_32:
			*(u32 *)location = value;
			if (value != *(u32 *)location)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)location = value;
			if ((s64)value != *(s32 *)location)
				goto overflow;
			break;
		case R_X86_64_PC32:
			value -= (u64)address;
			*(u32 *)location = value;
			break;
		default:
			pr_err("Unknown rela relocation: %llu\n",
			       ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("Overflow in relocation type %d value 0x%lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), value);
	return -ENOEXEC;
}
#endif /* CONFIG_KEXEC_FILE */
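
/*
 * Crash-kernel region protection: while a crash image is loaded its
 * memory is mapped read-only so a buggy kernel cannot corrupt it. The
 * control code page is left writable because machine_kexec() writes
 * relocate_kernel's code into it at crash time.
 */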
static int
kexec_mark_range(unsigned long start, unsigned long end, bool protect)
{
	struct page *page;
	unsigned int nr_pages;

	/*
	 * For physical range: [start, end]. We must skip the unassigned
	 * crashk resource with zero-valued "end" member.
	 */
	if (!end || start > end)
		return 0;

	page = pfn_to_page(start >> PAGE_SHIFT);
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	if (protect)
		return set_pages_ro(page, nr_pages);
	else
		return set_pages_rw(page, nr_pages);
}
static void kexec_mark_crashkres(bool protect)
{
	unsigned long control;

	kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect);

	/* Don't touch the control code page used in crash_kexec(). */
	control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
	/* Control code page is located in the 2nd page. */
	kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
	control += KEXEC_CONTROL_PAGE_SIZE;
	kexec_mark_range(control, crashk_res.end, protect);
}
void arch_kexec_protect_crashkres(void)
{
	kexec_mark_crashkres(true);
}

void arch_kexec_unprotect_crashkres(void)
{
	kexec_mark_crashkres(false);
}