2 * kexec.c - kexec system call
3 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
9 #include <linux/capability.h>
11 #include <linux/file.h>
12 #include <linux/slab.h>
14 #include <linux/kexec.h>
15 #include <linux/mutex.h>
16 #include <linux/list.h>
17 #include <linux/highmem.h>
18 #include <linux/syscalls.h>
19 #include <linux/reboot.h>
20 #include <linux/ioport.h>
21 #include <linux/hardirq.h>
22 #include <linux/elf.h>
23 #include <linux/elfcore.h>
24 #include <linux/utsname.h>
25 #include <linux/numa.h>
26 #include <linux/suspend.h>
27 #include <linux/device.h>
28 #include <linux/freezer.h>
30 #include <linux/cpu.h>
31 #include <linux/console.h>
32 #include <linux/vmalloc.h>
33 #include <linux/swap.h>
34 #include <linux/syscore_ops.h>
37 #include <asm/uaccess.h>
39 #include <asm/sections.h>
41 /* Per cpu memory for storing cpu states in case of system crash. */
42 note_buf_t __percpu *crash_notes;
44 /* vmcoreinfo stuff */
45 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
46 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47 size_t vmcoreinfo_size;
48 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
50 /* Location of the reserved area for the crash kernel */
51 struct resource crashk_res = {
52 .name = "Crash kernel",
55 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
57 struct resource crashk_low_res = {
58 .name = "Crash kernel low",
61 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
64 int kexec_should_crash(struct task_struct *p)
66 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
72 * When kexec transitions to the new kernel there is a one-to-one
73 * mapping between physical and virtual addresses. On processors
74 * where you can disable the MMU this is trivial, and easy. For
75 * others it is still a simple predictable page table to setup.
77 * In that environment kexec copies the new kernel to its final
78 * resting place. This means I can only support memory whose
79 * physical address can fit in an unsigned long. In particular
80 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
81 * If the assembly stub has more restrictive requirements
82 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
83 * defined more restrictively in <asm/kexec.h>.
85 * The code for the transition from the current kernel to the
86 * new kernel is placed in the control_code_buffer, whose size
87 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
88 * page of memory is necessary, but some architectures require more.
89 * Because this memory must be identity mapped in the transition from
90 * virtual to physical addresses it must live in the range
91 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily discoverable.
94 * The assembly stub in the control code buffer is passed a linked list
95 * of descriptor pages detailing the source pages of the new kernel,
96 * and the destination addresses of those source pages. As this data
97 * structure is not used in the context of the current OS, it must be self-contained.
100 * The code has been made to work with highmem pages and will use a
101 * destination page in its final resting place (if it happens
102 * to allocate it). The end product of this is that most of the
103 * physical address space, and most of RAM can be used.
105 * Future directions include:
106 * - allocating a page table with the control code buffer identity
107 * mapped, to simplify machine_kexec and make kexec_on_panic more reliable.
112 * KIMAGE_NO_DEST is an impossible destination address..., for
113 * allocating pages whose destination address we do not care about.
115 #define KIMAGE_NO_DEST (-1UL)
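/*
 * Illustrative sketch (not part of the original file): the descriptor
 * list handed to the assembly stub is a flat array of kimage_entry_t
 * values, each a physical address tagged with one of the IND_* flags
 * from <linux/kexec.h>.  For a single two-page segment destined for
 * physical address 0x100000 (a made-up placeholder), the list built by
 * kimage_set_destination() and kimage_add_page() looks roughly like:
 *
 *	entry[0] = 0x100000   | IND_DESTINATION;   // where to copy next
 *	entry[1] = src_page_a | IND_SOURCE;        // copy, dest += PAGE_SIZE
 *	entry[2] = src_page_b | IND_SOURCE;        // copy, dest += PAGE_SIZE
 *	entry[3] = IND_DONE;                       // end of list
 *
 * When one page of entries fills up, kimage_add_entry() links in a
 * fresh page with an IND_INDIRECTION entry.
 */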
117 static int kimage_is_destination_range(struct kimage *image,
118 unsigned long start, unsigned long end);
119 static struct page *kimage_alloc_page(struct kimage *image,
123 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
124 unsigned long nr_segments,
125 struct kexec_segment __user *segments)
127 size_t segment_bytes;
128 struct kimage *image;
132 /* Allocate a controlling structure */
134 image = kzalloc(sizeof(*image), GFP_KERNEL);
139 image->entry = &image->head;
140 image->last_entry = &image->head;
141 image->control_page = ~0; /* By default this does not apply */
142 image->start = entry;
143 image->type = KEXEC_TYPE_DEFAULT;
145 /* Initialize the list of control pages */
146 INIT_LIST_HEAD(&image->control_pages);
148 /* Initialize the list of destination pages */
149 INIT_LIST_HEAD(&image->dest_pages);
151 /* Initialize the list of unusable pages */
152 INIT_LIST_HEAD(&image->unuseable_pages);
154 /* Read in the segments */
155 image->nr_segments = nr_segments;
156 segment_bytes = nr_segments * sizeof(*segments);
157 result = copy_from_user(image->segment, segments, segment_bytes);
164 * Verify we have good destination addresses. The caller is
165 * responsible for making certain we don't attempt to load
166 * the new image into invalid or reserved areas of RAM. This
167 * just verifies it is an address we can use.
169 * Since the kernel does everything in page size chunks ensure
170 * the destination addresses are page aligned. Too many
171 * special cases crop up when we don't do this. The most
172 * insidious is getting overlapping destination addresses
173 * simply because addresses are changed to page size granularity.
176 result = -EADDRNOTAVAIL;
177 for (i = 0; i < nr_segments; i++) {
178 unsigned long mstart, mend;
180 mstart = image->segment[i].mem;
181 mend = mstart + image->segment[i].memsz;
182 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
184 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
188 /* Verify our destination addresses do not overlap.
189 * If we allowed overlapping destination addresses
190 * through, very weird things can happen with no
191 * easy explanation as one segment stops on another.
194 for (i = 0; i < nr_segments; i++) {
195 unsigned long mstart, mend;
198 mstart = image->segment[i].mem;
199 mend = mstart + image->segment[i].memsz;
200 for (j = 0; j < i; j++) {
201 unsigned long pstart, pend;
202 pstart = image->segment[j].mem;
203 pend = pstart + image->segment[j].memsz;
204 /* Do the segments overlap ? */
205 if ((mend > pstart) && (mstart < pend))
210 /* Ensure our buffer sizes are strictly less than
211 * our memory sizes. This should always be the case,
212 * and it is easier to check up front than to be surprised later on.
216 for (i = 0; i < nr_segments; i++) {
217 if (image->segment[i].bufsz > image->segment[i].memsz)
232 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
233 unsigned long nr_segments,
234 struct kexec_segment __user *segments)
237 struct kimage *image;
239 /* Allocate and initialize a controlling structure */
241 result = do_kimage_alloc(&image, entry, nr_segments, segments);
248 * Find a location for the control code buffer, and add it to
249 * the vector of segments so that its pages will also be
250 * counted as destination pages.
253 image->control_code_page = kimage_alloc_control_pages(image,
254 get_order(KEXEC_CONTROL_PAGE_SIZE));
255 if (!image->control_code_page) {
256 printk(KERN_ERR "Could not allocate control_code_buffer\n");
260 image->swap_page = kimage_alloc_control_pages(image, 0);
261 if (!image->swap_page) {
262 printk(KERN_ERR "Could not allocate swap buffer\n");
276 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
277 unsigned long nr_segments,
278 struct kexec_segment __user *segments)
281 struct kimage *image;
285 /* Verify we have a valid entry point */
286 if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
287 result = -EADDRNOTAVAIL;
291 /* Allocate and initialize a controlling structure */
292 result = do_kimage_alloc(&image, entry, nr_segments, segments);
296 /* Enable the special crash kernel control page allocation policy. */
299 image->control_page = crashk_res.start;
300 image->type = KEXEC_TYPE_CRASH;
303 * Verify we have good destination addresses. Normally
304 * the caller is responsible for making certain we don't
305 * attempt to load the new image into invalid or reserved
306 * areas of RAM. But crash kernels are preloaded into a
307 * reserved area of RAM. We must ensure the addresses
308 * are in the reserved area otherwise preloading the
309 * kernel could corrupt things.
311 result = -EADDRNOTAVAIL;
312 for (i = 0; i < nr_segments; i++) {
313 unsigned long mstart, mend;
315 mstart = image->segment[i].mem;
316 mend = mstart + image->segment[i].memsz - 1;
317 /* Ensure we are within the crash kernel limits */
318 if ((mstart < crashk_res.start) || (mend > crashk_res.end))
323 * Find a location for the control code buffer, and add it to
324 * the vector of segments so that its pages will also be
325 * counted as destination pages.
328 image->control_code_page = kimage_alloc_control_pages(image,
329 get_order(KEXEC_CONTROL_PAGE_SIZE));
330 if (!image->control_code_page) {
331 printk(KERN_ERR "Could not allocate control_code_buffer\n");
345 static int kimage_is_destination_range(struct kimage *image,
351 for (i = 0; i < image->nr_segments; i++) {
352 unsigned long mstart, mend;
354 mstart = image->segment[i].mem;
355 mend = mstart + image->segment[i].memsz;
356 if ((end > mstart) && (start < mend))
363 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
367 pages = alloc_pages(gfp_mask, order);
369 unsigned int count, i;
370 pages->mapping = NULL;
371 set_page_private(pages, order);
373 for (i = 0; i < count; i++)
374 SetPageReserved(pages + i);
380 static void kimage_free_pages(struct page *page)
382 unsigned int order, count, i;
384 order = page_private(page);
386 for (i = 0; i < count; i++)
387 ClearPageReserved(page + i);
388 __free_pages(page, order);
391 static void kimage_free_page_list(struct list_head *list)
393 struct list_head *pos, *next;
395 list_for_each_safe(pos, next, list) {
398 page = list_entry(pos, struct page, lru);
399 list_del(&page->lru);
400 kimage_free_pages(page);
404 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
407 /* Control pages are special, they are the intermediaries
408 * that are needed while we copy the rest of the pages
409 * to their final resting place. As such they must
410 * not conflict with either the destination addresses
411 * or memory the kernel is already using.
413 * The only case where we really need more than one of
414 * these is for architectures where we cannot disable
415 * the MMU and must instead generate an identity mapped
416 * page table for all of the memory.
418 * At worst this runs in O(N) of the image size.
420 struct list_head extra_pages;
425 INIT_LIST_HEAD(&extra_pages);
427 /* Loop while I can allocate a page and the page allocated
428 * is a destination page.
431 unsigned long pfn, epfn, addr, eaddr;
433 pages = kimage_alloc_pages(GFP_KERNEL, order);
436 pfn = page_to_pfn(pages);
438 addr = pfn << PAGE_SHIFT;
439 eaddr = epfn << PAGE_SHIFT;
440 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
441 kimage_is_destination_range(image, addr, eaddr)) {
442 list_add(&pages->lru, &extra_pages);
448 /* Remember the allocated page... */
449 list_add(&pages->lru, &image->control_pages);
451 /* Because the page is already in its destination
452 * location we will never allocate another page at
453 * that address. Therefore kimage_alloc_pages
454 * will not return it (again) and we don't need
455 * to give it an entry in image->segment[].
458 /* Deal with the destination pages I have inadvertently allocated.
460 * Ideally I would convert multi-page allocations into single
461 * page allocations, and add everything to image->dest_pages.
463 * For now it is simpler to just free the pages.
465 kimage_free_page_list(&extra_pages);
470 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
473 /* Control pages are special, they are the intermediaries
474 * that are needed while we copy the rest of the pages
475 * to their final resting place. As such they must
476 * not conflict with either the destination addresses
477 * or memory the kernel is already using.
479 * Control pages are also the only pages we must allocate
480 * when loading a crash kernel. All of the other pages
481 * are specified by the segments and we just memcpy
482 * into them directly.
484 * The only case where we really need more than one of
485 * these is for architectures where we cannot disable
486 * the MMU and must instead generate an identity mapped
487 * page table for all of the memory.
489 * Given the low demand this implements a very simple
490 * allocator that finds the first hole of the appropriate
491 * size in the reserved memory region, and allocates all
492 * of the memory up to and including the hole.
494 unsigned long hole_start, hole_end, size;
498 size = (1 << order) << PAGE_SHIFT;
499 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
500 hole_end = hole_start + size - 1;
501 while (hole_end <= crashk_res.end) {
504 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
506 if (hole_end > crashk_res.end)
508 /* See if I overlap any of the segments */
509 for (i = 0; i < image->nr_segments; i++) {
510 unsigned long mstart, mend;
512 mstart = image->segment[i].mem;
513 mend = mstart + image->segment[i].memsz - 1;
514 if ((hole_end >= mstart) && (hole_start <= mend)) {
515 /* Advance the hole to the end of the segment */
516 hole_start = (mend + (size - 1)) & ~(size - 1);
517 hole_end = hole_start + size - 1;
521 /* If I don't overlap any segments I have found my hole! */
522 if (i == image->nr_segments) {
523 pages = pfn_to_page(hole_start >> PAGE_SHIFT);
528 image->control_page = hole_end;
534 struct page *kimage_alloc_control_pages(struct kimage *image,
537 struct page *pages = NULL;
539 switch (image->type) {
540 case KEXEC_TYPE_DEFAULT:
541 pages = kimage_alloc_normal_control_pages(image, order);
543 case KEXEC_TYPE_CRASH:
544 pages = kimage_alloc_crash_control_pages(image, order);
551 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
553 if (*image->entry != 0)
556 if (image->entry == image->last_entry) {
557 kimage_entry_t *ind_page;
560 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
564 ind_page = page_address(page);
565 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
566 image->entry = ind_page;
567 image->last_entry = ind_page +
568 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
570 *image->entry = entry;
577 static int kimage_set_destination(struct kimage *image,
578 unsigned long destination)
582 destination &= PAGE_MASK;
583 result = kimage_add_entry(image, destination | IND_DESTINATION);
585 image->destination = destination;
591 static int kimage_add_page(struct kimage *image, unsigned long page)
596 result = kimage_add_entry(image, page | IND_SOURCE);
598 image->destination += PAGE_SIZE;
604 static void kimage_free_extra_pages(struct kimage *image)
606 /* Walk through and free any extra destination pages I may have */
607 kimage_free_page_list(&image->dest_pages);
609 /* Walk through and free any unusable pages I have cached */
610 kimage_free_page_list(&image->unuseable_pages);
613 static void kimage_terminate(struct kimage *image)
615 if (*image->entry != 0)
618 *image->entry = IND_DONE;
621 #define for_each_kimage_entry(image, ptr, entry) \
622 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
623 ptr = (entry & IND_INDIRECTION)? \
624 phys_to_virt((entry & PAGE_MASK)): ptr +1)
626 static void kimage_free_entry(kimage_entry_t entry)
630 page = pfn_to_page(entry >> PAGE_SHIFT);
631 kimage_free_pages(page);
634 static void kimage_free(struct kimage *image)
636 kimage_entry_t *ptr, entry;
637 kimage_entry_t ind = 0;
642 kimage_free_extra_pages(image);
643 for_each_kimage_entry(image, ptr, entry) {
644 if (entry & IND_INDIRECTION) {
645 /* Free the previous indirection page */
646 if (ind & IND_INDIRECTION)
647 kimage_free_entry(ind);
648 /* Save this indirection page until we are done with it. */
653 else if (entry & IND_SOURCE)
654 kimage_free_entry(entry);
656 /* Free the final indirection page */
657 if (ind & IND_INDIRECTION)
658 kimage_free_entry(ind);
660 /* Handle any machine specific cleanup */
661 machine_kexec_cleanup(image);
663 /* Free the kexec control pages... */
664 kimage_free_page_list(&image->control_pages);
668 static kimage_entry_t *kimage_dst_used(struct kimage *image,
671 kimage_entry_t *ptr, entry;
672 unsigned long destination = 0;
674 for_each_kimage_entry(image, ptr, entry) {
675 if (entry & IND_DESTINATION)
676 destination = entry & PAGE_MASK;
677 else if (entry & IND_SOURCE) {
678 if (page == destination)
680 destination += PAGE_SIZE;
687 static struct page *kimage_alloc_page(struct kimage *image,
689 unsigned long destination)
692 * Here we implement safeguards to ensure that a source page
693 * is not copied to its destination page before the data on
694 * the destination page is no longer useful.
696 * To do this we maintain the invariant that a source page is
697 * either its own destination page, or it is not a
698 * destination page at all.
700 * That is slightly stronger than required, but the proof
701 * that no problems will occur is trivial, and the
702 * implementation is simply to verify.
704 * When allocating all pages normally this algorithm will run
705 * in O(N) time, but in the worst case it will run in O(N^2)
706 * time. If the runtime is a problem the data structures can be fixed.
713 * Walk through the list of destination pages, and see if I have a match.
716 list_for_each_entry(page, &image->dest_pages, lru) {
717 addr = page_to_pfn(page) << PAGE_SHIFT;
718 if (addr == destination) {
719 list_del(&page->lru);
727 /* Allocate a page, if we run out of memory give up */
728 page = kimage_alloc_pages(gfp_mask, 0);
731 /* If the page cannot be used file it away */
732 if (page_to_pfn(page) >
733 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
734 list_add(&page->lru, &image->unuseable_pages);
737 addr = page_to_pfn(page) << PAGE_SHIFT;
739 /* If it is the destination page we want, use it */
740 if (addr == destination)
743 /* If the page is not a destination page, use it */
744 if (!kimage_is_destination_range(image, addr,
749 * I know that the page is someone's destination page.
750 * See if there is already a source page for this
751 * destination page. And if so swap the source pages.
753 old = kimage_dst_used(image, addr);
756 unsigned long old_addr;
757 struct page *old_page;
759 old_addr = *old & PAGE_MASK;
760 old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
761 copy_highpage(page, old_page);
762 *old = addr | (*old & ~PAGE_MASK);
764 /* The old page I have found cannot be a
765 * destination page, so return it if its
766 * gfp_flags honor the ones passed in.
768 if (!(gfp_mask & __GFP_HIGHMEM) &&
769 PageHighMem(old_page)) {
770 kimage_free_pages(old_page);
778 /* Place the page on the destination list; I will use it later. */
781 list_add(&page->lru, &image->dest_pages);
788 static int kimage_load_normal_segment(struct kimage *image,
789 struct kexec_segment *segment)
792 unsigned long ubytes, mbytes;
794 unsigned char __user *buf;
798 ubytes = segment->bufsz;
799 mbytes = segment->memsz;
800 maddr = segment->mem;
802 result = kimage_set_destination(image, maddr);
809 size_t uchunk, mchunk;
811 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
816 result = kimage_add_page(image, page_to_pfn(page)
822 /* Start with a clear page */
824 ptr += maddr & ~PAGE_MASK;
825 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
833 result = copy_from_user(ptr, buf, uchunk);
848 static int kimage_load_crash_segment(struct kimage *image,
849 struct kexec_segment *segment)
851 /* For crash dump kernels we simply copy the data from
852 * user space to its destination.
853 * We do things a page at a time for the sake of kmap.
856 unsigned long ubytes, mbytes;
858 unsigned char __user *buf;
862 ubytes = segment->bufsz;
863 mbytes = segment->memsz;
864 maddr = segment->mem;
868 size_t uchunk, mchunk;
870 page = pfn_to_page(maddr >> PAGE_SHIFT);
876 ptr += maddr & ~PAGE_MASK;
877 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
882 if (uchunk > ubytes) {
884 /* Zero the trailing part of the page */
885 memset(ptr + uchunk, 0, mchunk - uchunk);
887 result = copy_from_user(ptr, buf, uchunk);
888 kexec_flush_icache_page(page);
903 static int kimage_load_segment(struct kimage *image,
904 struct kexec_segment *segment)
906 int result = -ENOMEM;
908 switch (image->type) {
909 case KEXEC_TYPE_DEFAULT:
910 result = kimage_load_normal_segment(image, segment);
912 case KEXEC_TYPE_CRASH:
913 result = kimage_load_crash_segment(image, segment);
921 * Exec Kernel system call: for obvious reasons only root may call it.
923 * This call breaks up into three pieces.
924 * - A generic part which loads the new kernel from the current
925 * address space, and very carefully places the data in the allocated pages.
928 * - A generic part that interacts with the kernel and tells all of
929 * the devices to shut down. Preventing ongoing DMAs, and placing
930 * the devices in a consistent state so a later kernel can reinitialize them.
933 * - A machine specific part that includes the syscall number
934 * and then copies the image to its final destination, and
935 * jumps into the image at entry.
937 * kexec does not sync, or unmount filesystems so if you need
938 * that to happen you need to do that yourself.
940 struct kimage *kexec_image;
941 struct kimage *kexec_crash_image;
943 static DEFINE_MUTEX(kexec_mutex);
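/*
 * Minimal userspace sketch (illustrative only, not part of this file):
 * loading a single-segment image with the raw syscall described above.
 * The entry point, destination address and 4K page size below are
 * made-up placeholders; real callers normally go through kexec-tools,
 * which computes the architecture-specific values.
 *
 *	#include <linux/kexec.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int load_example(void *buf, size_t len)
 *	{
 *		struct kexec_segment seg = {
 *			.buf   = buf,                     // image in user memory
 *			.bufsz = len,
 *			.mem   = (void *)0x100000,        // page-aligned destination
 *			.memsz = (len + 4095) & ~4095UL,  // assumes 4K pages
 *		};
 *		// one segment, entry at 0x100000, native architecture
 *		return syscall(SYS_kexec_load, 0x100000UL, 1UL, &seg,
 *			       KEXEC_ARCH_DEFAULT);
 *	}
 *
 * The caller needs CAP_SYS_BOOT; passing KEXEC_ON_CRASH in the flags
 * instead loads the image into the reserved crash kernel region.
 */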
945 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
946 struct kexec_segment __user *, segments, unsigned long, flags)
948 struct kimage **dest_image, *image;
951 /* We only trust the superuser with rebooting the system. */
952 if (!capable(CAP_SYS_BOOT))
956 * Verify we have a legal set of flags
957 * This leaves us room for future extensions.
959 if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
962 /* Verify we are on the appropriate architecture */
963 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
964 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
967 /* Put an artificial cap on the number
968 * of segments passed to kexec_load.
970 if (nr_segments > KEXEC_SEGMENT_MAX)
976 /* Because we write directly to the reserved memory
977 * region when loading crash kernels we need a mutex here to
978 * prevent multiple crash kernels from attempting to load
979 * simultaneously, and to prevent a crash kernel from loading
980 * over the top of an in-use crash kernel.
982 * KISS: always take the mutex.
984 if (!mutex_trylock(&kexec_mutex))
987 dest_image = &kexec_image;
988 if (flags & KEXEC_ON_CRASH)
989 dest_image = &kexec_crash_image;
990 if (nr_segments > 0) {
993 /* Loading another kernel to reboot into */
994 if ((flags & KEXEC_ON_CRASH) == 0)
995 result = kimage_normal_alloc(&image, entry,
996 nr_segments, segments);
997 /* Loading another kernel to switch to if this one crashes */
998 else if (flags & KEXEC_ON_CRASH) {
999 /* Free any current crash dump kernel before we corrupt it. */
1002 kimage_free(xchg(&kexec_crash_image, NULL));
1003 result = kimage_crash_alloc(&image, entry,
1004 nr_segments, segments);
1005 crash_map_reserved_pages();
1010 if (flags & KEXEC_PRESERVE_CONTEXT)
1011 image->preserve_context = 1;
1012 result = machine_kexec_prepare(image);
1016 for (i = 0; i < nr_segments; i++) {
1017 result = kimage_load_segment(image, &image->segment[i]);
1021 kimage_terminate(image);
1022 if (flags & KEXEC_ON_CRASH)
1023 crash_unmap_reserved_pages();
1025 /* Install the new kernel and uninstall the old */
1026 image = xchg(dest_image, image);
1029 mutex_unlock(&kexec_mutex);
1036 * Add and remove page tables for crashkernel memory
1038 * Provide an empty default implementation here -- architecture
1039 * code may override this
1041 void __weak crash_map_reserved_pages(void)
1044 void __weak crash_unmap_reserved_pages(void)
1047 #ifdef CONFIG_COMPAT
1048 asmlinkage long compat_sys_kexec_load(unsigned long entry,
1049 unsigned long nr_segments,
1050 struct compat_kexec_segment __user *segments,
1051 unsigned long flags)
1053 struct compat_kexec_segment in;
1054 struct kexec_segment out, __user *ksegments;
1055 unsigned long i, result;
1057 /* Don't allow clients that don't understand the native
1058 * architecture to do anything.
1060 if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1063 if (nr_segments > KEXEC_SEGMENT_MAX)
1066 ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1067 for (i=0; i < nr_segments; i++) {
1068 result = copy_from_user(&in, &segments[i], sizeof(in));
1072 out.buf = compat_ptr(in.buf);
1073 out.bufsz = in.bufsz;
1075 out.memsz = in.memsz;
1077 result = copy_to_user(&ksegments[i], &out, sizeof(out));
1082 return sys_kexec_load(entry, nr_segments, ksegments, flags);
1086 void crash_kexec(struct pt_regs *regs)
1088 /* Take the kexec_mutex here to prevent sys_kexec_load
1089 * running on one cpu from replacing the crash kernel
1090 * we are using after a panic on a different cpu.
1092 * If the crash kernel was not located in a fixed area
1093 * of memory the xchg(&kexec_crash_image) would be
1094 * sufficient. But since I reuse the memory...
1096 if (mutex_trylock(&kexec_mutex)) {
1097 if (kexec_crash_image) {
1098 struct pt_regs fixed_regs;
1100 crash_setup_regs(&fixed_regs, regs);
1101 crash_save_vmcoreinfo();
1102 machine_crash_shutdown(&fixed_regs);
1103 machine_kexec(kexec_crash_image);
1105 mutex_unlock(&kexec_mutex);
1109 size_t crash_get_memory_size(void)
1112 mutex_lock(&kexec_mutex);
1113 if (crashk_res.end != crashk_res.start)
1114 size = resource_size(&crashk_res);
1115 mutex_unlock(&kexec_mutex);
1119 void __weak crash_free_reserved_phys_range(unsigned long begin,
1124 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1125 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1126 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1127 free_page((unsigned long)__va(addr));
1132 int crash_shrink_memory(unsigned long new_size)
1135 unsigned long start, end;
1136 unsigned long old_size;
1137 struct resource *ram_res;
1139 mutex_lock(&kexec_mutex);
1141 if (kexec_crash_image) {
1145 start = crashk_res.start;
1146 end = crashk_res.end;
1147 old_size = (end == 0) ? 0 : end - start + 1;
1148 if (new_size >= old_size) {
1149 ret = (new_size == old_size) ? 0 : -EINVAL;
1153 ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1159 start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1160 end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1162 crash_map_reserved_pages();
1163 crash_free_reserved_phys_range(end, crashk_res.end);
1165 if ((start == end) && (crashk_res.parent != NULL))
1166 release_resource(&crashk_res);
1168 ram_res->start = end;
1169 ram_res->end = crashk_res.end;
1170 ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1171 ram_res->name = "System RAM";
1173 crashk_res.end = end - 1;
1175 insert_resource(&iomem_resource, ram_res);
1176 crash_unmap_reserved_pages();
1179 mutex_unlock(&kexec_mutex);
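/*
 * Layout sketch (illustrative): append_elf_note() below emits a
 * standard ELF note record, i.e. the struct elf_note header
 * (n_namesz, n_descsz, n_type) followed by the name and the payload,
 * each rounded up to a 4-byte boundary.  Appending a note named
 * "CORE" (5 bytes including the NUL) with an 8-byte payload produces:
 *
 *	n_namesz = 5, n_descsz = 8, n_type
 *	"CORE\0" plus 3 bytes of padding
 *	8 bytes of payload
 *
 * which is exactly the (len + 3)/4 word rounding applied to buf.
 */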
1183 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1186 struct elf_note note;
1188 note.n_namesz = strlen(name) + 1;
1189 note.n_descsz = data_len;
1191 memcpy(buf, &note, sizeof(note));
1192 buf += (sizeof(note) + 3)/4;
1193 memcpy(buf, name, note.n_namesz);
1194 buf += (note.n_namesz + 3)/4;
1195 memcpy(buf, data, note.n_descsz);
1196 buf += (note.n_descsz + 3)/4;
1201 static void final_note(u32 *buf)
1203 struct elf_note note;
1208 memcpy(buf, &note, sizeof(note));
1211 void crash_save_cpu(struct pt_regs *regs, int cpu)
1213 struct elf_prstatus prstatus;
1216 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1219 /* Using ELF notes here is opportunistic.
1220 * I need a well defined structure format
1221 * for the data I pass, and I need tags
1222 * on the data to indicate what information I have
1223 * squirrelled away. ELF notes happen to provide
1224 * all of that, so there is no need to invent something new.
1226 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1229 memset(&prstatus, 0, sizeof(prstatus));
1230 prstatus.pr_pid = current->pid;
1231 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1232 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1233 &prstatus, sizeof(prstatus));
1237 static int __init crash_notes_memory_init(void)
1239 /* Allocate memory for saving cpu registers. */
1240 crash_notes = alloc_percpu(note_buf_t);
1242 printk("Kexec: Memory allocation for saving cpu register"
1243 " states failed\n");
1248 module_init(crash_notes_memory_init)
1252 * parsing the "crashkernel" commandline
1254 * this code is intended to be called from architecture specific code
1259 * This function parses command lines in the format
1261 * crashkernel=ramsize-range:size[,...][@offset]
1263 * The function returns 0 on success and -EINVAL on failure.
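/*
 * Worked example (illustrative): on a machine with 1G of RAM, the
 * command line
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * matches the 512M-2G range, so 64M is reserved for the crash kernel,
 * and the optional @16M suffix requests that the reservation start at
 * physical address 16M.
 */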
1265 static int __init parse_crashkernel_mem(char *cmdline,
1266 unsigned long long system_ram,
1267 unsigned long long *crash_size,
1268 unsigned long long *crash_base)
1270 char *cur = cmdline, *tmp;
1272 /* for each entry of the comma-separated list */
1274 unsigned long long start, end = ULLONG_MAX, size;
1276 /* get the start of the range */
1277 start = memparse(cur, &tmp);
1279 pr_warning("crashkernel: Memory value expected\n");
1284 pr_warning("crashkernel: '-' expected\n");
1289 /* if no ':' is here, then we read the end */
1291 end = memparse(cur, &tmp);
1293 pr_warning("crashkernel: Memory "
1294 "value expected\n");
1299 pr_warning("crashkernel: end <= start\n");
1305 pr_warning("crashkernel: ':' expected\n");
1310 size = memparse(cur, &tmp);
1312 pr_warning("Memory value expected\n");
1316 if (size >= system_ram) {
1317 pr_warning("crashkernel: invalid size\n");
1322 if (system_ram >= start && system_ram < end) {
1326 } while (*cur++ == ',');
1328 if (*crash_size > 0) {
1329 while (*cur && *cur != ' ' && *cur != '@')
1333 *crash_base = memparse(cur, &tmp);
1335 pr_warning("Memory value expected "
1346 * This function parses "simple" (old) crashkernel command lines like
1348 * crashkernel=size[@offset]
1350 * It returns 0 on success and -EINVAL on failure.
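/*
 * Example (illustrative): "crashkernel=128M@16M" reserves 128M for the
 * crash kernel, starting at physical address 16M.
 */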
1352 static int __init parse_crashkernel_simple(char *cmdline,
1353 unsigned long long *crash_size,
1354 unsigned long long *crash_base)
1356 char *cur = cmdline;
1358 *crash_size = memparse(cmdline, &cur);
1359 if (cmdline == cur) {
1360 pr_warning("crashkernel: memory value expected\n");
1365 *crash_base = memparse(cur+1, &cur);
1366 else if (*cur != ' ' && *cur != '\0') {
1367 pr_warning("crashkernel: unrecognized char\n");
1375 * This function is the entry point for command line parsing and should be
1376 * called from the arch-specific code.
1378 static int __init __parse_crashkernel(char *cmdline,
1379 unsigned long long system_ram,
1380 unsigned long long *crash_size,
1381 unsigned long long *crash_base,
1384 char *p = cmdline, *ck_cmdline = NULL;
1385 char *first_colon, *first_space;
1387 BUG_ON(!crash_size || !crash_base);
1391 /* find crashkernel and use the last one if there are more */
1392 p = strstr(p, name);
1395 p = strstr(p+1, name);
1401 ck_cmdline += strlen(name);
1404 * if the commandline contains a ':', then that's the extended
1405 * syntax -- if not, it must be the classic syntax
1407 first_colon = strchr(ck_cmdline, ':');
1408 first_space = strchr(ck_cmdline, ' ');
1409 if (first_colon && (!first_space || first_colon < first_space))
1410 return parse_crashkernel_mem(ck_cmdline, system_ram,
1411 crash_size, crash_base);
1413 return parse_crashkernel_simple(ck_cmdline, crash_size,
1419 int __init parse_crashkernel(char *cmdline,
1420 unsigned long long system_ram,
1421 unsigned long long *crash_size,
1422 unsigned long long *crash_base)
1424 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1428 int __init parse_crashkernel_low(char *cmdline,
1429 unsigned long long system_ram,
1430 unsigned long long *crash_size,
1431 unsigned long long *crash_base)
1433 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1434 "crashkernel_low=");
1437 static void update_vmcoreinfo_note(void)
1439 u32 *buf = vmcoreinfo_note;
1441 if (!vmcoreinfo_size)
1443 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1448 void crash_save_vmcoreinfo(void)
1450 vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1451 update_vmcoreinfo_note();
1454 void vmcoreinfo_append_str(const char *fmt, ...)
1460 va_start(args, fmt);
1461 r = vsnprintf(buf, sizeof(buf), fmt, args);
1464 if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1465 r = vmcoreinfo_max_size - vmcoreinfo_size;
1467 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1469 vmcoreinfo_size += r;
1473 * provide an empty default implementation here -- architecture
1474 * code may override this
1476 void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1479 unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1481 return __pa((unsigned long)(char *)&vmcoreinfo_note);
1484 static int __init crash_save_vmcoreinfo_init(void)
1486 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1487 VMCOREINFO_PAGESIZE(PAGE_SIZE);
1489 VMCOREINFO_SYMBOL(init_uts_ns);
1490 VMCOREINFO_SYMBOL(node_online_map);
1492 VMCOREINFO_SYMBOL(swapper_pg_dir);
1494 VMCOREINFO_SYMBOL(_stext);
1495 VMCOREINFO_SYMBOL(vmlist);
1497 #ifndef CONFIG_NEED_MULTIPLE_NODES
1498 VMCOREINFO_SYMBOL(mem_map);
1499 VMCOREINFO_SYMBOL(contig_page_data);
1501 #ifdef CONFIG_SPARSEMEM
1502 VMCOREINFO_SYMBOL(mem_section);
1503 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1504 VMCOREINFO_STRUCT_SIZE(mem_section);
1505 VMCOREINFO_OFFSET(mem_section, section_mem_map);
1507 VMCOREINFO_STRUCT_SIZE(page);
1508 VMCOREINFO_STRUCT_SIZE(pglist_data);
1509 VMCOREINFO_STRUCT_SIZE(zone);
1510 VMCOREINFO_STRUCT_SIZE(free_area);
1511 VMCOREINFO_STRUCT_SIZE(list_head);
1512 VMCOREINFO_SIZE(nodemask_t);
1513 VMCOREINFO_OFFSET(page, flags);
1514 VMCOREINFO_OFFSET(page, _count);
1515 VMCOREINFO_OFFSET(page, mapping);
1516 VMCOREINFO_OFFSET(page, lru);
1517 VMCOREINFO_OFFSET(pglist_data, node_zones);
1518 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1519 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1520 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1522 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1523 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1524 VMCOREINFO_OFFSET(pglist_data, node_id);
1525 VMCOREINFO_OFFSET(zone, free_area);
1526 VMCOREINFO_OFFSET(zone, vm_stat);
1527 VMCOREINFO_OFFSET(zone, spanned_pages);
1528 VMCOREINFO_OFFSET(free_area, free_list);
1529 VMCOREINFO_OFFSET(list_head, next);
1530 VMCOREINFO_OFFSET(list_head, prev);
1531 VMCOREINFO_OFFSET(vm_struct, addr);
1532 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1533 log_buf_kexec_setup();
1534 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1535 VMCOREINFO_NUMBER(NR_FREE_PAGES);
1536 VMCOREINFO_NUMBER(PG_lru);
1537 VMCOREINFO_NUMBER(PG_private);
1538 VMCOREINFO_NUMBER(PG_swapcache);
1540 arch_crash_save_vmcoreinfo();
1541 update_vmcoreinfo_note();
1546 module_init(crash_save_vmcoreinfo_init)
1549 * Move into place and start executing a preloaded standalone
1550 * executable. If nothing was preloaded return an error.
1552 int kernel_kexec(void)
1556 if (!mutex_trylock(&kexec_mutex))
1563 #ifdef CONFIG_KEXEC_JUMP
1564 if (kexec_image->preserve_context) {
1565 lock_system_sleep();
1566 pm_prepare_console();
1567 error = freeze_processes();
1570 goto Restore_console;
1573 error = dpm_suspend_start(PMSG_FREEZE);
1575 goto Resume_console;
1576 /* At this point, dpm_suspend_start() has been called,
1577 * but *not* dpm_suspend_end(). We *must* call
1578 * dpm_suspend_end() now. Otherwise, drivers for
1579 * some devices (e.g. interrupt controllers) become
1580 * desynchronized with the actual state of the
1581 * hardware at resume time, and evil weirdness ensues.
1583 error = dpm_suspend_end(PMSG_FREEZE);
1585 goto Resume_devices;
1586 error = disable_nonboot_cpus();
1589 local_irq_disable();
1590 error = syscore_suspend();
1596 kernel_restart_prepare(NULL);
1597 printk(KERN_EMERG "Starting new kernel\n");
1601 machine_kexec(kexec_image);
1603 #ifdef CONFIG_KEXEC_JUMP
1604 if (kexec_image->preserve_context) {
1609 enable_nonboot_cpus();
1610 dpm_resume_start(PMSG_RESTORE);
1612 dpm_resume_end(PMSG_RESTORE);
1617 pm_restore_console();
1618 unlock_system_sleep();
1623 mutex_unlock(&kexec_mutex);