 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/kmsg_dump.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;
/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
int kexec_should_crash(struct task_struct *p)
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * available.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 *
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
#define KIMAGE_NO_DEST (-1UL)
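/*
 * Illustrative sketch (an addition for clarity, not from the original
 * source): the descriptor list handed to the assembly stub is a sequence
 * of kimage_entry_t values, each a page-aligned physical address with a
 * type flag in its low bits.  A two-page image destined for physical
 * 0x00100000 might be encoded roughly as (the addresses are made up):
 *
 *      0x00100000 | IND_DESTINATION     set the running destination address
 *      0x3fa42000 | IND_SOURCE          copy this page to 0x00100000
 *      0x3fa77000 | IND_SOURCE          copy this page to 0x00101000
 *      0x00000000 | IND_DONE            end of list
 *
 * An IND_INDIRECTION entry points at the next page of entries when one
 * page is not enough; see kimage_add_entry() and for_each_kimage_entry()
 * below.
 */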
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
        size_t segment_bytes;
        struct kimage *image;
        /* Allocate a controlling structure */
        image = kzalloc(sizeof(*image), GFP_KERNEL);
        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;
        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);
        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);
        /* Initialize the list of unuseable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);
        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size granularity.
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through, very weird things can happen with no
         * easy explanation as one segment stops on another.
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;
                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
        /* Ensure our buffer sizes are strictly less than
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         * later on.
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                                unsigned long nr_segments,
                                struct kexec_segment __user *segments)
        struct kimage *image;
        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
         * Find a location for the control code buffer, and add it
         * to the vector of segments so that its pages will also be
         * counted as destination pages.
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
        image->swap_page = kimage_alloc_control_pages(image, 0);
        if (!image->swap_page) {
                printk(KERN_ERR "Could not allocate swap buffer\n");
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                                unsigned long nr_segments,
                                struct kexec_segment __user *segments)
        struct kimage *image;
        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;
        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);
        /* Enable the special crash kernel control page
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of RAM.  We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))
         * Find a location for the control code buffer, and add it
         * to the vector of segments so that its pages will also be
         * counted as destination pages.
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
static int kimage_is_destination_range(struct kimage *image,
        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;
                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
        pages = alloc_pages(gfp_mask, order);
                unsigned int count, i;
                pages->mapping = NULL;
                set_page_private(pages, order);
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);
static void kimage_free_pages(struct page *page)
        unsigned int order, count, i;
        order = page_private(page);
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
static void kimage_free_page_list(struct list_head *list)
        struct list_head *pos, *next;
        list_for_each_safe(pos, next, list) {
                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
        struct list_head extra_pages;
        INIT_LIST_HEAD(&extra_pages);
        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
                unsigned long pfn, epfn, addr, eaddr;
                pages = kimage_alloc_pages(GFP_KERNEL, order);
                pfn   = page_to_pfn(pages);
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                              kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        /* Remember the allocated page... */
                        list_add(&pages->lru, &image->control_pages);
                        /* Because the page is already in its destination
                         * location we will never allocate another page at
                         * that address.  Therefore kimage_alloc_pages
                         * will not return it (again) and we don't need
                         * to give it an entry in image->segment[].
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
        kimage_free_page_list(&extra_pages);
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
        unsigned long hole_start, hole_end, size;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
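        /*
         * Worked example (illustrative): with order = 1 on a 4K-page
         * system, size is 8K.  If image->control_page is 0x01002345,
         * hole_start rounds up to the next 8K boundary, 0x01004000,
         * and hole_end becomes 0x01005fff.
         */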
        while (hole_end <= crashk_res.end) {
                if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
                if (hole_end > crashk_res.end)
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;
                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
        image->control_page = hole_end;
struct page *kimage_alloc_control_pages(struct kimage *image,
        struct page *pages = NULL;
        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
        if (*image->entry != 0)
        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        *image->entry = entry;
static int kimage_set_destination(struct kimage *image,
                                   unsigned long destination)
        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
                image->destination = destination;
static int kimage_add_page(struct kimage *image, unsigned long page)
        result = kimage_add_entry(image, page | IND_SOURCE);
                image->destination += PAGE_SIZE;
static void kimage_free_extra_pages(struct kimage *image)
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);
        /* Walk through and free any unuseable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);
static void kimage_terminate(struct kimage *image)
        if (*image->entry != 0)
        *image->entry = IND_DONE;
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
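/*
 * Illustrative use of the macro above (a sketch, not code from the
 * original file): counting how many source pages have been queued so far.
 *
 *      kimage_entry_t *ptr, entry;
 *      unsigned long nr_source_pages = 0;
 *
 *      for_each_kimage_entry(image, ptr, entry)
 *              if (entry & IND_SOURCE)
 *                      nr_source_pages++;
 */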
static void kimage_free_entry(kimage_entry_t entry)
        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
static void kimage_free(struct kimage *image)
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;
        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);
        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);
        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
static kimage_entry_t *kimage_dst_used(struct kimage *image,
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                        destination += PAGE_SIZE;
static struct page *kimage_alloc_page(struct kimage *image,
                                        unsigned long destination)
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
         * be fixed up.
         * Walk through the list of destination pages, and see if I
         * have a match.
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
        /* Allocate a page, if we run out of memory give up */
        page = kimage_alloc_pages(gfp_mask, 0);
        /* If the page cannot be used file it away */
        if (page_to_pfn(page) >
                        (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                list_add(&page->lru, &image->unuseable_pages);
        addr = page_to_pfn(page) << PAGE_SHIFT;
        /* If it is the destination page we want, use it */
        if (addr == destination)
        /* If the page is not a destination page use it */
        if (!kimage_is_destination_range(image, addr,
         * I know that the page is someone's destination page.
         * See if there is already a source page for this
         * destination page.  And if so swap the source pages.
        old = kimage_dst_used(image, addr);
                unsigned long old_addr;
                struct page *old_page;
                old_addr = *old & PAGE_MASK;
                old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                copy_highpage(page, old_page);
                *old = addr | (*old & ~PAGE_MASK);
                /* The old page I have found cannot be a
                 * destination page, so return it if its
                 * gfp_flags honor the ones passed in.
                if (!(gfp_mask & __GFP_HIGHMEM) &&
                                PageHighMem(old_page)) {
                        kimage_free_pages(old_page);
                /* Place the page on the destination list; I
                 * will use it later.
                list_add(&page->lru, &image->dest_pages);
static int kimage_load_normal_segment(struct kimage *image,
                                        struct kexec_segment *segment)
        unsigned long ubytes, mbytes;
        unsigned char __user *buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        result = kimage_set_destination(image, maddr);
                size_t uchunk, mchunk;
                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                result = kimage_add_page(image, page_to_pfn(page)
                /* Start with a clear page */
                memset(ptr, 0, PAGE_SIZE);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
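                /*
                 * Illustrative example: if maddr is 0x101200, this
                 * iteration copies into the page at offset 0x200 and
                 * mchunk is PAGE_SIZE - 0x200 bytes, so a segment that
                 * is not page aligned is split across successive pages.
                 */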
                result = copy_from_user(ptr, buf, uchunk);
                        result = (result < 0) ? result : -EIO;
static int kimage_load_crash_segment(struct kimage *image,
                                        struct kexec_segment *segment)
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
        unsigned long ubytes, mbytes;
        unsigned char __user *buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
                size_t uchunk, mchunk;
                page = pfn_to_page(maddr >> PAGE_SHIFT);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (uchunk > ubytes) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
                        result = (result < 0) ? result : -EIO;
static int kimage_load_segment(struct kimage *image,
                                struct kexec_segment *segment)
        int result = -ENOMEM;
        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination, and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems so if you need
 * that to happen you need to do that yourself.
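/*
 * Illustrative user-space sketch (an assumption about typical usage, not
 * part of this file): a loader such as kexec-tools prepares one
 * kexec_segment per blob and invokes the system call, roughly:
 *
 *      struct kexec_segment seg = {
 *              .buf   = kernel_buf,            // user-space copy of the image
 *              .bufsz = kernel_len,
 *              .mem   = (void *)0x100000,      // example physical destination
 *              .memsz = kernel_len_page_aligned,
 *      };
 *      syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT);
 *
 * and later reboot(LINUX_REBOOT_CMD_KEXEC) jumps into the loaded image.
 */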
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static DEFINE_MUTEX(kexec_mutex);
SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
        struct kimage **dest_image, *image;
        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
         * Verify we have a legal set of flags
         * This leaves us room for future extensions.
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
                ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
        if (nr_segments > KEXEC_SEGMENT_MAX)
        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of a currently in-use crash kernel.
         *
         * KISS: always take the mutex.
        if (!mutex_trylock(&kexec_mutex))
        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                        nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                         * we corrupt it.
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                        nr_segments, segments);
                if (flags & KEXEC_PRESERVE_CONTEXT)
                        image->preserve_context = 1;
                result = machine_kexec_prepare(image);
                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
                kimage_terminate(image);
        /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);
        mutex_unlock(&kexec_mutex);
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                unsigned long nr_segments,
                                struct compat_kexec_segment __user *segments,
                                unsigned long flags)
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;
        /* Don't allow clients that don't understand the native
         * architecture to do anything.
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
        if (nr_segments > KEXEC_SEGMENT_MAX)
        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));
                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.memsz = in.memsz;
                result = copy_to_user(&ksegments[i], &out, sizeof(out));
        return sys_kexec_load(entry, nr_segments, ksegments, flags);
void crash_kexec(struct pt_regs *regs)
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;
                        kmsg_dump(KMSG_DUMP_KEXEC);
                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                mutex_unlock(&kexec_mutex);
size_t crash_get_memory_size(void)
        mutex_lock(&kexec_mutex);
        size = crashk_res.end - crashk_res.start + 1;
        mutex_unlock(&kexec_mutex);
static void free_reserved_phys_range(unsigned long begin, unsigned long end)
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
                init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
                free_page((unsigned long)__va(addr));
int crash_shrink_memory(unsigned long new_size)
        unsigned long start, end;
        mutex_lock(&kexec_mutex);
        if (kexec_crash_image) {
        start = crashk_res.start;
        end = crashk_res.end;
        if (new_size >= end - start + 1) {
                if (new_size == end - start + 1)
        start = roundup(start, PAGE_SIZE);
        end = roundup(start + new_size, PAGE_SIZE);
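        /*
         * Illustrative example: with a 128M reservation starting at
         * 0x10000000 and new_size = 64M, end becomes 0x14000000; the
         * pages from there up to the old crashk_res.end are returned to
         * the page allocator below and the resource is trimmed to match.
         */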
        free_reserved_phys_range(end, crashk_res.end);
        release_resource(&crashk_res);
        crashk_res.end = end - 1;
        mutex_unlock(&kexec_mutex);
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
        struct elf_note note;
        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3)/4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3)/4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3)/4;
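/*
 * Illustrative layout (a sketch): appending an NT_PRSTATUS note with the
 * name "CORE" emits, in order, the elf_note header (n_namesz = 5,
 * n_descsz = sizeof(struct elf_prstatus), n_type = NT_PRSTATUS), then
 * "CORE\0" padded to 8 bytes, then the prstatus data padded to a 4-byte
 * boundary -- which is why each advance above rounds up with (x + 3)/4.
 */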
static void final_note(u32 *buf)
        struct elf_note note;
        memcpy(buf, &note, sizeof(note));
void crash_save_cpu(struct pt_regs *regs, int cpu)
        struct elf_prstatus prstatus;
        if ((cpu < 0) || (cpu >= nr_cpu_ids))
        /* Using ELF notes here is opportunistic.
         * I need a well defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away.  ELF notes happen to provide
         * all of that, so there is no need to invent something new.
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
static int __init crash_notes_memory_init(void)
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
                printk("Kexec: Memory allocation for saving cpu register"
                " states failed\n");
module_init(crash_notes_memory_init)
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 * This function parses command lines in the format
 *
 *     crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
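/*
 * For example (an illustrative command line, not from this file):
 *
 *     crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when the system has between 512M and 2G of RAM, 128M when
 * it has 2G or more, and places the reservation at offset 16M.
 */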
static int __init parse_crashkernel_mem(char *cmdline,
                                        unsigned long long system_ram,
                                        unsigned long long *crash_size,
                                        unsigned long long *crash_base)
        char *cur = cmdline, *tmp;
        /* for each entry of the comma-separated list */
                unsigned long long start, end = ULLONG_MAX, size;
                /* get the start of the range */
                start = memparse(cur, &tmp);
                        pr_warning("crashkernel: Memory value expected\n");
                        pr_warning("crashkernel: '-' expected\n");
                /* if no ':' is here, then we read the end */
                        end = memparse(cur, &tmp);
                                pr_warning("crashkernel: Memory "
                                        "value expected\n");
                                pr_warning("crashkernel: end <= start\n");
                        pr_warning("crashkernel: ':' expected\n");
                size = memparse(cur, &tmp);
                        pr_warning("Memory value expected\n");
                if (size >= system_ram) {
                        pr_warning("crashkernel: invalid size\n");
                if (system_ram >= start && system_ram < end) {
        } while (*cur++ == ',');
        if (*crash_size > 0) {
                while (*cur && *cur != ' ' && *cur != '@')
                        *crash_base = memparse(cur, &tmp);
                                pr_warning("Memory value expected "
 * This function parses "simple" (old) crashkernel command lines like
 *
 *     crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
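/*
 * For example (an illustrative command line, not from this file):
 *
 *     crashkernel=128M@16M
 *
 * asks for a 128M reservation at physical offset 16M; without "@offset"
 * the base address is left for the architecture code to choose.
 */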
static int __init parse_crashkernel_simple(char *cmdline,
                                unsigned long long *crash_size,
                                unsigned long long *crash_base)
        char *cur = cmdline;
        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warning("crashkernel: memory value expected\n");
                *crash_base = memparse(cur+1, &cur);
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
int __init parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
        char    *p = cmdline, *ck_cmdline = NULL;
        char    *first_colon, *first_space;
        BUG_ON(!crash_size || !crash_base);
        /* find crashkernel and use the last one if there are more */
        p = strstr(p, "crashkernel=");
                p = strstr(p+1, "crashkernel=");
        ck_cmdline += 12; /* strlen("crashkernel=") */
         * if the commandline contains a ':', then that's the extended
         * syntax -- if not, it must be the classic syntax
        first_colon = strchr(ck_cmdline, ':');
        first_space = strchr(ck_cmdline, ' ');
        if (first_colon && (!first_space || first_colon < first_space))
                return parse_crashkernel_mem(ck_cmdline, system_ram,
                                crash_size, crash_base);
        return parse_crashkernel_simple(ck_cmdline, crash_size,
void crash_save_vmcoreinfo(void)
        if (!vmcoreinfo_size)
        vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
        buf = (u32 *)vmcoreinfo_note;
        buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
void vmcoreinfo_append_str(const char *fmt, ...)
        va_start(args, fmt);
        r = vsnprintf(buf, sizeof(buf), fmt, args);
        if (r + vmcoreinfo_size > vmcoreinfo_max_size)
                r = vmcoreinfo_max_size - vmcoreinfo_size;
        memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
        vmcoreinfo_size += r;
 * provide an empty default implementation here -- architecture
 * code may override this
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
        return __pa((unsigned long)(char *)&vmcoreinfo_note);
static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
        VMCOREINFO_PAGESIZE(PAGE_SIZE);
        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
        VMCOREINFO_SYMBOL(swapper_pg_dir);
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmlist);
#ifndef CONFIG_NEED_MULTIPLE_NODES
        VMCOREINFO_SYMBOL(mem_map);
        VMCOREINFO_SYMBOL(contig_page_data);
#ifdef CONFIG_SPARSEMEM
        VMCOREINFO_SYMBOL(mem_section);
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
        VMCOREINFO_STRUCT_SIZE(page);
        VMCOREINFO_STRUCT_SIZE(pglist_data);
        VMCOREINFO_STRUCT_SIZE(zone);
        VMCOREINFO_STRUCT_SIZE(free_area);
        VMCOREINFO_STRUCT_SIZE(list_head);
        VMCOREINFO_SIZE(nodemask_t);
        VMCOREINFO_OFFSET(page, flags);
        VMCOREINFO_OFFSET(page, _count);
        VMCOREINFO_OFFSET(page, mapping);
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        VMCOREINFO_OFFSET(pglist_data, node_mem_map);
        VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
        VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
        VMCOREINFO_OFFSET(pglist_data, node_id);
        VMCOREINFO_OFFSET(zone, free_area);
        VMCOREINFO_OFFSET(zone, vm_stat);
        VMCOREINFO_OFFSET(zone, spanned_pages);
        VMCOREINFO_OFFSET(free_area, free_list);
        VMCOREINFO_OFFSET(list_head, next);
        VMCOREINFO_OFFSET(list_head, prev);
        VMCOREINFO_OFFSET(vm_struct, addr);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        log_buf_kexec_setup();
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
        VMCOREINFO_NUMBER(PG_lru);
        VMCOREINFO_NUMBER(PG_private);
        VMCOREINFO_NUMBER(PG_swapcache);
        arch_crash_save_vmcoreinfo();
module_init(crash_save_vmcoreinfo_init)
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
int kernel_kexec(void)
        if (!mutex_trylock(&kexec_mutex))
#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                mutex_lock(&pm_mutex);
                pm_prepare_console();
                error = freeze_processes();
                        goto Restore_console;
                error = dpm_suspend_start(PMSG_FREEZE);
                        goto Resume_console;
                /* At this point, dpm_suspend_start() has been called,
                 * but *not* dpm_suspend_noirq().  We *must* call
                 * dpm_suspend_noirq() now.  Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                error = dpm_suspend_noirq(PMSG_FREEZE);
                        goto Resume_devices;
                error = disable_nonboot_cpus();
                local_irq_disable();
                /* Suspend system devices */
                error = sysdev_suspend(PMSG_FREEZE);
                kernel_restart_prepare(NULL);
                printk(KERN_EMERG "Starting new kernel\n");
        machine_kexec(kexec_image);
#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                enable_nonboot_cpus();
                dpm_resume_noirq(PMSG_RESTORE);
                dpm_resume_end(PMSG_RESTORE);
                pm_restore_console();
                mutex_unlock(&pm_mutex);
        mutex_unlock(&kexec_mutex);