/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend.  The unsafe pages have PageNosaveFree set
 * and we count them using unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

/**
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
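/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * compiled out): the typical allocate/release pairing for image pages.
 * On resume, get_safe_page() skips page frames that collide with the
 * image and counts them in allocated_unsafe_pages; swsusp_free()
 * reclaims those later.
 */
#if 0
static void image_page_example(void)
{
	void *p = (void *)get_safe_page(GFP_ATOMIC);

	if (p)
		free_image_page(p, PG_UNSAFE_CLEAR);
}
#endif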
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};
static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
{
	free_list_of_pages(ca->chain, clear_page_nosave);
	memset(ca, 0, sizeof(struct chain_allocator));
}
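/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * compiled out): small objects are carved out of the current page of the
 * chain and can only be released all at once, by freeing the whole chain.
 */
#if 0
static void chain_allocator_example(void)
{
	struct chain_allocator ca;
	struct pbe *p;

	chain_init(&ca, GFP_ATOMIC, PG_SAFE);
	p = chain_alloc(&ca, sizeof(struct pbe));
	if (p) {
		/* use *p ... */
	}
	chain_free(&ca, PG_UNSAFE_CLEAR);	/* frees the entire chain */
}
#endif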
/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent blocks of bit chunks in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * pfns that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bit chunks
 * of type unsigned long each).  It also contains the pfns that
 * correspond to the start and end of the represented memory area and
 * the number of bit chunks in the block.
 *
 * NOTE: Memory bitmaps are used for two types of operations only:
 * "set a bit" and "find the next bit set".  Moreover, the searching
 * is always carried out after all of the "set a bit" operations
 * on given bitmap.
 */
#define BM_END_OF_MAP	(~0UL)

#define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
#define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
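/*
 * For exposition (editor's addition): with 4 KiB pages and 32-bit longs,
 * BM_CHUNKS_PER_BLOCK = 1024, BM_BITS_PER_CHUNK = 32 and
 * BM_BITS_PER_BLOCK = 32768, so a pfn inside a block decomposes as
 *
 *	offset = pfn - bb->start_pfn;
 *	chunk  = offset / BM_BITS_PER_CHUNK;	(which unsigned long)
 *	bit    = offset % BM_BITS_PER_CHUNK;	(which bit within it)
 *
 * which is exactly the arithmetic memory_bm_set_bit() performs below.
 */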
struct bm_block {
	struct bm_block *next;		/* next element of the list */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
	unsigned int size;		/* number of bit chunks */
	unsigned long *data;		/* chunks of bits representing pages */
};

struct zone_bitmap {
	struct zone_bitmap *next;	/* next element of the list */
	unsigned long start_pfn;	/* minimal pfn in this zone */
	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
	struct bm_block *bm_blocks;	/* list of bitmap blocks */
	struct bm_block *cur_block;	/* recently used bitmap block */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct zone_bitmap *zone_bm;
	struct bm_block *block;
	int chunk;
	int bit;
};

struct memory_bitmap {
	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
{
	bm->cur.chunk = 0;
	bm->cur.bit = -1;
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;

	zone_bm = bm->zone_bm_list;
	bm->cur.zone_bm = zone_bm;
	bm->cur.block = zone_bm->bm_blocks;
	memory_bm_reset_chunk(bm);
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

/**
 * create_bm_block_list - create a list of block bitmap objects
 */

static inline struct bm_block *
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
{
	struct bm_block *bblist = NULL;

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return NULL;

		bb->next = bblist;
		bblist = bb;
	}
	return bblist;
}
/**
 * create_zone_bm_list - create a list of zone bitmap objects
 */

static inline struct zone_bitmap *
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
{
	struct zone_bitmap *zbmlist = NULL;

	while (nr_zones-- > 0) {
		struct zone_bitmap *zbm;

		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
		if (!zbm)
			return NULL;

		zbm->next = zbmlist;
		zbmlist = zbm;
	}
	return zbmlist;
}
/**
 * memory_bm_create - allocate memory for a memory bitmap
 */

static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of zones */
	nr = 0;
	for_each_zone(zone)
		if (populated_zone(zone))
			nr++;

	/* Allocate the list of zones bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone(zone) {
		unsigned long pfn;

		if (!populated_zone(zone))
			continue;

		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		/* Allocate the list of bitmap block objects */
		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;

		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = get_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;

			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
				nr = 0;
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

 Free:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}
/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct zone_bitmap *zone_bm;

	/* Free the list of bit blocks for each zone_bitmap object */
	zone_bm = bm->zone_bm_list;
	while (zone_bm) {
		struct bm_block *bb;

		bb = zone_bm->bm_blocks;
		while (bb) {
			if (bb->data)
				free_image_page(bb->data, clear_nosave_free);
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	free_list_of_pages(bm->p_list, clear_nosave_free);
	bm->zone_bm_list = NULL;
}
/**
 * memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
 * to given pfn.  The cur_zone_bm member of @bm and the cur_block member
 * of @bm->cur_zone_bm are updated.
 *
 * If the bit cannot be set, the function returns -EINVAL.
 */

static int
memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;

	/* Check if the pfn is from the current zone */
	zone_bm = bm->cur.zone_bm;
	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
		zone_bm = bm->zone_bm_list;
		/* We don't assume that the zones are sorted by pfns */
		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
			zone_bm = zone_bm->next;
			if (unlikely(!zone_bm))
				return -EINVAL;
		}
		bm->cur.zone_bm = zone_bm;
	}
	/* Check if the pfn corresponds to the current bitmap block */
	bb = zone_bm->cur_block;
	if (pfn < bb->start_pfn)
		bb = zone_bm->bm_blocks;

	while (pfn >= bb->end_pfn) {
		bb = bb->next;
		if (unlikely(!bb))
			return -EINVAL;
	}
	zone_bm->cur_block = bb;
	pfn -= bb->start_pfn;
	set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
	return 0;
}
/* Two auxiliary functions for memory_bm_next_pfn */

/* Find the first set bit in the given chunk, if there is one */

static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
{
	bit++;
	while (bit < BM_BITS_PER_CHUNK) {
		if (test_bit(bit, chunk_p))
			return bit;

		bit++;
	}
	return -1;
}

/* Find a chunk containing some bits set in given block of bits */

static inline int next_chunk_in_block(int n, struct bm_block *bb)
{
	n++;
	while (n < bb->size) {
		if (bb->data[n])
			return n;

		n++;
	}
	return -1;
}

/**
 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 * in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 * returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int chunk;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			chunk = bm->cur.chunk;
			bit = bm->cur.bit;
			do {
				bit = next_bit_in_chunk(bit, bb->data + chunk);
				if (bit >= 0)
					goto Return_pfn;

				chunk = next_chunk_in_block(chunk, bb);
				bit = -1;
			} while (chunk >= 0);
			bb = bb->next;
			bm->cur.block = bb;
			memory_bm_reset_chunk(bm);
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			memory_bm_reset_chunk(bm);
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.chunk = chunk;
	bm->cur.bit = bit;
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}
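/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * compiled out): the only two supported operations are "set a bit" and
 * "find the next set bit", and every scan must be preceded by a position
 * reset.  The pfn passed in here is hypothetical.
 */
#if 0
static void memory_bitmap_example(struct memory_bitmap *bm,
				  unsigned long some_pfn)
{
	unsigned long pfn;

	memory_bm_set_bit(bm, some_pfn);

	memory_bm_position_reset(bm);
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm)) {
		/* process pfn ... */
	}
}
#endif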
/**
 * snapshot_additional_pages - estimate the number of additional pages
 * needed for setting up the suspend image data structures for given
 * zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return 2 * res;
}
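/*
 * Worked example (editor's addition): for a zone spanning 2^17 pages
 * (512 MiB with 4 KiB pages) and 32-bit longs, BM_BITS_PER_BLOCK is
 * 32768, so res starts at 4 bitmap blocks; the 4 struct bm_block
 * objects fit in 1 more page, giving res = 5, and the result is
 * doubled because two bitmaps are created for the image.
 */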
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_zone(zone)
		if (populated_zone(zone) && is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't a part of a free chunk of pages.
 */

static struct page *saveable_highmem_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - compute the total number of saveable highmem
 * pages.
 */

unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't a part of
 * a free chunk of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;

	return page;
}

/**
 * count_data_pages - compute the total number of saveable non-highmem
 * pages.
 */

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				n++;
	}
	return n;
}

/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
			saveable_highmem_page(pfn) : saveable_page(pfn);
}

static void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		src = page_address(s_page);
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			do_copy_page(buffer, src);
			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			dst = page_address(d_page);
			do_copy_page(dst, src);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(pfn)

static inline void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	do_copy_page(page_address(pfn_to_page(dst_pfn)),
			page_address(pfn_to_page(src_pfn)));
}
#endif /* CONFIG_HIGHMEM */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	do {
		pfn = memory_bm_next_pfn(orig_bm);
		if (likely(pfn != BM_END_OF_MAP))
			copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	} while (pfn != BM_END_OF_MAP);
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;

/**
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - compute the number of non-highmem pages
 * that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages();

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * enough_free_mem - Make sure we have enough free memory for the
 * snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = 0, meta = 0;

	for_each_zone(zone) {
		meta += snapshot_additional_pages(zone);
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);
	}

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, meta, free);

	return free > nr_pages + PAGES_FOR_IO + meta;
}
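/*
 * Worked example (editor's addition, made-up numbers): with 20000 page
 * frames needed for the copies, meta = 50 and PAGES_FOR_IO = 1024, the
 * check requires strictly more than 21074 free non-highmem pages, so a
 * doomed snapshot is refused here instead of failing during the atomic
 * copy.
 */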
#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - if there are some highmem pages in the suspend
 * image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_image_pages - allocate some highmem pages for the image.
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is smaller than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * swsusp_alloc - allocate memory for the suspend image
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	int error;

	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	if (nr_highmem > 0) {
		error = get_highmem_buffer(PG_ANY);
		if (error)
			goto Free;

		nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
	}
	while (nr_pages-- > 0) {
		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);

		if (!page)
			goto Free;

		memory_bm_set_bit(copy_bm, page_to_pfn(page));
	}
	return 0;

 Free:
	swsusp_free();
	return -ENOMEM;
}
/* Memory bitmap used for marking saveable pages (during suspend) or the
 * suspend image pages (during resume)
 */
static struct memory_bitmap orig_bm;
/* Memory bitmap used on suspend for marking allocated pages that will contain
 * the copies of saveable pages.  During resume it is initially used for
 * marking the suspend image pages, but then its set bits are duplicated in
 * @orig_bm and it is released.  Next, on systems with high memory, it may be
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk("swsusp: critical section\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "swsusp: Memory allocation failed\n");
		return -ENOMEM;
	}

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);

	return 0;
}
static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}

/**
 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 * are stored in the array @buf[] (1 page at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}
/**
 * snapshot_read_next - used for reading the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to read from the snapshot.  It must not be zero.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.  The number returned
 * may be smaller than @count, but this only happens if the read would
 * cross a page boundary otherwise.
 *
 * The function returns 0 to indicate the end of the data stream
 * condition, and a negative number is returned on error.  In such cases
 * the structure pointed to by @handle is not updated and should not be
 * used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			struct page *page;

			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
			if (PageHighMem(page)) {
				/* Highmem pages are copied to the buffer,
				 * because we can't return with a kmapped
				 * highmem page (we may not be called again).
				 */
				void *kaddr;

				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(buffer, kaddr, PAGE_SIZE);
				kunmap_atomic(kaddr, KM_USER0);
				handle->buffer = buffer;
			} else {
				handle->buffer = page_address(page);
			}
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
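/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * compiled out): draining the snapshot one page at a time.  data_of()
 * comes from kernel/power/power.h; write_out() is a hypothetical
 * stand-in for the real consumers in the swap and user interfaces.
 */
#if 0
static int read_snapshot_example(struct snapshot_handle *handle)
{
	int n;

	while ((n = snapshot_read_next(handle, PAGE_SIZE)) > 0)
		write_out(data_of(*handle), n);

	return n;	/* 0 at the end of the image, negative on error */
}
#endif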
/**
 * mark_unsafe_pages - mark the pages that cannot be used for storing
 * the image during resume, because they conflict with the pages that
 * had been used before suspend
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}

static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
static inline int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		reason = "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		reason = "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		reason = "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 * load_header - check the image header and copy data from it
 */

static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 * the corresponding bit in the memory bitmap @bm
 */

static inline void
unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		memory_bm_set_bit(bm, buf[j]);
	}
}

/* List of "safe" pages that may be used to store data loaded from the suspend
 * image
 */
static struct linked_page *safe_pages_list;
#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 * count_highmem_image_pages - compute the number of highmem pages in the
 * suspend image.  The bits in the memory bitmap @bm that correspond to the
 * image pages are assumed to be set.
 */

static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}
/**
 * prepare_highmem_image - try to allocate as many highmem pages as
 * there are highmem image pages (@nr_highmem_p points to the variable
 * containing the number of highmem image pages).  The pages that are
 * "safe" (ie. will not be overwritten when the suspend image is
 * restored) have the corresponding bits set in @bm (it must be
 * uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem
 * image pages.
 */

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
/**
 * get_highmem_page_buffer - for given highmem image page find the buffer
 * that suspend_write_next() should set for its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */

static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
/**
 * copy_last_highmem_page - copy the contents of a highmem image from
 * @buffer, where the caller of snapshot_write_next() has placed them,
 * to the right location represented by @last_highmem_page.
 */

static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page, KM_USER0);
		memcpy(dst, buffer, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return NULL;
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
/**
 * prepare_image - use the memory bitmap @bm to mark the pages that will
 * be overwritten in the process of restoring the system memory state
 * from the suspend image ("unsafe" pages) and allocate memory for the
 * image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for the image data, but not to assign these
 * pages to specific tasks initially.  Instead, we just mark them as
 * allocated and create lists of "safe" pages that will be used
 * later.  On systems with high memory a list of "safe" highmem pages is
 * also created.
 */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
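/*
 * For exposition (editor's addition): with 4 KiB pages and 32-bit
 * pointers, LINKED_PAGE_DATA_SIZE is 4092 bytes and struct pbe is three
 * pointers (12 bytes), so each reserved page holds 341 PBEs.
 */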
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
/**
 * get_buffer - compute the address that snapshot_write_next() should
 * set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
/**
 * snapshot_write_next - used for writing the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * The @count parameter should contain the number of bytes the caller
 * wants to write to the image.  It must not be zero.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.  The number returned
 * may be smaller than @count, but this only happens if the write would
 * cross a page boundary otherwise.
 *
 * The function returns 0 to indicate the "end of file" condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be
 * used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (handle->offset == 0) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	}
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (handle->prev == 0) {
			error = load_header(buffer);
			if (error)
				return error;

			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
			if (error)
				return error;

		} else if (handle->prev <= nr_meta_pages) {
			unpack_orig_pfns(buffer, &copy_bm);
			if (handle->prev == nr_meta_pages) {
				error = prepare_image(&orig_bm, &copy_bm);
				if (error)
					return error;

				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
				memory_bm_position_reset(&orig_bm);
				restore_pblist = NULL;
				handle->buffer = get_buffer(&orig_bm, &ca);
				handle->sync_read = 0;
				if (!handle->buffer)
					return -ENOMEM;
			}
		} else {
			copy_last_highmem_page();
			handle->buffer = get_buffer(&orig_bm, &ca);
			if (handle->buffer != buffer)
				handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
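/*
 * Illustrative sketch (editor's addition, not part of the original file;
 * compiled out): feeding a saved image back in.  read_in() is a
 * hypothetical producer; after the loop the caller is expected to check
 * snapshot_image_loaded() and call snapshot_write_finalize().
 */
#if 0
static int write_snapshot_example(struct snapshot_handle *handle)
{
	int n;

	do {
		n = snapshot_write_next(handle, PAGE_SIZE);
		if (n > 0)
			read_in(data_of(*handle), n);
	} while (n > 0);

	return n;	/* 0 on "end of file", negative on error */
}
#endif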
/**
 * snapshot_write_finalize - must be called after the last call to
 * snapshot_write_next() in case the last page in the image happens
 * to be a highmem page and its contents should be stored in the
 * highmem.  Additionally, it releases the memory that will not be
 * used any more.
 */

void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Free only if we have loaded the image entirely */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1, KM_USER0);
	kaddr2 = kmap_atomic(p2, KM_USER1);
	memcpy(buf, kaddr1, PAGE_SIZE);
	memcpy(kaddr1, kaddr2, PAGE_SIZE);
	memcpy(kaddr2, buf, PAGE_SIZE);
	kunmap_atomic(kaddr1, KM_USER0);
	kunmap_atomic(kaddr2, KM_USER1);
}
/**
 * restore_highmem - for each highmem page that was allocated before
 * the suspend and included in the suspend image, and also has been
 * allocated by the "resume" kernel, swap its current (ie. "before
 * resume") contents with the previous (ie. "before suspend") one.
 *
 * If the resume eventually fails, we can call this function once
 * again and restore the "before resume" highmem state.
 */

int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */