// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"
/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;
/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
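/*
 * For example, with 4 KiB pages and a 64-bit sector_t this works out to
 * 4096 / 8 - 1 = 511 entries per map page; the one slot held back is the
 * .next_swap link to the following swap_map_page on disk.
 */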
/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}
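/*
 * For example, if 1 GiB of low memory is free when writing starts, the
 * writer keeps roughly 512 MiB of it free, waiting for in-flight I/O in
 * swap_write_page() whenever the free-page count drops below that mark.
 */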
struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/*
 * The swap_map_handle structure is used for handling swap in
 * a file-alike way.
 */
struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};
struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;

static struct swsusp_header *swsusp_header;
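/*
 * The reserved[] padding sizes the structure to exactly one page, so
 * sig[] occupies the last 10 bytes of the swap header page, which is
 * where mkswap puts the SWAP-SPACE/SWAPSPACE2 signature; orig_sig[]
 * preserves that original signature so it can be written back on resume.
 */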
/*
 * The following functions are used for tracing the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start = swap_offset;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end = swap_offset;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;
	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */
sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}
/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */
void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));
		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}
static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
}
static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}
static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
		struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio_set_dev(bio, hib_resume_bdev);
	bio_set_op_attrs(bio, op, op_flags);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}
	return error;
}
/* Returns a negative errno, not a blk_status_t, hence the int return type. */
static int hib_wait_io(struct hib_bio_batch *hb)
{
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}
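/*
 * Callers batch asynchronous I/O through a hib_bio_batch: hib_init_batch()
 * resets the counter, each hib_submit_io(..., &hb) increments it, and
 * hib_wait_io() sleeps until hib_end_io() has completed every bio,
 * returning the first error seen (see save_image() below for the pattern).
 */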
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}
/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev = bdget(swsusp_resume_device);
	if (!hib_resume_bdev)
		return -ENOMEM;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);
	return res;
}
/**
 * write_page - Write one page to given swap location.
 * @buf: Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb: bio completion batch
 */
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}
static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
out:
	return error;
}
static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}
static int swap_writer_finish(struct swap_map_handle *handle,
		unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		error = mark_swapfiles(handle, flags);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
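/*
 * Worked out for 4 KiB pages and an 8-byte size_t: lzo1x_worst_compress(x)
 * is x + x/16 + 64 + 3, so a 128 KiB chunk can expand to 139331 bytes;
 * adding the 8-byte length header and rounding up to whole pages gives
 * LZO_CMP_PAGES = 35.
 */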
/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192
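/*
 * With 4 KiB pages the read-ahead ring in load_image_lzo() is therefore
 * sized between 4 MiB (1024 pages) and 32 MiB (8192 pages), depending on
 * how much low memory is free when the image load starts.
 */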
/**
 * save_image - save the suspend image data
 */
static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}
/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;		  /* thread */
	atomic_t ready;				  /* ready to start flag */
	atomic_t stop;				  /* ready to stop flag */
	unsigned run_threads;			  /* nr current threads */
	wait_queue_head_t go;			  /* start crc update */
	wait_queue_head_t done;			  /* crc update done */
	u32 *crc32;				  /* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];		  /* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];	  /* uncompressed data */
};
/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
					     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;		  /* thread */
	atomic_t ready;				  /* ready to start flag */
	atomic_t stop;				  /* ready to stop flag */
	int ret;				  /* return code */
	wait_queue_head_t go;			  /* start compression */
	wait_queue_head_t done;			  /* compression done */
	size_t unc_len;				  /* uncompressed length */
	size_t cmp_len;				  /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	  /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	  /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};
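/*
 * A note on footprint: with 4 KiB pages each cmp_data carries a 128 KiB
 * uncompressed buffer, a 140 KiB worst-case compressed buffer and the
 * 16 KiB LZO1X_1_MEM_COMPRESS workspace, so the default of up to three
 * threads costs well under 1 MiB of buffer space in total.
 */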
/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
					  d->cmp + LZO_HEADER, &d->cmp_len,
					  d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
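/*
 * Each worker thread uses the same handshake: the producer fills the
 * buffers, sets ->ready and wakes ->go; the thread clears ->ready, does
 * its work, sets ->stop and wakes ->done; the consumer waits on ->done
 * and clears ->stop before reusing the slot. kthread_stop() breaks the
 * loop at the wait_event().
 */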
/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
					    &data[thr],
					    "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}
/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */
static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}
/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we do NOT want to mark
 * filesystem clean: it is not. (And it does not matter; if we resume
 * correctly, we'll mark system clean anyway.)
 */
int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;
		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}
/*
 * The following functions allow us to read data using a swap map
 * in a file-alike way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}
static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}
static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}
/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_pages pages to load)
 */
static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}
/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;		  /* thread */
	atomic_t ready;				  /* ready to start flag */
	atomic_t stop;				  /* ready to stop flag */
	int ret;				  /* return code */
	wait_queue_head_t go;			  /* start decompression */
	wait_queue_head_t done;			  /* decompression done */
	size_t unc_len;				  /* uncompressed length */
	size_t cmp_len;				  /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	  /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	  /* compressed buffer */
};
/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
					       d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
		 have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
					    &data[thr],
					    "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
					    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}
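/*
 * To summarize the flow above: page[] is a ring of read-ahead pages.
 * swap_read_page() queues asynchronous reads into page[ring]; 'asked'
 * counts reads in flight, 'have' pages that have completed, and 'want'
 * free slots. Compressed chunks are then assembled from page[pg] into
 * the per-thread cmp buffers, decompressed in parallel, CRC-checked and
 * copied out through snapshot_write_next().
 */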
/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *	     be written into this memory location
 */
int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}
/**
 * swsusp_check - Check for swsusp signature in the resume device
 */
int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, 0,
					swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}
/**
 * swsusp_close - close swap device.
 */
void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}
/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif
static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);