PM / Hibernate: fix the number of pages used for hibernate/thaw buffering
kernel/power/swap.c
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>

#include "power.h"

#define HIBERNATE_SIG   "S1SUSPEND"

/*
 *      The swap map is a data structure used for keeping track of each page
 *      written to a swap partition.  It consists of many swap_map_page
 *      structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 *      entries.  These structures are stored on the swap and linked together
 *      with the help of the .next_swap member.
 *
 *      The swap map is created during suspend.  The swap map pages are
 *      allocated and populated one at a time, so we only need one memory
 *      page to set up the entire structure.
 *
 *      During resume we read all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
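/*
 * Each swap_map_page fills exactly one page: the "- 1" above reserves
 * the last sector_t slot of the page for the .next_swap link (see
 * struct swap_map_page below).
 */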

/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
        return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
        return low_free_pages() / 2;
}

struct swap_map_page {
        sector_t entries[MAP_PAGE_ENTRIES];
        sector_t next_swap;
};

struct swap_map_page_list {
        struct swap_map_page *map;
        struct swap_map_page_list *next;
};

/**
 *      The swap_map_handle structure is used for handling swap in
 *      a file-like way.
 */

struct swap_map_handle {
        struct swap_map_page *cur;
        struct swap_map_page_list *maps;
        sector_t cur_swap;
        sector_t first_sector;
        unsigned int k;
        unsigned long reqd_free_pages;
        u32 crc32;
};

struct swsusp_header {
        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
                      sizeof(u32)];
        u32     crc32;
        sector_t image;
        unsigned int flags;     /* Flags to pass to the "boot" kernel */
        char    orig_sig[10];
        char    sig[10];
} __attribute__((packed));
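/*
 * The .reserved member pads the structure to exactly PAGE_SIZE, so that
 * .sig lines up with the swap signature ("SWAP-SPACE"/"SWAPSPACE2") at
 * the end of the page read from swsusp_resume_block.  mark_swapfiles()
 * saves that signature in .orig_sig and overwrites .sig with
 * HIBERNATE_SIG; swsusp_check() restores it on resume.
 */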

static struct swsusp_header *swsusp_header;

/**
 *      The following functions are used for tracking the allocated
 *      swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
        struct rb_node node;
        unsigned long start;
        unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
        struct rb_node **new = &(swsusp_extents.rb_node);
        struct rb_node *parent = NULL;
        struct swsusp_extent *ext;

        /* Figure out where to put the new node */
        while (*new) {
                ext = container_of(*new, struct swsusp_extent, node);
                parent = *new;
                if (swap_offset < ext->start) {
                        /* Try to merge */
                        if (swap_offset == ext->start - 1) {
                                ext->start--;
                                return 0;
                        }
                        new = &((*new)->rb_left);
                } else if (swap_offset > ext->end) {
                        /* Try to merge */
                        if (swap_offset == ext->end + 1) {
                                ext->end++;
                                return 0;
                        }
                        new = &((*new)->rb_right);
                } else {
                        /* It already is in the tree */
                        return -EINVAL;
                }
        }
        /* Add the new node and rebalance the tree. */
        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
        if (!ext)
                return -ENOMEM;

        ext->start = swap_offset;
        ext->end = swap_offset;
        rb_link_node(&ext->node, parent, new);
        rb_insert_color(&ext->node, &swsusp_extents);
        return 0;
}

/**
 *      alloc_swapdev_block - allocate a swap page and register that it has
 *      been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
        unsigned long offset;

        offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
                        swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
        return 0;
}

/**
 *      free_all_swap_pages - free swap pages allocated for saving image data.
 *      It also frees the extents used to register which swap entries had been
 *      allocated.
 */

void free_all_swap_pages(int swap)
{
        struct rb_node *node;

        while ((node = swsusp_extents.rb_node)) {
                struct swsusp_extent *ext;
                unsigned long offset;

                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
                        swap_free(swp_entry(swap, offset));

                kfree(ext);
        }
}

int swsusp_swap_in_use(void)
{
        return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
struct block_device *hib_resume_bdev;

/*
 * Saving part
 */

static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
        int error;

        hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
        if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
            !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
                memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
                swsusp_header->image = handle->first_sector;
                swsusp_header->flags = flags;
                if (flags & SF_CRC32_MODE)
                        swsusp_header->crc32 = handle->crc32;
                error = hib_bio_write_page(swsusp_resume_block,
                                        swsusp_header, NULL);
        } else {
                printk(KERN_ERR "PM: Swap header not found!\n");
                error = -ENODEV;
        }
        return error;
}

/**
 *      swsusp_swap_check - check if the resume device is a swap device
 *      and get its index (if so)
 *
 *      This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
        int res;

        res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
                        &hib_resume_bdev);
        if (res < 0)
                return res;

        root_swap = res;
        res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
        if (res)
                return res;

        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
        if (res < 0)
                blkdev_put(hib_resume_bdev, FMODE_WRITE);

        return res;
}

/**
 *      write_page - Write one page to given swap location.
 *      @buf:           Address we're writing.
 *      @offset:        Offset of the swap page we're writing to.
 *      @bio_chain:     Link the next write BIO here.
 */

static int write_page(void *buf, sector_t offset, struct bio **bio_chain)
{
        void *src;
        int ret;

        if (!offset)
                return -ENOSPC;

        if (bio_chain) {
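                /*
                 * Asynchronous write: copy the data into a bounce page
                 * first, so that @buf can be reused before the I/O has
                 * completed.  If no page is available, wait for the
                 * in-flight BIOs to finish (which frees their bounce
                 * pages) and retry once; failing that, fall back to a
                 * synchronous write straight from @buf.
                 */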
                src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
                if (src) {
                        copy_page(src, buf);
                } else {
                        ret = hib_wait_on_bio_chain(bio_chain); /* Free pages */
                        if (ret)
                                return ret;
                        src = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
                        if (src) {
                                copy_page(src, buf);
                        } else {
                                WARN_ON_ONCE(1);
                                bio_chain = NULL;       /* Go synchronous */
                                src = buf;
                        }
                }
        } else {
                src = buf;
        }
        return hib_bio_write_page(offset, src, bio_chain);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur)
                free_page((unsigned long)handle->cur);
        handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
        int ret;

        ret = swsusp_swap_check();
        if (ret) {
                if (ret != -ENOSPC)
                        printk(KERN_ERR "PM: Cannot find swap device, try "
                                        "swapon -a.\n");
                return ret;
        }
        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
        if (!handle->cur) {
                ret = -ENOMEM;
                goto err_close;
        }
        handle->cur_swap = alloc_swapdev_block(root_swap);
        if (!handle->cur_swap) {
                ret = -ENOSPC;
                goto err_rel;
        }
        handle->k = 0;
        handle->reqd_free_pages = reqd_free_pages();
        handle->first_sector = handle->cur_swap;
        return 0;
err_rel:
        release_swap_writer(handle);
err_close:
        swsusp_close(FMODE_WRITE);
        return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
                                struct bio **bio_chain)
{
        int error = 0;
        sector_t offset;

        if (!handle->cur)
                return -EINVAL;
        offset = alloc_swapdev_block(root_swap);
        error = write_page(buf, offset, bio_chain);
        if (error)
                return error;
        handle->cur->entries[handle->k++] = offset;
        if (handle->k >= MAP_PAGE_ENTRIES) {
                offset = alloc_swapdev_block(root_swap);
                if (!offset)
                        return -ENOSPC;
                handle->cur->next_swap = offset;
                error = write_page(handle->cur, handle->cur_swap, bio_chain);
                if (error)
                        goto out;
                clear_page(handle->cur);
                handle->cur_swap = offset;
                handle->k = 0;
        }
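        /*
         * If free low memory has dropped to the watermark, wait for the
         * outstanding writes to complete (releasing their bounce pages)
         * before queueing more, then take a fresh watermark.
         */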
        if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
                error = hib_wait_on_bio_chain(bio_chain);
                if (error)
                        goto out;
                handle->reqd_free_pages = reqd_free_pages();
        }
 out:
        return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur && handle->cur_swap)
                return write_page(handle->cur, handle->cur_swap, NULL);
        else
                return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
                unsigned int flags, int error)
{
        if (!error) {
                flush_swap_writer(handle);
                printk(KERN_INFO "PM: S");
                error = mark_swapfiles(handle, flags);
                printk("|\n");
        }

        if (error)
                free_all_swap_pages(root_swap);
        release_swap_writer(handle);
        swsusp_close(FMODE_WRITE);

        return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER      sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES   32
#define LZO_UNC_SIZE    (LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES   DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
                                     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE    (LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS     3
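/*
 * The actual thread count used is num_online_cpus() - 1, clamped to the
 * range [1, LZO_THREADS]; see save_image_lzo() and load_image_lzo().
 */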

/* Maximum number of pages for read buffering. */
#define LZO_READ_PAGES  (MAP_PAGE_ENTRIES * 8)
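/*
 * The read-ahead ring is sized in whole swap-map pages' worth of
 * entries (eight of them); it is scaled down at run time if that much
 * low memory is not available, see load_image_lzo().
 */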


/**
 *      save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
        unsigned int m;
        int ret;
        int nr_pages;
        int err2;
        struct bio *bio;
        struct timeval start;
        struct timeval stop;

        printk(KERN_INFO "PM: Saving image data pages (%u pages) ...     ",
                nr_to_write);
        m = nr_to_write / 100;
        if (!m)
                m = 1;
        nr_pages = 0;
        bio = NULL;
        do_gettimeofday(&start);
        while (1) {
                ret = snapshot_read_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_write_page(handle, data_of(*snapshot), &bio);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
                nr_pages++;
        }
        err2 = hib_wait_on_bio_chain(&bio);
        do_gettimeofday(&stop);
        if (!ret)
                ret = err2;
        if (!ret)
                printk(KERN_CONT "\b\b\b\bdone\n");
        else
                printk(KERN_CONT "\n");
        swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
        return ret;
}

/**
 * Structure used for CRC32.
 */
struct crc_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        unsigned run_threads;                     /* nr current threads */
        wait_queue_head_t go;                     /* start crc update */
        wait_queue_head_t done;                   /* crc update done */
        u32 *crc32;                               /* points to handle's crc32 */
        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};

/**
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
        struct crc_data *d = data;
        unsigned i;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                for (i = 0; i < d->run_threads; i++)
                        *d->crc32 = crc32_le(*d->crc32,
                                             d->unc[i], *d->unc_len[i]);
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start compression */
        wait_queue_head_t done;                   /* compression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/**
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
        struct cmp_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
                                          d->cmp + LZO_HEADER, &d->cmp_len,
                                          d->wrk);
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
        unsigned int m;
        int ret = 0;
        int nr_pages;
        int err2;
        struct bio *bio;
        struct timeval start;
        struct timeval stop;
        size_t off;
        unsigned thr, run_threads, nr_threads;
        unsigned char *page = NULL;
        struct cmp_data *data = NULL;
        struct crc_data *crc = NULL;

        /*
         * We'll limit the number of threads for compression to limit memory
         * footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vmalloc(sizeof(*data) * nr_threads);
        if (!data) {
                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }
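        /*
         * Zero only the fields up to the wait queue heads: the wait
         * queues are initialised below and the large buffers do not
         * need clearing.
         */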
        for (thr = 0; thr < nr_threads; thr++)
                memset(&data[thr], 0, offsetof(struct cmp_data, go));

        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                printk(KERN_ERR "PM: Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        memset(crc, 0, offsetof(struct crc_data, go));

        /*
         * Start the compression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_compress_threadfn,
                                            &data[thr],
                                            "image_compress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        printk(KERN_ERR
                               "PM: Cannot start compression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }

        /*
         * Adjust number of free pages after all allocations have been done.
         * We don't want to run out of pages when writing.
         */
        handle->reqd_free_pages = reqd_free_pages();

        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        printk(KERN_INFO
                "PM: Using %u thread(s) for compression.\n"
                "PM: Compressing and saving image data (%u pages) ...     ",
                nr_threads, nr_to_write);
        m = nr_to_write / 100;
        if (!m)
                m = 1;
        nr_pages = 0;
        bio = NULL;
        do_gettimeofday(&start);
        for (;;) {
                for (thr = 0; thr < nr_threads; thr++) {
                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
                                ret = snapshot_read_next(snapshot);
                                if (ret < 0)
                                        goto out_finish;

                                if (!ret)
                                        break;

                                memcpy(data[thr].unc + off,
                                       data_of(*snapshot), PAGE_SIZE);

                                if (!(nr_pages % m))
                                        printk(KERN_CONT "\b\b\b\b%3d%%",
                                               nr_pages / m);
                                nr_pages++;
                        }
                        if (!off)
                                break;

                        data[thr].unc_len = off;

                        atomic_set(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }

                if (!thr)
                        break;

                crc->run_threads = thr;
                atomic_set(&crc->ready, 1);
                wake_up(&crc->go);

                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                printk(KERN_ERR "PM: LZO compression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(data[thr].unc_len))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

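                        /*
                         * Store the compressed length in the first
                         * LZO_HEADER bytes of the block, so that the
                         * loader knows how much data to read back.
                         */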
                        *(size_t *)data[thr].cmp = data[thr].cmp_len;

                        /*
                         * Given we are writing one page at a time to disk, we
                         * copy that much from the buffer, although the last
                         * bit will likely be smaller than full page. This is
                         * OK - we saved the length of the compressed data, so
                         * any garbage at the end will be discarded when we
                         * read it.
                         */
                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);

                                ret = swap_write_page(handle, page, &bio);
                                if (ret)
                                        goto out_finish;
                        }
                }

                wait_event(crc->done, atomic_read(&crc->stop));
                atomic_set(&crc->stop, 0);
        }

out_finish:
        err2 = hib_wait_on_bio_chain(&bio);
        do_gettimeofday(&stop);
        if (!ret)
                ret = err2;
        if (!ret) {
                printk(KERN_CONT "\b\b\b\bdone\n");
        } else {
                printk(KERN_CONT "\n");
        }
        swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
out_clean:
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        if (page)
                free_page((unsigned long)page);

        return ret;
}

/**
 *      enough_swap - Make sure we have enough swap to save the image.
 *
 *      Returns TRUE or FALSE after checking the total amount of swap
 *      space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
        unsigned int free_swap = count_swap_pages(root_swap, 1);
        unsigned int required;

        pr_debug("PM: Free swap pages: %u\n", free_swap);

        required = PAGES_FOR_IO + nr_pages;
        return free_swap > required;
}

/**
 *      swsusp_write - Write entire image and metadata.
 *      @flags: flags to pass to the "boot" kernel in the image header
 *
 *      It is important _NOT_ to umount filesystems at this point. We want
 *      them synced (in case something goes wrong) but we do NOT want to mark
 *      the filesystem clean: it is not. (And it does not matter; if we resume
 *      correctly, we'll mark the system clean anyway.)
 */

int swsusp_write(unsigned int flags)
{
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;
        unsigned long pages;
        int error;

        pages = snapshot_get_image_size();
        error = get_swap_writer(&handle);
        if (error) {
                printk(KERN_ERR "PM: Cannot get swap writer\n");
                return error;
        }
        if (flags & SF_NOCOMPRESS_MODE) {
                if (!enough_swap(pages, flags)) {
                        printk(KERN_ERR "PM: Not enough free swap\n");
                        error = -ENOSPC;
                        goto out_finish;
                }
        }
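        /*
         * The up-front swap-space check is only done for uncompressed
         * images; the final size of a compressed image is not known in
         * advance.
         */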
        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_read_next(&snapshot);
        if (error < PAGE_SIZE) {
                if (error >= 0)
                        error = -EFAULT;

                goto out_finish;
        }
        header = (struct swsusp_info *)data_of(snapshot);
        error = swap_write_page(&handle, header, NULL);
        if (!error) {
                error = (flags & SF_NOCOMPRESS_MODE) ?
                        save_image(&handle, &snapshot, pages - 1) :
                        save_image_lzo(&handle, &snapshot, pages - 1);
        }
out_finish:
        error = swap_writer_finish(&handle, flags, error);
        return error;
}

/**
 *      The following functions allow us to read data using a swap map
 *      in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
        struct swap_map_page_list *tmp;

        while (handle->maps) {
                if (handle->maps->map)
                        free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
        }
        handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
                unsigned int *flags_p)
{
        int error;
        struct swap_map_page_list *tmp, *last;
        sector_t offset;

        *flags_p = swsusp_header->flags;

        if (!swsusp_header->image) /* how can this happen? */
                return -EINVAL;

        handle->cur = NULL;
        last = handle->maps = NULL;
        offset = swsusp_header->image;
        while (offset) {
                tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
                if (!tmp) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }
                memset(tmp, 0, sizeof(*tmp));
                if (!handle->maps)
                        handle->maps = tmp;
                if (last)
                        last->next = tmp;
                last = tmp;

                tmp->map = (struct swap_map_page *)
                           __get_free_page(__GFP_WAIT | __GFP_HIGH);
                if (!tmp->map) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }

                error = hib_bio_read_page(offset, tmp->map, NULL);
                if (error) {
                        release_swap_reader(handle);
                        return error;
                }
                offset = tmp->map->next_swap;
        }
        handle->k = 0;
        handle->cur = handle->maps->map;
        return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
                                struct bio **bio_chain)
{
        sector_t offset;
        int error;
        struct swap_map_page_list *tmp;

        if (!handle->cur)
                return -EINVAL;
        offset = handle->cur->entries[handle->k];
        if (!offset)
                return -EFAULT;
        error = hib_bio_read_page(offset, buf, bio_chain);
        if (error)
                return error;
        if (++handle->k >= MAP_PAGE_ENTRIES) {
                handle->k = 0;
                free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
                if (!handle->maps)
                        release_swap_reader(handle);
                else
                        handle->cur = handle->maps->map;
        }
        return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
        release_swap_reader(handle);

        return 0;
}

/**
 *      load_image - load the image using the swap map handle
 *      @handle and the snapshot handle @snapshot
 *      (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        struct timeval start;
        struct timeval stop;
        struct bio *bio;
        int err2;
        unsigned nr_pages;

        printk(KERN_INFO "PM: Loading image data pages (%u pages) ...     ",
                nr_to_read);
        m = nr_to_read / 100;
        if (!m)
                m = 1;
        nr_pages = 0;
        bio = NULL;
        do_gettimeofday(&start);
        for ( ; ; ) {
                ret = snapshot_write_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_read_page(handle, data_of(*snapshot), &bio);
                if (ret)
                        break;
                if (snapshot->sync_read)
                        ret = hib_wait_on_bio_chain(&bio);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        printk("\b\b\b\b%3d%%", nr_pages / m);
                nr_pages++;
        }
        err2 = hib_wait_on_bio_chain(&bio);
        do_gettimeofday(&stop);
        if (!ret)
                ret = err2;
        if (!ret) {
                printk("\b\b\b\bdone\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
        } else
                printk("\n");
        swsusp_show_speed(&start, &stop, nr_to_read, "Read");
        return ret;
}

/**
 * Structure used for LZO data decompression.
 */
struct dec_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start decompression */
        wait_queue_head_t done;                   /* decompression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};

/**
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
        struct dec_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->unc_len = LZO_UNC_SIZE;
                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
                                               d->unc, &d->unc_len);
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        int eof = 0;
        struct bio *bio;
        struct timeval start;
        struct timeval stop;
        unsigned nr_pages;
        size_t off;
        unsigned i, thr, run_threads, nr_threads;
        unsigned ring = 0, pg = 0, ring_size = 0,
                 have = 0, want, need, asked = 0;
        unsigned long read_pages;
        unsigned char **page = NULL;
        struct dec_data *data = NULL;
        struct crc_data *crc = NULL;

        /*
         * We'll limit the number of threads for decompression to limit memory
         * footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = vmalloc(sizeof(*page) * LZO_READ_PAGES);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vmalloc(sizeof(*data) * nr_threads);
        if (!data) {
                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        for (thr = 0; thr < nr_threads; thr++)
                memset(&data[thr], 0, offsetof(struct dec_data, go));

        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                printk(KERN_ERR "PM: Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        memset(crc, 0, offsetof(struct crc_data, go));

        /*
         * Start the decompression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_decompress_threadfn,
                                            &data[thr],
                                            "image_decompress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        printk(KERN_ERR
                               "PM: Cannot start decompression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }

        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        /*
         * Adjust the number of pages used for read buffering, in case we
         * are short on memory: use half of the pages that will remain
         * free once the image is loaded, but at least enough for one
         * compressed block and at most LZO_READ_PAGES.
         */
        read_pages = (nr_free_pages() - snapshot_get_image_size()) >> 1;
        read_pages = clamp_val(read_pages, LZO_CMP_PAGES, LZO_READ_PAGES);
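        /*
         * Only the first LZO_CMP_PAGES of the ring are required for
         * forward progress, so they are allocated with __GFP_HIGH; the
         * rest are opportunistic extra buffering and allocation failure
         * there just shrinks the ring.
         */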

        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                  __GFP_WAIT | __GFP_HIGH :
                                                  __GFP_WAIT);
                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
                                ring_size = i;
                                printk(KERN_ERR
                                       "PM: Failed to allocate LZO pages\n");
                                ret = -ENOMEM;
                                goto out_clean;
                        } else {
                                break;
                        }
                }
        }
        want = ring_size = i;
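        /*
         * From here on, page[] is a ring of read-ahead buffers: "want"
         * pages still need read requests submitted, "asked" reads are
         * in flight, and "have" buffers hold data ready for the
         * decompressor threads.
         */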

        printk(KERN_INFO
                "PM: Using %u thread(s) for decompression.\n"
                "PM: Loading and decompressing image data (%u pages) ...     ",
                nr_threads, nr_to_read);
        m = nr_to_read / 100;
        if (!m)
                m = 1;
        nr_pages = 0;
        bio = NULL;
        do_gettimeofday(&start);

        ret = snapshot_write_next(snapshot);
        if (ret <= 0)
                goto out_finish;

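        /*
         * Main loop: top up the read-ahead ring, hand complete
         * compressed blocks to the decompressor threads, then copy the
         * decompressed pages into the snapshot.  eof == 1 means the end
         * of the image was seen while submitting reads; eof == 2 means
         * all submitted reads have completed as well.
         */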
        for (;;) {
                for (i = 0; !eof && i < want; i++) {
                        ret = swap_read_page(handle, page[ring], &bio);
                        if (ret) {
                                /*
                                 * On real read error, finish. On end of data,
                                 * set EOF flag and just exit the read loop.
                                 */
                                if (handle->cur &&
                                    handle->cur->entries[handle->k]) {
                                        goto out_finish;
                                } else {
                                        eof = 1;
                                        break;
                                }
                        }
                        if (++ring >= ring_size)
                                ring = 0;
                }
                asked += i;
                want -= i;

                /*
                 * We are out of data, wait for some more.
                 */
                if (!have) {
                        if (!asked)
                                break;

                        ret = hib_wait_on_bio_chain(&bio);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }

                if (crc->run_threads) {
                        wait_event(crc->done, atomic_read(&crc->stop));
                        atomic_set(&crc->stop, 0);
                        crc->run_threads = 0;
                }

                for (thr = 0; have && thr < nr_threads; thr++) {
                        data[thr].cmp_len = *(size_t *)page[pg];
                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
                                            PAGE_SIZE);
                        if (need > have) {
                                if (eof > 1) {
                                        ret = -1;
                                        goto out_finish;
                                }
                                break;
                        }

                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(data[thr].cmp + off,
                                       page[pg], PAGE_SIZE);
                                have--;
                                want++;
                                if (++pg >= ring_size)
                                        pg = 0;
                        }

                        atomic_set(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }

                /*
                 * Wait for more data while we are decompressing.
                 */
                if (have < LZO_CMP_PAGES && asked) {
                        ret = hib_wait_on_bio_chain(&bio);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }

                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                printk(KERN_ERR
                                       "PM: LZO decompression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].unc_len ||
                                     data[thr].unc_len > LZO_UNC_SIZE ||
                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
                                printk(KERN_ERR
                                       "PM: Invalid LZO uncompressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        for (off = 0;
                             off < data[thr].unc_len; off += PAGE_SIZE) {
                                memcpy(data_of(*snapshot),
                                       data[thr].unc + off, PAGE_SIZE);

                                if (!(nr_pages % m))
                                        printk("\b\b\b\b%3d%%", nr_pages / m);
                                nr_pages++;

                                ret = snapshot_write_next(snapshot);
                                if (ret <= 0) {
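                                        /*
                                         * All image pages are written:
                                         * hand the blocks decompressed
                                         * so far (threads 0..thr) to
                                         * the CRC thread and finish.
                                         */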
                                        crc->run_threads = thr + 1;
                                        atomic_set(&crc->ready, 1);
                                        wake_up(&crc->go);
                                        goto out_finish;
                                }
                        }
                }

                crc->run_threads = thr;
                atomic_set(&crc->ready, 1);
                wake_up(&crc->go);
        }

out_finish:
        if (crc->run_threads) {
                wait_event(crc->done, atomic_read(&crc->stop));
                atomic_set(&crc->stop, 0);
        }
        do_gettimeofday(&stop);
        if (!ret) {
                printk("\b\b\b\bdone\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
                if (!ret) {
                        if (swsusp_header->flags & SF_CRC32_MODE) {
                                if (handle->crc32 != swsusp_header->crc32) {
                                        printk(KERN_ERR
                                               "PM: Invalid image CRC32!\n");
                                        ret = -ENODATA;
                                }
                        }
                }
        } else
                printk("\n");
        swsusp_show_speed(&start, &stop, nr_to_read, "Read");
out_clean:
        for (i = 0; i < ring_size; i++)
                free_page((unsigned long)page[i]);
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        if (page)
                vfree(page);

        return ret;
}

/**
 *      swsusp_read - read the hibernation image.
 *      @flags_p: flags passed by the "frozen" kernel in the image header
 *                should be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
        int error;
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;

        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_write_next(&snapshot);
        if (error < PAGE_SIZE)
                return error < 0 ? error : -EFAULT;
        header = (struct swsusp_info *)data_of(snapshot);
        error = get_swap_reader(&handle, flags_p);
        if (error)
                goto end;
        if (!error)
                error = swap_read_page(&handle, header, NULL);
        if (!error) {
                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
                        load_image(&handle, &snapshot, header->pages - 1) :
                        load_image_lzo(&handle, &snapshot, header->pages - 1);
        }
        swap_reader_finish(&handle);
end:
        if (!error)
                pr_debug("PM: Image successfully loaded\n");
        else
                pr_debug("PM: Error %d resuming\n", error);
        return error;
}

/**
 *      swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
        int error;

        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
                                            FMODE_READ, NULL);
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
                clear_page(swsusp_header);
                error = hib_bio_read_page(swsusp_resume_block,
                                        swsusp_header, NULL);
                if (error)
                        goto put;

                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                        /* Reset swap signature now */
                        error = hib_bio_write_page(swsusp_resume_block,
                                                swsusp_header, NULL);
                } else {
                        error = -EINVAL;
                }

put:
                if (error)
                        blkdev_put(hib_resume_bdev, FMODE_READ);
                else
                        pr_debug("PM: Image signature found, resuming\n");
        } else {
                error = PTR_ERR(hib_resume_bdev);
        }

        if (error)
                pr_debug("PM: Image not found (code %d)\n", error);

        return error;
}

/**
 *      swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
        if (IS_ERR(hib_resume_bdev)) {
                pr_debug("PM: Image device not initialised\n");
                return;
        }

        blkdev_put(hib_resume_bdev, mode);
}

static int swsusp_header_init(void)
{
        swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
        if (!swsusp_header)
                panic("Could not allocate memory for swsusp_header\n");
        return 0;
}

core_initcall(swsusp_header_init);