1 /*
2  *  linux/mm/swapfile.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *  Swap reorganised 29.12.95, Stephen Tweedie
6  */
7
8 #include <linux/mm.h>
9 #include <linux/hugetlb.h>
10 #include <linux/mman.h>
11 #include <linux/slab.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/swap.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pagemap.h>
16 #include <linux/namei.h>
17 #include <linux/shmem_fs.h>
18 #include <linux/blkdev.h>
19 #include <linux/random.h>
20 #include <linux/writeback.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/init.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/security.h>
27 #include <linux/backing-dev.h>
28 #include <linux/mutex.h>
29 #include <linux/capability.h>
30 #include <linux/syscalls.h>
31 #include <linux/memcontrol.h>
32 #include <linux/poll.h>
33 #include <linux/oom.h>
34 #include <linux/frontswap.h>
35 #include <linux/swapfile.h>
36 #include <linux/export.h>
37
38 #include <asm/pgtable.h>
39 #include <asm/tlbflush.h>
40 #include <linux/swapops.h>
41 #include <linux/page_cgroup.h>
42
43 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
44                                  unsigned char);
45 static void free_swap_count_continuations(struct swap_info_struct *);
46 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
47
48 DEFINE_SPINLOCK(swap_lock);
49 static unsigned int nr_swapfiles;
50 long nr_swap_pages;
51 long total_swap_pages;
52 static int least_priority;
53
54 static const char Bad_file[] = "Bad swap file entry ";
55 static const char Unused_file[] = "Unused swap file entry ";
56 static const char Bad_offset[] = "Bad swap offset entry ";
57 static const char Unused_offset[] = "Unused swap offset entry ";
58
59 struct swap_list_t swap_list = {-1, -1};
60
61 struct swap_info_struct *swap_info[MAX_SWAPFILES];
62
63 static DEFINE_MUTEX(swapon_mutex);
64
65 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
66 /* Activity counter to indicate that a swapon or swapoff has occurred */
67 static atomic_t proc_poll_event = ATOMIC_INIT(0);
68
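/*
 * Note on the swap_map encoding (assuming the usual <linux/swap.h> layout):
 * each swap_map[] entry is a single byte whose low bits count references to
 * that swap slot; SWAP_HAS_CACHE is set while the slot is also held by the
 * swap cache, and COUNT_CONTINUED marks a count that has overflowed into a
 * continuation page (see swap_count_continued()).
 */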
69 static inline unsigned char swap_count(unsigned char ent)
70 {
71         return ent & ~SWAP_HAS_CACHE;   /* may include COUNT_CONTINUED flag */
72 }
73
74 /* returns 1 if swap entry is freed */
75 static int
76 __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
77 {
78         swp_entry_t entry = swp_entry(si->type, offset);
79         struct page *page;
80         int ret = 0;
81
82         page = find_get_page(&swapper_space, entry.val);
83         if (!page)
84                 return 0;
85         /*
86          * This function is called from scan_swap_map(), which is reached from
87          * vmscan.c when reclaiming pages, so a page lock is already held here.
88          * We have to use trylock to avoid deadlock.  This is a special case;
89          * in usual operations use try_to_free_swap() with an explicit
90          * lock_page().
91          */
92         if (trylock_page(page)) {
93                 ret = try_to_free_swap(page);
94                 unlock_page(page);
95         }
96         page_cache_release(page);
97         return ret;
98 }
99
100 /*
101  * swapon tell device that all the old swap contents can be discarded,
102  * to allow the swap device to optimize its wear-levelling.
103  */
104 static int discard_swap(struct swap_info_struct *si)
105 {
106         struct swap_extent *se;
107         sector_t start_block;
108         sector_t nr_blocks;
109         int err = 0;
110
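        /*
         * blkdev_issue_discard() works in 512-byte sector units, hence the
         * << (PAGE_SHIFT - 9) conversions below: with 4K pages, for example,
         * each page spans eight sectors.
         */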
111         /* Do not discard the swap header page! */
112         se = &si->first_swap_extent;
113         start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
114         nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
115         if (nr_blocks) {
116                 err = blkdev_issue_discard(si->bdev, start_block,
117                                 nr_blocks, GFP_KERNEL, 0);
118                 if (err)
119                         return err;
120                 cond_resched();
121         }
122
123         list_for_each_entry(se, &si->first_swap_extent.list, list) {
124                 start_block = se->start_block << (PAGE_SHIFT - 9);
125                 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
126
127                 err = blkdev_issue_discard(si->bdev, start_block,
128                                 nr_blocks, GFP_KERNEL, 0);
129                 if (err)
130                         break;
131
132                 cond_resched();
133         }
134         return err;             /* That will often be -EOPNOTSUPP */
135 }
136
137 /*
138  * swap allocation tells the device that a cluster of swap can now be discarded,
139  * to allow the swap device to optimize its wear-levelling.
140  */
141 static void discard_swap_cluster(struct swap_info_struct *si,
142                                  pgoff_t start_page, pgoff_t nr_pages)
143 {
144         struct swap_extent *se = si->curr_swap_extent;
145         int found_extent = 0;
146
147         while (nr_pages) {
148                 struct list_head *lh;
149
150                 if (se->start_page <= start_page &&
151                     start_page < se->start_page + se->nr_pages) {
152                         pgoff_t offset = start_page - se->start_page;
153                         sector_t start_block = se->start_block + offset;
154                         sector_t nr_blocks = se->nr_pages - offset;
155
156                         if (nr_blocks > nr_pages)
157                                 nr_blocks = nr_pages;
158                         start_page += nr_blocks;
159                         nr_pages -= nr_blocks;
160
161                         if (!found_extent++)
162                                 si->curr_swap_extent = se;
163
164                         start_block <<= PAGE_SHIFT - 9;
165                         nr_blocks <<= PAGE_SHIFT - 9;
166                         if (blkdev_issue_discard(si->bdev, start_block,
167                                     nr_blocks, GFP_NOIO, 0))
168                                 break;
169                 }
170
171                 lh = se->list.next;
172                 se = list_entry(lh, struct swap_extent, list);
173         }
174 }
175
176 static int wait_for_discard(void *word)
177 {
178         schedule();
179         return 0;
180 }
181
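/*
 * SWAPFILE_CLUSTER is the number of slots allocated sequentially before
 * scan_swap_map() goes looking for a new free cluster (256 slots is 1MB
 * with 4K pages); LATENCY_LIMIT bounds how many slots are examined between
 * cond_resched() calls while scanning.
 */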
182 #define SWAPFILE_CLUSTER        256
183 #define LATENCY_LIMIT           256
184
185 static unsigned long scan_swap_map(struct swap_info_struct *si,
186                                    unsigned char usage)
187 {
188         unsigned long offset;
189         unsigned long scan_base;
190         unsigned long last_in_cluster = 0;
191         int latency_ration = LATENCY_LIMIT;
192         int found_free_cluster = 0;
193
194         /*
195          * We try to cluster swap pages by allocating them sequentially
196          * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
197          * way, however, we resort to first-free allocation, starting
198          * a new cluster.  This prevents us from scattering swap pages
199          * all over the entire swap partition, so that we reduce
200          * overall disk seek times between swap pages.  -- sct
201          * But we do now try to find an empty cluster.  -Andrea
202          * And we let swap pages go all over an SSD partition.  Hugh
203          */
204
205         si->flags += SWP_SCANNING;
206         scan_base = offset = si->cluster_next;
207
208         if (unlikely(!si->cluster_nr--)) {
209                 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
210                         si->cluster_nr = SWAPFILE_CLUSTER - 1;
211                         goto checks;
212                 }
213                 if (si->flags & SWP_DISCARDABLE) {
214                         /*
215                          * Start range check on racing allocations, in case
216                          * they overlap the cluster we eventually decide on
217                          * (we scan without swap_lock to allow preemption).
218                          * It's hardly conceivable that cluster_nr could be
219                          * wrapped during our scan, but don't depend on it.
220                          */
221                         if (si->lowest_alloc)
222                                 goto checks;
223                         si->lowest_alloc = si->max;
224                         si->highest_alloc = 0;
225                 }
226                 spin_unlock(&swap_lock);
227
228                 /*
229                  * If seek is expensive, start searching for new cluster from
230                  * start of partition, to minimize the span of allocated swap.
231                  * But if seek is cheap, search from our current position, so
232                  * that swap is allocated from all over the partition: if the
233                  * Flash Translation Layer only remaps within limited zones,
234                  * we don't want to wear out the first zone too quickly.
235                  */
236                 if (!(si->flags & SWP_SOLIDSTATE))
237                         scan_base = offset = si->lowest_bit;
238                 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
239
240                 /* Locate the first empty (unaligned) cluster */
241                 for (; last_in_cluster <= si->highest_bit; offset++) {
242                         if (si->swap_map[offset])
243                                 last_in_cluster = offset + SWAPFILE_CLUSTER;
244                         else if (offset == last_in_cluster) {
245                                 spin_lock(&swap_lock);
246                                 offset -= SWAPFILE_CLUSTER - 1;
247                                 si->cluster_next = offset;
248                                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
249                                 found_free_cluster = 1;
250                                 goto checks;
251                         }
252                         if (unlikely(--latency_ration < 0)) {
253                                 cond_resched();
254                                 latency_ration = LATENCY_LIMIT;
255                         }
256                 }
257
258                 offset = si->lowest_bit;
259                 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
260
261                 /* Locate the first empty (unaligned) cluster */
262                 for (; last_in_cluster < scan_base; offset++) {
263                         if (si->swap_map[offset])
264                                 last_in_cluster = offset + SWAPFILE_CLUSTER;
265                         else if (offset == last_in_cluster) {
266                                 spin_lock(&swap_lock);
267                                 offset -= SWAPFILE_CLUSTER - 1;
268                                 si->cluster_next = offset;
269                                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
270                                 found_free_cluster = 1;
271                                 goto checks;
272                         }
273                         if (unlikely(--latency_ration < 0)) {
274                                 cond_resched();
275                                 latency_ration = LATENCY_LIMIT;
276                         }
277                 }
278
279                 offset = scan_base;
280                 spin_lock(&swap_lock);
281                 si->cluster_nr = SWAPFILE_CLUSTER - 1;
282                 si->lowest_alloc = 0;
283         }
284
285 checks:
286         if (!(si->flags & SWP_WRITEOK))
287                 goto no_page;
288         if (!si->highest_bit)
289                 goto no_page;
290         if (offset > si->highest_bit)
291                 scan_base = offset = si->lowest_bit;
292
293         /* reuse swap entry of cache-only swap if not busy. */
294         if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
295                 int swap_was_freed;
296                 spin_unlock(&swap_lock);
297                 swap_was_freed = __try_to_reclaim_swap(si, offset);
298                 spin_lock(&swap_lock);
299                 /* entry was freed successfully, try to use this again */
300                 if (swap_was_freed)
301                         goto checks;
302                 goto scan; /* check next one */
303         }
304
305         if (si->swap_map[offset])
306                 goto scan;
307
308         if (offset == si->lowest_bit)
309                 si->lowest_bit++;
310         if (offset == si->highest_bit)
311                 si->highest_bit--;
312         si->inuse_pages++;
313         if (si->inuse_pages == si->pages) {
314                 si->lowest_bit = si->max;
315                 si->highest_bit = 0;
316         }
317         si->swap_map[offset] = usage;
318         si->cluster_next = offset + 1;
319         si->flags -= SWP_SCANNING;
320
321         if (si->lowest_alloc) {
322                 /*
323                  * Only set when SWP_DISCARDABLE, and there's a scan
324                  * for a free cluster in progress or just completed.
325                  */
326                 if (found_free_cluster) {
327                         /*
328                          * To optimize wear-levelling, discard the
329                          * old data of the cluster, taking care not to
330                          * discard any of its pages that have already
331                          * been allocated by racing tasks (offset has
332                          * already stepped over any at the beginning).
333                          */
334                         if (offset < si->highest_alloc &&
335                             si->lowest_alloc <= last_in_cluster)
336                                 last_in_cluster = si->lowest_alloc - 1;
337                         si->flags |= SWP_DISCARDING;
338                         spin_unlock(&swap_lock);
339
340                         if (offset < last_in_cluster)
341                                 discard_swap_cluster(si, offset,
342                                         last_in_cluster - offset + 1);
343
344                         spin_lock(&swap_lock);
345                         si->lowest_alloc = 0;
346                         si->flags &= ~SWP_DISCARDING;
347
348                         smp_mb();       /* wake_up_bit advises this */
349                         wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
350
351                 } else if (si->flags & SWP_DISCARDING) {
352                         /*
353                          * Delay using pages allocated by racing tasks
354                          * until the whole discard has been issued. We
355                          * could defer that delay until swap_writepage,
356                          * but it's easier to keep this self-contained.
357                          */
358                         spin_unlock(&swap_lock);
359                         wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
360                                 wait_for_discard, TASK_UNINTERRUPTIBLE);
361                         spin_lock(&swap_lock);
362                 } else {
363                         /*
364                          * Note pages allocated by racing tasks while
365                          * scan for a free cluster is in progress, so
366                          * that its final discard can exclude them.
367                          */
368                         if (offset < si->lowest_alloc)
369                                 si->lowest_alloc = offset;
370                         if (offset > si->highest_alloc)
371                                 si->highest_alloc = offset;
372                 }
373         }
374         return offset;
375
376 scan:
377         spin_unlock(&swap_lock);
378         while (++offset <= si->highest_bit) {
379                 if (!si->swap_map[offset]) {
380                         spin_lock(&swap_lock);
381                         goto checks;
382                 }
383                 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
384                         spin_lock(&swap_lock);
385                         goto checks;
386                 }
387                 if (unlikely(--latency_ration < 0)) {
388                         cond_resched();
389                         latency_ration = LATENCY_LIMIT;
390                 }
391         }
392         offset = si->lowest_bit;
393         while (++offset < scan_base) {
394                 if (!si->swap_map[offset]) {
395                         spin_lock(&swap_lock);
396                         goto checks;
397                 }
398                 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
399                         spin_lock(&swap_lock);
400                         goto checks;
401                 }
402                 if (unlikely(--latency_ration < 0)) {
403                         cond_resched();
404                         latency_ration = LATENCY_LIMIT;
405                 }
406         }
407         spin_lock(&swap_lock);
408
409 no_page:
410         si->flags -= SWP_SCANNING;
411         return 0;
412 }
413
414 swp_entry_t get_swap_page(void)
415 {
416         struct swap_info_struct *si;
417         pgoff_t offset;
418         int type, next;
419         int wrapped = 0;
420
421         spin_lock(&swap_lock);
422         if (nr_swap_pages <= 0)
423                 goto noswap;
424         nr_swap_pages--;
425
426         for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
427                 si = swap_info[type];
428                 next = si->next;
429                 if (next < 0 ||
430                     (!wrapped && si->prio != swap_info[next]->prio)) {
431                         next = swap_list.head;
432                         wrapped++;
433                 }
434
435                 if (!si->highest_bit)
436                         continue;
437                 if (!(si->flags & SWP_WRITEOK))
438                         continue;
439
440                 swap_list.next = next;
441                 /* This is called to allocate a swap entry for the swap cache */
442                 offset = scan_swap_map(si, SWAP_HAS_CACHE);
443                 if (offset) {
444                         spin_unlock(&swap_lock);
445                         return swp_entry(type, offset);
446                 }
447                 next = swap_list.next;
448         }
449
450         nr_swap_pages++;
451 noswap:
452         spin_unlock(&swap_lock);
453         return (swp_entry_t) {0};
454 }
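/*
 * For illustration: the usual caller is add_to_swap() in mm/swap_state.c,
 * which takes the entry returned here and installs the page in the swap
 * cache before it is written out.
 */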
455
456 /* The only caller of this function is now the suspend routine */
457 swp_entry_t get_swap_page_of_type(int type)
458 {
459         struct swap_info_struct *si;
460         pgoff_t offset;
461
462         spin_lock(&swap_lock);
463         si = swap_info[type];
464         if (si && (si->flags & SWP_WRITEOK)) {
465                 nr_swap_pages--;
466                 /* This is called to allocate a swap entry directly, not for the swap cache */
467                 offset = scan_swap_map(si, 1);
468                 if (offset) {
469                         spin_unlock(&swap_lock);
470                         return swp_entry(type, offset);
471                 }
472                 nr_swap_pages++;
473         }
474         spin_unlock(&swap_lock);
475         return (swp_entry_t) {0};
476 }
477
478 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
479 {
480         struct swap_info_struct *p;
481         unsigned long offset, type;
482
483         if (!entry.val)
484                 goto out;
485         type = swp_type(entry);
486         if (type >= nr_swapfiles)
487                 goto bad_nofile;
488         p = swap_info[type];
489         if (!(p->flags & SWP_USED))
490                 goto bad_device;
491         offset = swp_offset(entry);
492         if (offset >= p->max)
493                 goto bad_offset;
494         if (!p->swap_map[offset])
495                 goto bad_free;
496         spin_lock(&swap_lock);
497         return p;
498
499 bad_free:
500         printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
501         goto out;
502 bad_offset:
503         printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
504         goto out;
505 bad_device:
506         printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
507         goto out;
508 bad_nofile:
509         printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
510 out:
511         return NULL;
512 }
513
514 static unsigned char swap_entry_free(struct swap_info_struct *p,
515                                      swp_entry_t entry, unsigned char usage)
516 {
517         unsigned long offset = swp_offset(entry);
518         unsigned char count;
519         unsigned char has_cache;
520
521         count = p->swap_map[offset];
522         has_cache = count & SWAP_HAS_CACHE;
523         count &= ~SWAP_HAS_CACHE;
524
525         if (usage == SWAP_HAS_CACHE) {
526                 VM_BUG_ON(!has_cache);
527                 has_cache = 0;
528         } else if (count == SWAP_MAP_SHMEM) {
529                 /*
530                  * Or we could insist on shmem.c using a special
531                  * swap_shmem_free() and free_shmem_swap_and_cache()...
532                  */
533                 count = 0;
534         } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
535                 if (count == COUNT_CONTINUED) {
536                         if (swap_count_continued(p, offset, count))
537                                 count = SWAP_MAP_MAX | COUNT_CONTINUED;
538                         else
539                                 count = SWAP_MAP_MAX;
540                 } else
541                         count--;
542         }
543
544         if (!count)
545                 mem_cgroup_uncharge_swap(entry);
546
547         usage = count | has_cache;
548         p->swap_map[offset] = usage;
549
550         /* free if no reference */
551         if (!usage) {
552                 struct gendisk *disk = p->bdev->bd_disk;
553                 if (offset < p->lowest_bit)
554                         p->lowest_bit = offset;
555                 if (offset > p->highest_bit)
556                         p->highest_bit = offset;
557                 if (swap_list.next >= 0 &&
558                     p->prio > swap_info[swap_list.next]->prio)
559                         swap_list.next = p->type;
560                 nr_swap_pages++;
561                 p->inuse_pages--;
562                 frontswap_invalidate_page(p->type, offset);
563                 if ((p->flags & SWP_BLKDEV) &&
564                                 disk->fops->swap_slot_free_notify)
565                         disk->fops->swap_slot_free_notify(p->bdev, offset);
566         }
567
568         return usage;
569 }
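/*
 * swap_entry_free() returns the updated swap_map value, which lets a caller
 * such as free_swap_and_cache() detect the case where only the swapcache
 * reference (SWAP_HAS_CACHE) remains.
 */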
570
571 /*
572  * Caller has made sure that the swapdevice corresponding to entry
573  * is still around and has not been recycled.
574  */
575 void swap_free(swp_entry_t entry)
576 {
577         struct swap_info_struct *p;
578
579         p = swap_info_get(entry);
580         if (p) {
581                 swap_entry_free(p, entry, 1);
582                 spin_unlock(&swap_lock);
583         }
584 }
585
586 /*
587  * Called after dropping swapcache to decrease refcnt to swap entries.
588  */
589 void swapcache_free(swp_entry_t entry, struct page *page)
590 {
591         struct swap_info_struct *p;
592         unsigned char count;
593
594         p = swap_info_get(entry);
595         if (p) {
596                 count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
597                 if (page)
598                         mem_cgroup_uncharge_swapcache(page, entry, count != 0);
599                 spin_unlock(&swap_lock);
600         }
601 }
602
603 /*
604  * How many references to page are currently swapped out?
605  * This does not give an exact answer when swap count is continued,
606  * but does include the high COUNT_CONTINUED flag to allow for that.
607  */
608 int page_swapcount(struct page *page)
609 {
610         int count = 0;
611         struct swap_info_struct *p;
612         swp_entry_t entry;
613
614         entry.val = page_private(page);
615         p = swap_info_get(entry);
616         if (p) {
617                 count = swap_count(p->swap_map[swp_offset(entry)]);
618                 spin_unlock(&swap_lock);
619         }
620         return count;
621 }
622
623 /*
624  * We can write to an anon page without COW if there are no other references
625  * to it.  And as a side-effect, free up its swap: because the old content
626  * on disk will never be read, and seeking back there to write new content
627  * later would only waste time away from clustering.
628  */
629 int reuse_swap_page(struct page *page)
630 {
631         int count;
632
633         VM_BUG_ON(!PageLocked(page));
634         if (unlikely(PageKsm(page)))
635                 return 0;
636         count = page_mapcount(page);
637         if (count <= 1 && PageSwapCache(page)) {
638                 count += page_swapcount(page);
639                 if (count == 1 && !PageWriteback(page)) {
640                         delete_from_swap_cache(page);
641                         SetPageDirty(page);
642                 }
643         }
644         return count <= 1;
645 }
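/*
 * For illustration: do_wp_page() in mm/memory.c uses reuse_swap_page() to
 * decide whether a write fault on an anonymous page may reuse the page in
 * place rather than copy it.
 */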
646
647 /*
648  * If swap is getting full, or if there are no more mappings of this page,
649  * then try_to_free_swap is called to free its swap space.
650  */
651 int try_to_free_swap(struct page *page)
652 {
653         VM_BUG_ON(!PageLocked(page));
654
655         if (!PageSwapCache(page))
656                 return 0;
657         if (PageWriteback(page))
658                 return 0;
659         if (page_swapcount(page))
660                 return 0;
661
662         /*
663          * Once hibernation has begun to create its image of memory,
664          * there's a danger that one of the calls to try_to_free_swap()
665          * - most probably a call from __try_to_reclaim_swap() while
666          * hibernation is allocating its own swap pages for the image,
667          * but conceivably even a call from memory reclaim - will free
668          * the swap from a page which has already been recorded in the
669          * image as a clean swapcache page, and then reuse its swap for
670          * another page of the image.  On waking from hibernation, the
671          * original page might be freed under memory pressure, then
672          * later read back in from swap, now with the wrong data.
673          *
674          * Hibernation suspends storage while it is writing the image
675          * to disk, so check that here.
676          */
677         if (pm_suspended_storage())
678                 return 0;
679
680         delete_from_swap_cache(page);
681         SetPageDirty(page);
682         return 1;
683 }
684
685 /*
686  * Free the swap entry like above, but also try to
687  * free the page cache entry if it is the last user.
688  */
689 int free_swap_and_cache(swp_entry_t entry)
690 {
691         struct swap_info_struct *p;
692         struct page *page = NULL;
693
694         if (non_swap_entry(entry))
695                 return 1;
696
697         p = swap_info_get(entry);
698         if (p) {
699                 if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
700                         page = find_get_page(&swapper_space, entry.val);
701                         if (page && !trylock_page(page)) {
702                                 page_cache_release(page);
703                                 page = NULL;
704                         }
705                 }
706                 spin_unlock(&swap_lock);
707         }
708         if (page) {
709                 /*
710                  * Not mapped elsewhere, or swap space full? Free it!
711                  * Also recheck PageSwapCache now page is locked (above).
712                  */
713                 if (PageSwapCache(page) && !PageWriteback(page) &&
714                                 (!page_mapped(page) || vm_swap_full())) {
715                         delete_from_swap_cache(page);
716                         SetPageDirty(page);
717                 }
718                 unlock_page(page);
719                 page_cache_release(page);
720         }
721         return p != NULL;
722 }
723
724 #ifdef CONFIG_HIBERNATION
725 /*
726  * Find the swap type that corresponds to given device (if any).
727  *
728  * @offset - number of the PAGE_SIZE-sized block of the device, starting
729  * from 0, in which the swap header is expected to be located.
730  *
731  * This is needed for the suspend to disk (aka swsusp).
732  */
733 int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
734 {
735         struct block_device *bdev = NULL;
736         int type;
737
738         if (device)
739                 bdev = bdget(device);
740
741         spin_lock(&swap_lock);
742         for (type = 0; type < nr_swapfiles; type++) {
743                 struct swap_info_struct *sis = swap_info[type];
744
745                 if (!(sis->flags & SWP_WRITEOK))
746                         continue;
747
748                 if (!bdev) {
749                         if (bdev_p)
750                                 *bdev_p = bdgrab(sis->bdev);
751
752                         spin_unlock(&swap_lock);
753                         return type;
754                 }
755                 if (bdev == sis->bdev) {
756                         struct swap_extent *se = &sis->first_swap_extent;
757
758                         if (se->start_block == offset) {
759                                 if (bdev_p)
760                                         *bdev_p = bdgrab(sis->bdev);
761
762                                 spin_unlock(&swap_lock);
763                                 bdput(bdev);
764                                 return type;
765                         }
766                 }
767         }
768         spin_unlock(&swap_lock);
769         if (bdev)
770                 bdput(bdev);
771
772         return -ENODEV;
773 }
774
775 /*
776  * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
777  * corresponding to given index in swap_info (swap type).
778  */
779 sector_t swapdev_block(int type, pgoff_t offset)
780 {
781         struct block_device *bdev;
782
783         if ((unsigned int)type >= nr_swapfiles)
784                 return 0;
785         if (!(swap_info[type]->flags & SWP_WRITEOK))
786                 return 0;
787         return map_swap_entry(swp_entry(type, offset), &bdev);
788 }
789
790 /*
791  * Return either the total number of swap pages of given type, or the number
792  * of free pages of that type (depending on @free)
793  *
794  * This is needed for software suspend
795  */
796 unsigned int count_swap_pages(int type, int free)
797 {
798         unsigned int n = 0;
799
800         spin_lock(&swap_lock);
801         if ((unsigned int)type < nr_swapfiles) {
802                 struct swap_info_struct *sis = swap_info[type];
803
804                 if (sis->flags & SWP_WRITEOK) {
805                         n = sis->pages;
806                         if (free)
807                                 n -= sis->inuse_pages;
808                 }
809         }
810         spin_unlock(&swap_lock);
811         return n;
812 }
813 #endif /* CONFIG_HIBERNATION */
814
815 /*
816  * No need to decide whether this PTE shares the swap entry with others,
817  * just let do_wp_page work it out if a write is requested later - to
818  * force COW, vm_page_prot omits write permission from any private vma.
819  */
820 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
821                 unsigned long addr, swp_entry_t entry, struct page *page)
822 {
823         struct mem_cgroup *memcg;
824         spinlock_t *ptl;
825         pte_t *pte;
826         int ret = 1;
827
828         if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
829                                          GFP_KERNEL, &memcg)) {
830                 ret = -ENOMEM;
831                 goto out_nolock;
832         }
833
834         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
835         if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
836                 if (ret > 0)
837                         mem_cgroup_cancel_charge_swapin(memcg);
838                 ret = 0;
839                 goto out;
840         }
841
842         dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
843         inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
844         get_page(page);
845         set_pte_at(vma->vm_mm, addr, pte,
846                    pte_mkold(mk_pte(page, vma->vm_page_prot)));
847         page_add_anon_rmap(page, vma, addr);
848         mem_cgroup_commit_charge_swapin(page, memcg);
849         swap_free(entry);
850         /*
851          * Move the page to the active list so it is not
852          * immediately swapped out again after swapon.
853          */
854         activate_page(page);
855 out:
856         pte_unmap_unlock(pte, ptl);
857 out_nolock:
858         return ret;
859 }
860
861 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
862                                 unsigned long addr, unsigned long end,
863                                 swp_entry_t entry, struct page *page)
864 {
865         pte_t swp_pte = swp_entry_to_pte(entry);
866         pte_t *pte;
867         int ret = 0;
868
869         /*
870          * We don't actually need pte lock while scanning for swp_pte: since
871          * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
872          * page table while we're scanning; though it could get zapped, and on
873          * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
874          * of unmatched parts which look like swp_pte, so unuse_pte must
875          * recheck under pte lock.  Scanning without pte lock lets it be
876          * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
877          */
878         pte = pte_offset_map(pmd, addr);
879         do {
880                 /*
881                  * swapoff spends a _lot_ of time in this loop!
882                  * Test inline before going to call unuse_pte.
883                  */
884                 if (unlikely(pte_same(*pte, swp_pte))) {
885                         pte_unmap(pte);
886                         ret = unuse_pte(vma, pmd, addr, entry, page);
887                         if (ret)
888                                 goto out;
889                         pte = pte_offset_map(pmd, addr);
890                 }
891         } while (pte++, addr += PAGE_SIZE, addr != end);
892         pte_unmap(pte - 1);
893 out:
894         return ret;
895 }
896
897 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
898                                 unsigned long addr, unsigned long end,
899                                 swp_entry_t entry, struct page *page)
900 {
901         pmd_t *pmd;
902         unsigned long next;
903         int ret;
904
905         pmd = pmd_offset(pud, addr);
906         do {
907                 next = pmd_addr_end(addr, end);
908                 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
909                         continue;
910                 ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
911                 if (ret)
912                         return ret;
913         } while (pmd++, addr = next, addr != end);
914         return 0;
915 }
916
917 static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
918                                 unsigned long addr, unsigned long end,
919                                 swp_entry_t entry, struct page *page)
920 {
921         pud_t *pud;
922         unsigned long next;
923         int ret;
924
925         pud = pud_offset(pgd, addr);
926         do {
927                 next = pud_addr_end(addr, end);
928                 if (pud_none_or_clear_bad(pud))
929                         continue;
930                 ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
931                 if (ret)
932                         return ret;
933         } while (pud++, addr = next, addr != end);
934         return 0;
935 }
936
937 static int unuse_vma(struct vm_area_struct *vma,
938                                 swp_entry_t entry, struct page *page)
939 {
940         pgd_t *pgd;
941         unsigned long addr, end, next;
942         int ret;
943
944         if (page_anon_vma(page)) {
945                 addr = page_address_in_vma(page, vma);
946                 if (addr == -EFAULT)
947                         return 0;
948                 else
949                         end = addr + PAGE_SIZE;
950         } else {
951                 addr = vma->vm_start;
952                 end = vma->vm_end;
953         }
954
955         pgd = pgd_offset(vma->vm_mm, addr);
956         do {
957                 next = pgd_addr_end(addr, end);
958                 if (pgd_none_or_clear_bad(pgd))
959                         continue;
960                 ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
961                 if (ret)
962                         return ret;
963         } while (pgd++, addr = next, addr != end);
964         return 0;
965 }
966
967 static int unuse_mm(struct mm_struct *mm,
968                                 swp_entry_t entry, struct page *page)
969 {
970         struct vm_area_struct *vma;
971         int ret = 0;
972
973         if (!down_read_trylock(&mm->mmap_sem)) {
974                 /*
975                  * Activate page so shrink_inactive_list is unlikely to unmap
976                  * its ptes while lock is dropped, so swapoff can make progress.
977                  */
978                 activate_page(page);
979                 unlock_page(page);
980                 down_read(&mm->mmap_sem);
981                 lock_page(page);
982         }
983         for (vma = mm->mmap; vma; vma = vma->vm_next) {
984                 if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
985                         break;
986         }
987         up_read(&mm->mmap_sem);
988         return (ret < 0)? ret: 0;
989 }
990
991 /*
992  * Scan swap_map (or frontswap_map if frontswap parameter is true)
993  * from current position to next entry still in use.
994  * Recycle to start on reaching the end, returning 0 when empty.
995  */
996 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
997                                         unsigned int prev, bool frontswap)
998 {
999         unsigned int max = si->max;
1000         unsigned int i = prev;
1001         unsigned char count;
1002
1003         /*
1004          * No need for swap_lock here: we're just looking
1005          * for whether an entry is in use, not modifying it; false
1006          * hits are okay, and sys_swapoff() has already prevented new
1007          * allocations from this area (while holding swap_lock).
1008          */
1009         for (;;) {
1010                 if (++i >= max) {
1011                         if (!prev) {
1012                                 i = 0;
1013                                 break;
1014                         }
1015                         /*
1016                          * No entries in use at top of swap_map,
1017                          * loop back to start and recheck there.
1018                          */
1019                         max = prev + 1;
1020                         prev = 0;
1021                         i = 1;
1022                 }
1023                 if (frontswap) {
1024                         if (frontswap_test(si, i))
1025                                 break;
1026                         else
1027                                 continue;
1028                 }
1029                 count = si->swap_map[i];
1030                 if (count && swap_count(count) != SWAP_MAP_BAD)
1031                         break;
1032         }
1033         return i;
1034 }
1035
1036 /*
1037  * We completely avoid races by reading each swap page in advance,
1038  * and then search for the process using it.  All the necessary
1039  * page table adjustments can then be made atomically.
1040  *
1041  * if the boolean frontswap is true, only unuse pages_to_unuse pages;
1042  * pages_to_unuse==0 means all pages; ignored if frontswap is false
1043  */
1044 int try_to_unuse(unsigned int type, bool frontswap,
1045                  unsigned long pages_to_unuse)
1046 {
1047         struct swap_info_struct *si = swap_info[type];
1048         struct mm_struct *start_mm;
1049         unsigned char *swap_map;
1050         unsigned char swcount;
1051         struct page *page;
1052         swp_entry_t entry;
1053         unsigned int i = 0;
1054         int retval = 0;
1055
1056         /*
1057          * When searching mms for an entry, a good strategy is to
1058          * start at the first mm we freed the previous entry from
1059          * (though actually we don't notice whether we or coincidence
1060          * freed the entry).  Initialize this start_mm with a hold.
1061          *
1062          * A simpler strategy would be to start at the last mm we
1063          * freed the previous entry from; but that would take less
1064          * advantage of mmlist ordering, which clusters forked mms
1065          * together, child after parent.  If we race with dup_mmap(), we
1066          * prefer to resolve parent before child, lest we miss entries
1067          * duplicated after we scanned child: using last mm would invert
1068          * that.
1069          */
1070         start_mm = &init_mm;
1071         atomic_inc(&init_mm.mm_users);
1072
1073         /*
1074          * Keep on scanning until all entries have gone.  Usually,
1075          * one pass through swap_map is enough, but not necessarily:
1076          * there are races when an instance of an entry might be missed.
1077          */
1078         while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1079                 if (signal_pending(current)) {
1080                         retval = -EINTR;
1081                         break;
1082                 }
1083
1084                 /*
1085                  * Get a page for the entry, using the existing swap
1086                  * cache page if there is one.  Otherwise, get a clean
1087                  * page and read the swap into it.
1088                  */
1089                 swap_map = &si->swap_map[i];
1090                 entry = swp_entry(type, i);
1091                 page = read_swap_cache_async(entry,
1092                                         GFP_HIGHUSER_MOVABLE, NULL, 0);
1093                 if (!page) {
1094                         /*
1095                          * Either swap_duplicate() failed because entry
1096                          * has been freed independently, and will not be
1097                          * reused since sys_swapoff() already disabled
1098                          * allocation from here, or alloc_page() failed.
1099                          */
1100                         if (!*swap_map)
1101                                 continue;
1102                         retval = -ENOMEM;
1103                         break;
1104                 }
1105
1106                 /*
1107                  * Don't hold on to start_mm if it looks like exiting.
1108                  */
1109                 if (atomic_read(&start_mm->mm_users) == 1) {
1110                         mmput(start_mm);
1111                         start_mm = &init_mm;
1112                         atomic_inc(&init_mm.mm_users);
1113                 }
1114
1115                 /*
1116                  * Wait for and lock page.  When do_swap_page races with
1117                  * try_to_unuse, do_swap_page can handle the fault much
1118                  * faster than try_to_unuse can locate the entry.  This
1119                  * apparently redundant "wait_on_page_locked" lets try_to_unuse
1120                  * defer to do_swap_page in such a case - in some tests,
1121                  * do_swap_page and try_to_unuse repeatedly compete.
1122                  */
1123                 wait_on_page_locked(page);
1124                 wait_on_page_writeback(page);
1125                 lock_page(page);
1126                 wait_on_page_writeback(page);
1127
1128                 /*
1129                  * Remove all references to entry.
1130                  */
1131                 swcount = *swap_map;
1132                 if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1133                         retval = shmem_unuse(entry, page);
1134                         /* page has already been unlocked and released */
1135                         if (retval < 0)
1136                                 break;
1137                         continue;
1138                 }
1139                 if (swap_count(swcount) && start_mm != &init_mm)
1140                         retval = unuse_mm(start_mm, entry, page);
1141
1142                 if (swap_count(*swap_map)) {
1143                         int set_start_mm = (*swap_map >= swcount);
1144                         struct list_head *p = &start_mm->mmlist;
1145                         struct mm_struct *new_start_mm = start_mm;
1146                         struct mm_struct *prev_mm = start_mm;
1147                         struct mm_struct *mm;
1148
1149                         atomic_inc(&new_start_mm->mm_users);
1150                         atomic_inc(&prev_mm->mm_users);
1151                         spin_lock(&mmlist_lock);
1152                         while (swap_count(*swap_map) && !retval &&
1153                                         (p = p->next) != &start_mm->mmlist) {
1154                                 mm = list_entry(p, struct mm_struct, mmlist);
1155                                 if (!atomic_inc_not_zero(&mm->mm_users))
1156                                         continue;
1157                                 spin_unlock(&mmlist_lock);
1158                                 mmput(prev_mm);
1159                                 prev_mm = mm;
1160
1161                                 cond_resched();
1162
1163                                 swcount = *swap_map;
1164                                 if (!swap_count(swcount)) /* any usage ? */
1165                                         ;
1166                                 else if (mm == &init_mm)
1167                                         set_start_mm = 1;
1168                                 else
1169                                         retval = unuse_mm(mm, entry, page);
1170
1171                                 if (set_start_mm && *swap_map < swcount) {
1172                                         mmput(new_start_mm);
1173                                         atomic_inc(&mm->mm_users);
1174                                         new_start_mm = mm;
1175                                         set_start_mm = 0;
1176                                 }
1177                                 spin_lock(&mmlist_lock);
1178                         }
1179                         spin_unlock(&mmlist_lock);
1180                         mmput(prev_mm);
1181                         mmput(start_mm);
1182                         start_mm = new_start_mm;
1183                 }
1184                 if (retval) {
1185                         unlock_page(page);
1186                         page_cache_release(page);
1187                         break;
1188                 }
1189
1190                 /*
1191                  * If a reference remains (rare), we would like to leave
1192                  * the page in the swap cache; but try_to_unmap could
1193                  * then re-duplicate the entry once we drop page lock,
1194                  * so we might loop indefinitely; also, that page could
1195                  * not be swapped out to other storage meanwhile.  So:
1196                  * delete from cache even if there's another reference,
1197                  * after ensuring that the data has been saved to disk -
1198                  * since if the reference remains (rarer), it will be
1199                  * read from disk into another page.  Splitting into two
1200                  * pages would be incorrect if swap supported "shared
1201                  * private" pages, but they are handled by tmpfs files.
1202                  *
1203                  * Given how unuse_vma() targets one particular offset
1204                  * in an anon_vma, once the anon_vma has been determined,
1205                  * this splitting happens to be just what is needed to
1206                  * handle where KSM pages have been swapped out: re-reading
1207                  * is unnecessarily slow, but we can fix that later on.
1208                  */
1209                 if (swap_count(*swap_map) &&
1210                      PageDirty(page) && PageSwapCache(page)) {
1211                         struct writeback_control wbc = {
1212                                 .sync_mode = WB_SYNC_NONE,
1213                         };
1214
1215                         swap_writepage(page, &wbc);
1216                         lock_page(page);
1217                         wait_on_page_writeback(page);
1218                 }
1219
1220                 /*
1221                  * It is conceivable that a racing task removed this page from
1222                  * swap cache just before we acquired the page lock at the top,
1223                  * or while we dropped it in unuse_mm().  The page might even
1224                  * be back in swap cache on another swap area: that we must not
1225                  * delete, since it may not have been written out to swap yet.
1226                  */
1227                 if (PageSwapCache(page) &&
1228                     likely(page_private(page) == entry.val))
1229                         delete_from_swap_cache(page);
1230
1231                 /*
1232                  * So we could skip searching mms once swap count went
1233                  * to 1, we did not mark any present ptes as dirty: must
1234                  * mark page dirty so shrink_page_list will preserve it.
1235                  */
1236                 SetPageDirty(page);
1237                 unlock_page(page);
1238                 page_cache_release(page);
1239
1240                 /*
1241                  * Make sure that we aren't completely killing
1242                  * interactive performance.
1243                  */
1244                 cond_resched();
1245                 if (frontswap && pages_to_unuse > 0) {
1246                         if (!--pages_to_unuse)
1247                                 break;
1248                 }
1249         }
1250
1251         mmput(start_mm);
1252         return retval;
1253 }
1254
1255 /*
1256  * After a successful try_to_unuse, if no swap is now in use, we know
1257  * we can empty the mmlist.  swap_lock must be held on entry and exit.
1258  * Note that mmlist_lock nests inside swap_lock, and an mm must be
1259  * added to the mmlist just after page_duplicate - before would be racy.
1260  */
1261 static void drain_mmlist(void)
1262 {
1263         struct list_head *p, *next;
1264         unsigned int type;
1265
1266         for (type = 0; type < nr_swapfiles; type++)
1267                 if (swap_info[type]->inuse_pages)
1268                         return;
1269         spin_lock(&mmlist_lock);
1270         list_for_each_safe(p, next, &init_mm.mmlist)
1271                 list_del_init(p);
1272         spin_unlock(&mmlist_lock);
1273 }
1274
1275 /*
1276  * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1277  * corresponds to page offset for the specified swap entry.
1278  * Note that the type of this function is sector_t, but it returns page offset
1279  * into the bdev, not sector offset.
1280  */
1281 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1282 {
1283         struct swap_info_struct *sis;
1284         struct swap_extent *start_se;
1285         struct swap_extent *se;
1286         pgoff_t offset;
1287
1288         sis = swap_info[swp_type(entry)];
1289         *bdev = sis->bdev;
1290
1291         offset = swp_offset(entry);
1292         start_se = sis->curr_swap_extent;
1293         se = start_se;
1294
1295         for ( ; ; ) {
1296                 struct list_head *lh;
1297
1298                 if (se->start_page <= offset &&
1299                                 offset < (se->start_page + se->nr_pages)) {
1300                         return se->start_block + (offset - se->start_page);
1301                 }
1302                 lh = se->list.next;
1303                 se = list_entry(lh, struct swap_extent, list);
1304                 sis->curr_swap_extent = se;
1305                 BUG_ON(se == start_se);         /* It *must* be present */
1306         }
1307 }
1308
1309 /*
1310  * Returns the page offset into bdev for the specified page's swap entry.
1311  */
1312 sector_t map_swap_page(struct page *page, struct block_device **bdev)
1313 {
1314         swp_entry_t entry;
1315         entry.val = page_private(page);
1316         return map_swap_entry(entry, bdev);
1317 }
1318
1319 /*
1320  * Free all of a swapdev's extent information
1321  */
1322 static void destroy_swap_extents(struct swap_info_struct *sis)
1323 {
1324         while (!list_empty(&sis->first_swap_extent.list)) {
1325                 struct swap_extent *se;
1326
1327                 se = list_entry(sis->first_swap_extent.list.next,
1328                                 struct swap_extent, list);
1329                 list_del(&se->list);
1330                 kfree(se);
1331         }
1332 }
1333
1334 /*
1335  * Add a block range (and the corresponding page range) into this swapdev's
1336  * extent list.  The extent list is kept sorted in page order.
1337  *
1338  * This function rather assumes that it is called in ascending page order.
1339  */
1340 static int
1341 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1342                 unsigned long nr_pages, sector_t start_block)
1343 {
1344         struct swap_extent *se;
1345         struct swap_extent *new_se;
1346         struct list_head *lh;
1347
1348         if (start_page == 0) {
1349                 se = &sis->first_swap_extent;
1350                 sis->curr_swap_extent = se;
1351                 se->start_page = 0;
1352                 se->nr_pages = nr_pages;
1353                 se->start_block = start_block;
1354                 return 1;
1355         } else {
1356                 lh = sis->first_swap_extent.list.prev;  /* Highest extent */
1357                 se = list_entry(lh, struct swap_extent, list);
1358                 BUG_ON(se->start_page + se->nr_pages != start_page);
1359                 if (se->start_block + se->nr_pages == start_block) {
1360                         /* Merge it */
1361                         se->nr_pages += nr_pages;
1362                         return 0;
1363                 }
1364         }
1365
1366         /*
1367          * No merge.  Insert a new extent, preserving ordering.
1368          */
1369         new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1370         if (new_se == NULL)
1371                 return -ENOMEM;
1372         new_se->start_page = start_page;
1373         new_se->nr_pages = nr_pages;
1374         new_se->start_block = start_block;
1375
1376         list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1377         return 1;
1378 }
1379
1380 /*
1381  * A `swap extent' is a simple thing which maps a contiguous range of pages
1382  * onto a contiguous range of disk blocks.  An ordered list of swap extents
1383  * is built at swapon time and is then used at swap_writepage/swap_readpage
1384  * time for locating where on disk a page belongs.
1385  *
1386  * If the swapfile is an S_ISBLK block device, a single extent is installed.
1387  * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1388  * swap files identically.
1389  *
1390  * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1391  * extent list operates in PAGE_SIZE disk blocks.  Both S_ISREG and S_ISBLK
1392  * swapfiles are handled *identically* after swapon time.
1393  *
1394  * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1395  * and will parse them into an ordered extent list, in PAGE_SIZE chunks.  If
1396  * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1397  * requirements, they are simply tossed out - we will never use those blocks
1398  * for swapping.
1399  *
1400  * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon.  This
1401  * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1402  * which will scribble on the fs.
1403  *
1404  * The amount of disk space which a single swap extent represents varies.
1405  * Typically it is in the 1-4 megabyte range.  So we can have hundreds of
1406  * extents in the list.  To avoid much list walking, we cache the previous
1407  * search location in `curr_swap_extent', and start new searches from there.
1408  * This is extremely effective.  The average number of iterations in
1409  * map_swap_page() has been measured at about 0.3 per page.  - akpm.
1410  */
1411 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1412 {
1413         struct inode *inode;
1414         unsigned blocks_per_page;
1415         unsigned long page_no;
1416         unsigned blkbits;
1417         sector_t probe_block;
1418         sector_t last_block;
1419         sector_t lowest_block = -1;
1420         sector_t highest_block = 0;
1421         int nr_extents = 0;
1422         int ret;
1423
1424         inode = sis->swap_file->f_mapping->host;
1425         if (S_ISBLK(inode->i_mode)) {
1426                 ret = add_swap_extent(sis, 0, sis->max, 0);
1427                 *span = sis->pages;
1428                 goto out;
1429         }
1430
1431         blkbits = inode->i_blkbits;
1432         blocks_per_page = PAGE_SIZE >> blkbits;
1433
1434         /*
1435          * Map all the blocks into the extent list.  This code doesn't try
1436          * to be very smart.
1437          */
1438         probe_block = 0;
1439         page_no = 0;
1440         last_block = i_size_read(inode) >> blkbits;
1441         while ((probe_block + blocks_per_page) <= last_block &&
1442                         page_no < sis->max) {
1443                 unsigned block_in_page;
1444                 sector_t first_block;
1445
1446                 first_block = bmap(inode, probe_block);
1447                 if (first_block == 0)
1448                         goto bad_bmap;
1449
1450                 /*
1451                  * It must be PAGE_SIZE aligned on-disk
1452                  */
1453                 if (first_block & (blocks_per_page - 1)) {
1454                         probe_block++;
1455                         goto reprobe;
1456                 }
1457
1458                 for (block_in_page = 1; block_in_page < blocks_per_page;
1459                                         block_in_page++) {
1460                         sector_t block;
1461
1462                         block = bmap(inode, probe_block + block_in_page);
1463                         if (block == 0)
1464                                 goto bad_bmap;
1465                         if (block != first_block + block_in_page) {
1466                                 /* Discontiguity */
1467                                 probe_block++;
1468                                 goto reprobe;
1469                         }
1470                 }
1471
1472                 first_block >>= (PAGE_SHIFT - blkbits);
1473                 if (page_no) {  /* exclude the header page */
1474                         if (first_block < lowest_block)
1475                                 lowest_block = first_block;
1476                         if (first_block > highest_block)
1477                                 highest_block = first_block;
1478                 }
1479
1480                 /*
1481                  * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
1482                  */
1483                 ret = add_swap_extent(sis, page_no, 1, first_block);
1484                 if (ret < 0)
1485                         goto out;
1486                 nr_extents += ret;
1487                 page_no++;
1488                 probe_block += blocks_per_page;
1489 reprobe:
1490                 continue;
1491         }
1492         ret = nr_extents;
1493         *span = 1 + highest_block - lowest_block;
1494         if (page_no == 0)
1495                 page_no = 1;    /* force Empty message */
1496         sis->max = page_no;
1497         sis->pages = page_no - 1;
1498         sis->highest_bit = page_no - 1;
1499 out:
1500         return ret;
1501 bad_bmap:
1502         printk(KERN_ERR "swapon: swapfile has holes\n");
1503         ret = -EINVAL;
1504         goto out;
1505 }
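/*
 * Editorial note (not part of the original source): a worked example of the
 * probing above, with illustrative numbers.  On a 4 KiB-page system with a
 * 1 KiB-block filesystem (blkbits = 10, blocks_per_page = 4), suppose bmap()
 * reports blocks 8..11 for page 0 and blocks 12..15 for page 1.  Both runs are
 * contiguous and PAGE_SIZE-aligned, first_block >> (PAGE_SHIFT - blkbits)
 * yields 2 and then 3, and the second add_swap_extent() call finds
 * start_block + nr_pages == 3 and simply merges, leaving a single extent
 * {start_page 0, nr_pages 2, start_block 2}.
 */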
1506
1507 static void enable_swap_info(struct swap_info_struct *p, int prio,
1508                                 unsigned char *swap_map,
1509                                 unsigned long *frontswap_map)
1510 {
1511         int i, prev;
1512
1513         spin_lock(&swap_lock);
1514         if (prio >= 0)
1515                 p->prio = prio;
1516         else
1517                 p->prio = --least_priority;
1518         p->swap_map = swap_map;
1519         frontswap_map_set(p, frontswap_map);
1520         p->flags |= SWP_WRITEOK;
1521         nr_swap_pages += p->pages;
1522         total_swap_pages += p->pages;
1523
1524         /* insert swap space into swap_list: */
1525         prev = -1;
1526         for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1527                 if (p->prio >= swap_info[i]->prio)
1528                         break;
1529                 prev = i;
1530         }
1531         p->next = i;
1532         if (prev < 0)
1533                 swap_list.head = swap_list.next = p->type;
1534         else
1535                 swap_info[prev]->next = p->type;
1536         frontswap_init(p->type);
1537         spin_unlock(&swap_lock);
1538 }
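/*
 * Editorial note (not part of the original source): the insertion above keeps
 * swap_list sorted by descending priority.  For example, with areas of
 * priority 10 and 5 already on the list, enabling a new area with priority 5
 * breaks out at the first entry whose priority is <= 5, so the new area is
 * linked in ahead of the existing priority-5 one.  An area enabled without an
 * explicit priority instead gets --least_priority (-1, -2, ...) and ends up
 * at the tail.
 */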
1539
1540 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1541 {
1542         struct swap_info_struct *p = NULL;
1543         unsigned char *swap_map;
1544         struct file *swap_file, *victim;
1545         struct address_space *mapping;
1546         struct inode *inode;
1547         char *pathname;
1548         int oom_score_adj;
1549         int i, type, prev;
1550         int err;
1551
1552         if (!capable(CAP_SYS_ADMIN))
1553                 return -EPERM;
1554
1555         BUG_ON(!current->mm);
1556
1557         pathname = getname(specialfile);
1558         err = PTR_ERR(pathname);
1559         if (IS_ERR(pathname))
1560                 goto out;
1561
1562         victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0);
1563         putname(pathname);
1564         err = PTR_ERR(victim);
1565         if (IS_ERR(victim))
1566                 goto out;
1567
1568         mapping = victim->f_mapping;
1569         prev = -1;
1570         spin_lock(&swap_lock);
1571         for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1572                 p = swap_info[type];
1573                 if (p->flags & SWP_WRITEOK) {
1574                         if (p->swap_file->f_mapping == mapping)
1575                                 break;
1576                 }
1577                 prev = type;
1578         }
1579         if (type < 0) {
1580                 err = -EINVAL;
1581                 spin_unlock(&swap_lock);
1582                 goto out_dput;
1583         }
1584         if (!security_vm_enough_memory_mm(current->mm, p->pages))
1585                 vm_unacct_memory(p->pages);
1586         else {
1587                 err = -ENOMEM;
1588                 spin_unlock(&swap_lock);
1589                 goto out_dput;
1590         }
1591         if (prev < 0)
1592                 swap_list.head = p->next;
1593         else
1594                 swap_info[prev]->next = p->next;
1595         if (type == swap_list.next) {
1596                 /* just pick something that's safe... */
1597                 swap_list.next = swap_list.head;
1598         }
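        /*
         * Editorial note (not part of the original source): worked example of
         * the renumbering below.  Areas enabled without an explicit priority
         * get -1, -2, -3, ... from enable_swap_info().  If the -2 area is
         * swapped off here, the loop walks the lower-priority tail and bumps
         * the -3 area up to -2 (and a -4 area up to -3), and least_priority
         * moves back up by one, so future auto-assigned priorities stay dense.
         */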
1599         if (p->prio < 0) {
1600                 for (i = p->next; i >= 0; i = swap_info[i]->next)
1601                         swap_info[i]->prio = p->prio--;
1602                 least_priority++;
1603         }
1604         nr_swap_pages -= p->pages;
1605         total_swap_pages -= p->pages;
1606         p->flags &= ~SWP_WRITEOK;
1607         spin_unlock(&swap_lock);
1608
1609         oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1610         err = try_to_unuse(type, false, 0); /* force all pages to be unused */
1611         compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
1612
1613         if (err) {
1614                 /*
1615                  * reading p->prio and p->swap_map outside the lock is
1616                  * safe here because only sys_swapon and sys_swapoff
1617                  * change them, and there can be no other sys_swapon or
1618                  * sys_swapoff for this swap_info_struct at this point.
1619                  */
1620                 /* re-insert swap space back into swap_list */
1621                 enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
1622                 goto out_dput;
1623         }
1624
1625         destroy_swap_extents(p);
1626         if (p->flags & SWP_CONTINUED)
1627                 free_swap_count_continuations(p);
1628
1629         mutex_lock(&swapon_mutex);
1630         spin_lock(&swap_lock);
1631         drain_mmlist();
1632
1633         /* wait for anyone still in scan_swap_map */
1634         p->highest_bit = 0;             /* cuts scans short */
1635         while (p->flags >= SWP_SCANNING) {
1636                 spin_unlock(&swap_lock);
1637                 schedule_timeout_uninterruptible(1);
1638                 spin_lock(&swap_lock);
1639         }
1640
1641         swap_file = p->swap_file;
1642         p->swap_file = NULL;
1643         p->max = 0;
1644         swap_map = p->swap_map;
1645         p->swap_map = NULL;
1646         p->flags = 0;
1647         frontswap_invalidate_area(type);
1648         spin_unlock(&swap_lock);
1649         mutex_unlock(&swapon_mutex);
1650         vfree(swap_map);
1651         vfree(frontswap_map_get(p));
1652         /* Destroy swap account information */
1653         swap_cgroup_swapoff(type);
1654
1655         inode = mapping->host;
1656         if (S_ISBLK(inode->i_mode)) {
1657                 struct block_device *bdev = I_BDEV(inode);
1658                 set_blocksize(bdev, p->old_block_size);
1659                 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1660         } else {
1661                 mutex_lock(&inode->i_mutex);
1662                 inode->i_flags &= ~S_SWAPFILE;
1663                 mutex_unlock(&inode->i_mutex);
1664         }
1665         filp_close(swap_file, NULL);
1666         err = 0;
1667         atomic_inc(&proc_poll_event);
1668         wake_up_interruptible(&proc_poll_wait);
1669
1670 out_dput:
1671         filp_close(victim, NULL);
1672 out:
1673         return err;
1674 }
1675
1676 #ifdef CONFIG_PROC_FS
1677 static unsigned swaps_poll(struct file *file, poll_table *wait)
1678 {
1679         struct seq_file *seq = file->private_data;
1680
1681         poll_wait(file, &proc_poll_wait, wait);
1682
1683         if (seq->poll_event != atomic_read(&proc_poll_event)) {
1684                 seq->poll_event = atomic_read(&proc_poll_event);
1685                 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
1686         }
1687
1688         return POLLIN | POLLRDNORM;
1689 }
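/*
 * Editorial note (not part of the original source): a minimal userspace
 * sketch of the notification contract above.  POLLIN | POLLRDNORM is always
 * reported, so a consumer that only wants change events polls for POLLPRI
 * (POLLERR is reported regardless of the requested events) and re-reads the
 * file when either is signalled:
 *
 *      #include <fcntl.h>
 *      #include <poll.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              char buf[4096];
 *              ssize_t n;
 *              struct pollfd pfd = { .events = POLLPRI };
 *
 *              pfd.fd = open("/proc/swaps", O_RDONLY);
 *              if (pfd.fd < 0)
 *                      return 1;
 *              for (;;) {
 *                      if (poll(&pfd, 1, -1) < 0)
 *                              return 1;
 *                      if (pfd.revents & (POLLERR | POLLPRI)) {
 *                              lseek(pfd.fd, 0, SEEK_SET);
 *                              while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
 *                                      fwrite(buf, 1, n, stdout);
 *                              fflush(stdout);
 *                      }
 *              }
 *      }
 */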
1690
1691 /* iterator */
1692 static void *swap_start(struct seq_file *swap, loff_t *pos)
1693 {
1694         struct swap_info_struct *si;
1695         int type;
1696         loff_t l = *pos;
1697
1698         mutex_lock(&swapon_mutex);
1699
1700         if (!l)
1701                 return SEQ_START_TOKEN;
1702
1703         for (type = 0; type < nr_swapfiles; type++) {
1704                 smp_rmb();      /* read nr_swapfiles before swap_info[type] */
1705                 si = swap_info[type];
1706                 if (!(si->flags & SWP_USED) || !si->swap_map)
1707                         continue;
1708                 if (!--l)
1709                         return si;
1710         }
1711
1712         return NULL;
1713 }
1714
1715 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1716 {
1717         struct swap_info_struct *si = v;
1718         int type;
1719
1720         if (v == SEQ_START_TOKEN)
1721                 type = 0;
1722         else
1723                 type = si->type + 1;
1724
1725         for (; type < nr_swapfiles; type++) {
1726                 smp_rmb();      /* read nr_swapfiles before swap_info[type] */
1727                 si = swap_info[type];
1728                 if (!(si->flags & SWP_USED) || !si->swap_map)
1729                         continue;
1730                 ++*pos;
1731                 return si;
1732         }
1733
1734         return NULL;
1735 }
1736
1737 static void swap_stop(struct seq_file *swap, void *v)
1738 {
1739         mutex_unlock(&swapon_mutex);
1740 }
1741
1742 static int swap_show(struct seq_file *swap, void *v)
1743 {
1744         struct swap_info_struct *si = v;
1745         struct file *file;
1746         int len;
1747
1748         if (si == SEQ_START_TOKEN) {
1749                 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1750                 return 0;
1751         }
1752
1753         file = si->swap_file;
1754         len = seq_path(swap, &file->f_path, " \t\n\\");
1755         seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1756                         len < 40 ? 40 - len : 1, " ",
1757                         S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?
1758                                 "partition" : "file\t",
1759                         si->pages << (PAGE_SHIFT - 10),
1760                         si->inuse_pages << (PAGE_SHIFT - 10),
1761                         si->prio);
1762         return 0;
1763 }
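/*
 * Editorial note (not part of the original source): with the format string
 * above, a /proc/swaps entry comes out roughly as follows (values are
 * illustrative; sizes are in KiB because of the << (PAGE_SHIFT - 10)
 * conversion):
 *
 *      Filename                                Type            Size    Used    Priority
 *      /dev/sda2                               partition       2097148 0       -1
 */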
1764
1765 static const struct seq_operations swaps_op = {
1766         .start =        swap_start,
1767         .next =         swap_next,
1768         .stop =         swap_stop,
1769         .show =         swap_show
1770 };
1771
1772 static int swaps_open(struct inode *inode, struct file *file)
1773 {
1774         struct seq_file *seq;
1775         int ret;
1776
1777         ret = seq_open(file, &swaps_op);
1778         if (ret)
1779                 return ret;
1780
1781         seq = file->private_data;
1782         seq->poll_event = atomic_read(&proc_poll_event);
1783         return 0;
1784 }
1785
1786 static const struct file_operations proc_swaps_operations = {
1787         .open           = swaps_open,
1788         .read           = seq_read,
1789         .llseek         = seq_lseek,
1790         .release        = seq_release,
1791         .poll           = swaps_poll,
1792 };
1793
1794 static int __init procswaps_init(void)
1795 {
1796         proc_create("swaps", 0, NULL, &proc_swaps_operations);
1797         return 0;
1798 }
1799 __initcall(procswaps_init);
1800 #endif /* CONFIG_PROC_FS */
1801
1802 #ifdef MAX_SWAPFILES_CHECK
1803 static int __init max_swapfiles_check(void)
1804 {
1805         MAX_SWAPFILES_CHECK();
1806         return 0;
1807 }
1808 late_initcall(max_swapfiles_check);
1809 #endif
1810
1811 static struct swap_info_struct *alloc_swap_info(void)
1812 {
1813         struct swap_info_struct *p;
1814         unsigned int type;
1815
1816         p = kzalloc(sizeof(*p), GFP_KERNEL);
1817         if (!p)
1818                 return ERR_PTR(-ENOMEM);
1819
1820         spin_lock(&swap_lock);
1821         for (type = 0; type < nr_swapfiles; type++) {
1822                 if (!(swap_info[type]->flags & SWP_USED))
1823                         break;
1824         }
1825         if (type >= MAX_SWAPFILES) {
1826                 spin_unlock(&swap_lock);
1827                 kfree(p);
1828                 return ERR_PTR(-EPERM);
1829         }
1830         if (type >= nr_swapfiles) {
1831                 p->type = type;
1832                 swap_info[type] = p;
1833                 /*
1834                  * Write swap_info[type] before nr_swapfiles, in case a
1835                  * racing procfs swap_start() or swap_next() is reading them.
1836                  * (We never shrink nr_swapfiles, we never free this entry.)
1837                  */
1838                 smp_wmb();
1839                 nr_swapfiles++;
1840         } else {
1841                 kfree(p);
1842                 p = swap_info[type];
1843                 /*
1844                  * Do not memset this entry: a racing procfs swap_next()
1845                  * would be relying on p->type to remain valid.
1846                  */
1847         }
1848         INIT_LIST_HEAD(&p->first_swap_extent.list);
1849         p->flags = SWP_USED;
1850         p->next = -1;
1851         spin_unlock(&swap_lock);
1852
1853         return p;
1854 }
1855
1856 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
1857 {
1858         int error;
1859
1860         if (S_ISBLK(inode->i_mode)) {
1861                 p->bdev = bdgrab(I_BDEV(inode));
1862                 error = blkdev_get(p->bdev,
1863                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1864                                    sys_swapon);
1865                 if (error < 0) {
1866                         p->bdev = NULL;
1867                         return -EINVAL;
1868                 }
1869                 p->old_block_size = block_size(p->bdev);
1870                 error = set_blocksize(p->bdev, PAGE_SIZE);
1871                 if (error < 0)
1872                         return error;
1873                 p->flags |= SWP_BLKDEV;
1874         } else if (S_ISREG(inode->i_mode)) {
1875                 p->bdev = inode->i_sb->s_bdev;
1876                 mutex_lock(&inode->i_mutex);
1877                 if (IS_SWAPFILE(inode))
1878                         return -EBUSY;
1879         } else
1880                 return -EINVAL;
1881
1882         return 0;
1883 }
1884
1885 static unsigned long read_swap_header(struct swap_info_struct *p,
1886                                         union swap_header *swap_header,
1887                                         struct inode *inode)
1888 {
1889         int i;
1890         unsigned long maxpages;
1891         unsigned long swapfilepages;
1892
1893         if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1894                 printk(KERN_ERR "Unable to find swap-space signature\n");
1895                 return 0;
1896         }
1897
1898         /* swap partition endianness hack... */
1899         if (swab32(swap_header->info.version) == 1) {
1900                 swab32s(&swap_header->info.version);
1901                 swab32s(&swap_header->info.last_page);
1902                 swab32s(&swap_header->info.nr_badpages);
1903                 for (i = 0; i < swap_header->info.nr_badpages; i++)
1904                         swab32s(&swap_header->info.badpages[i]);
1905         }
1906         /* Check the swap header's sub-version */
1907         if (swap_header->info.version != 1) {
1908                 printk(KERN_WARNING
1909                        "Unable to handle swap header version %d\n",
1910                        swap_header->info.version);
1911                 return 0;
1912         }
1913
1914         p->lowest_bit  = 1;
1915         p->cluster_next = 1;
1916         p->cluster_nr = 0;
1917
1918         /*
1919          * Find out how many pages are allowed for a single swap
1920          * device. There are two limiting factors: 1) the number
1921          * of bits for the swap offset in the swp_entry_t type, and
1922          * 2) the number of bits in the swap pte as defined by the
1923          * different architectures. In order to find the
1924          * largest possible bit mask, a swap entry with swap type 0
1925          * and swap offset ~0UL is created, encoded to a swap pte,
1926          * decoded to a swp_entry_t again, and finally the swap
1927          * offset is extracted. This will mask all the bits from
1928          * the initial ~0UL mask that can't be encoded in either
1929          * the swp_entry_t or the architecture definition of a
1930          * swap pte.
1931          */
1932         maxpages = swp_offset(pte_to_swp_entry(
1933                         swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
1934         if (maxpages > swap_header->info.last_page) {
1935                 maxpages = swap_header->info.last_page + 1;
1936                 /* p->max is an unsigned int: don't overflow it */
1937                 if ((unsigned int)maxpages == 0)
1938                         maxpages = UINT_MAX;
1939         }
1940         p->highest_bit = maxpages - 1;
1941
1942         if (!maxpages)
1943                 return 0;
1944         swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
1945         if (swapfilepages && maxpages > swapfilepages) {
1946                 printk(KERN_WARNING
1947                        "Swap area shorter than signature indicates\n");
1948                 return 0;
1949         }
1950         if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
1951                 return 0;
1952         if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1953                 return 0;
1954
1955         return maxpages;
1956 }
1957
1958 static int setup_swap_map_and_extents(struct swap_info_struct *p,
1959                                         union swap_header *swap_header,
1960                                         unsigned char *swap_map,
1961                                         unsigned long maxpages,
1962                                         sector_t *span)
1963 {
1964         int i;
1965         unsigned int nr_good_pages;
1966         int nr_extents;
1967
1968         nr_good_pages = maxpages - 1;   /* omit header page */
1969
1970         for (i = 0; i < swap_header->info.nr_badpages; i++) {
1971                 unsigned int page_nr = swap_header->info.badpages[i];
1972                 if (page_nr == 0 || page_nr > swap_header->info.last_page)
1973                         return -EINVAL;
1974                 if (page_nr < maxpages) {
1975                         swap_map[page_nr] = SWAP_MAP_BAD;
1976                         nr_good_pages--;
1977                 }
1978         }
1979
1980         if (nr_good_pages) {
1981                 swap_map[0] = SWAP_MAP_BAD;
1982                 p->max = maxpages;
1983                 p->pages = nr_good_pages;
1984                 nr_extents = setup_swap_extents(p, span);
1985                 if (nr_extents < 0)
1986                         return nr_extents;
1987                 nr_good_pages = p->pages;
1988         }
1989         if (!nr_good_pages) {
1990                 printk(KERN_WARNING "Empty swap-file\n");
1991                 return -EINVAL;
1992         }
1993
1994         return nr_extents;
1995 }
1996
1997 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1998 {
1999         struct swap_info_struct *p;
2000         char *name;
2001         struct file *swap_file = NULL;
2002         struct address_space *mapping;
2003         int i;
2004         int prio;
2005         int error;
2006         union swap_header *swap_header;
2007         int nr_extents;
2008         sector_t span;
2009         unsigned long maxpages;
2010         unsigned char *swap_map = NULL;
2011         unsigned long *frontswap_map = NULL;
2012         struct page *page = NULL;
2013         struct inode *inode = NULL;
2014
2015         if (swap_flags & ~SWAP_FLAGS_VALID)
2016                 return -EINVAL;
2017
2018         if (!capable(CAP_SYS_ADMIN))
2019                 return -EPERM;
2020
2021         p = alloc_swap_info();
2022         if (IS_ERR(p))
2023                 return PTR_ERR(p);
2024
2025         name = getname(specialfile);
2026         if (IS_ERR(name)) {
2027                 error = PTR_ERR(name);
2028                 name = NULL;
2029                 goto bad_swap;
2030         }
2031         swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0);
2032         if (IS_ERR(swap_file)) {
2033                 error = PTR_ERR(swap_file);
2034                 swap_file = NULL;
2035                 goto bad_swap;
2036         }
2037
2038         p->swap_file = swap_file;
2039         mapping = swap_file->f_mapping;
2040
2041         for (i = 0; i < nr_swapfiles; i++) {
2042                 struct swap_info_struct *q = swap_info[i];
2043
2044                 if (q == p || !q->swap_file)
2045                         continue;
2046                 if (mapping == q->swap_file->f_mapping) {
2047                         error = -EBUSY;
2048                         goto bad_swap;
2049                 }
2050         }
2051
2052         inode = mapping->host;
2053         /* If S_ISREG(inode->i_mode), claim_swapfile() takes mutex_lock(&inode->i_mutex) */
2054         error = claim_swapfile(p, inode);
2055         if (unlikely(error))
2056                 goto bad_swap;
2057
2058         /*
2059          * Read the swap header.
2060          */
2061         if (!mapping->a_ops->readpage) {
2062                 error = -EINVAL;
2063                 goto bad_swap;
2064         }
2065         page = read_mapping_page(mapping, 0, swap_file);
2066         if (IS_ERR(page)) {
2067                 error = PTR_ERR(page);
2068                 goto bad_swap;
2069         }
2070         swap_header = kmap(page);
2071
2072         maxpages = read_swap_header(p, swap_header, inode);
2073         if (unlikely(!maxpages)) {
2074                 error = -EINVAL;
2075                 goto bad_swap;
2076         }
2077
2078         /* OK, set up the swap map and apply the bad block list */
2079         swap_map = vzalloc(maxpages);
2080         if (!swap_map) {
2081                 error = -ENOMEM;
2082                 goto bad_swap;
2083         }
2084
2085         error = swap_cgroup_swapon(p->type, maxpages);
2086         if (error)
2087                 goto bad_swap;
2088
2089         nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2090                 maxpages, &span);
2091         if (unlikely(nr_extents < 0)) {
2092                 error = nr_extents;
2093                 goto bad_swap;
2094         }
2095         /* frontswap enabled? set up bit-per-page map for frontswap */
2096         if (frontswap_enabled)
2097                 frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
2098
2099         if (p->bdev) {
2100                 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2101                         p->flags |= SWP_SOLIDSTATE;
2102                         p->cluster_next = 1 + (random32() % p->highest_bit);
2103                 }
2104                 if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
2105                         p->flags |= SWP_DISCARDABLE;
2106         }
2107
2108         mutex_lock(&swapon_mutex);
2109         prio = -1;
2110         if (swap_flags & SWAP_FLAG_PREFER)
2111                 prio =
2112                   (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2113         enable_swap_info(p, prio, swap_map, frontswap_map);
2114
2115         printk(KERN_INFO "Adding %uk swap on %s.  "
2116                         "Priority:%d extents:%d across:%lluk %s%s%s\n",
2117                 p->pages<<(PAGE_SHIFT-10), name, p->prio,
2118                 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2119                 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2120                 (p->flags & SWP_DISCARDABLE) ? "D" : "",
2121                 (frontswap_map) ? "FS" : "");
2122
2123         mutex_unlock(&swapon_mutex);
2124         atomic_inc(&proc_poll_event);
2125         wake_up_interruptible(&proc_poll_wait);
2126
2127         if (S_ISREG(inode->i_mode))
2128                 inode->i_flags |= S_SWAPFILE;
2129         error = 0;
2130         goto out;
2131 bad_swap:
2132         if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2133                 set_blocksize(p->bdev, p->old_block_size);
2134                 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2135         }
2136         destroy_swap_extents(p);
2137         swap_cgroup_swapoff(p->type);
2138         spin_lock(&swap_lock);
2139         p->swap_file = NULL;
2140         p->flags = 0;
2141         spin_unlock(&swap_lock);
2142         vfree(swap_map);
2143         if (swap_file) {
2144                 if (inode && S_ISREG(inode->i_mode)) {
2145                         mutex_unlock(&inode->i_mutex);
2146                         inode = NULL;
2147                 }
2148                 filp_close(swap_file, NULL);
2149         }
2150 out:
2151         if (page && !IS_ERR(page)) {
2152                 kunmap(page);
2153                 page_cache_release(page);
2154         }
2155         if (name)
2156                 putname(name);
2157         if (inode && S_ISREG(inode->i_mode))
2158                 mutex_unlock(&inode->i_mutex);
2159         return error;
2160 }
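/*
 * Editorial note (not part of the original source): a minimal userspace
 * sketch of invoking this syscall with an explicit priority, matching the
 * SWAP_FLAG_PREFER / SWAP_FLAG_PRIO_* decoding above.  The device path is
 * only an example and the call needs CAP_SYS_ADMIN:
 *
 *      #include <stdio.h>
 *      #include <sys/swap.h>
 *
 *      int main(void)
 *      {
 *              int flags = SWAP_FLAG_PREFER |
 *                          ((5 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
 *
 *              if (swapon("/dev/sdb2", flags) != 0)
 *                      perror("swapon");
 *              return 0;
 *      }
 */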
2161
2162 void si_swapinfo(struct sysinfo *val)
2163 {
2164         unsigned int type;
2165         unsigned long nr_to_be_unused = 0;
2166
2167         spin_lock(&swap_lock);
2168         for (type = 0; type < nr_swapfiles; type++) {
2169                 struct swap_info_struct *si = swap_info[type];
2170
2171                 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2172                         nr_to_be_unused += si->inuse_pages;
2173         }
2174         val->freeswap = nr_swap_pages + nr_to_be_unused;
2175         val->totalswap = total_swap_pages + nr_to_be_unused;
2176         spin_unlock(&swap_lock);
2177 }
2178
2179 /*
2180  * Verify that a swap entry is valid and increment its swap map count.
2181  *
2182  * Return values:
2183  * - success -> 0
2184  * - swp_entry is invalid -> EINVAL
2185  * - swp_entry is migration entry -> EINVAL
2186  * - swap-cache reference is requested but there is already one. -> EEXIST
2187  * - swap-cache reference is requested but the entry is not used. -> ENOENT
2188  * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2189  */
2190 static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2191 {
2192         struct swap_info_struct *p;
2193         unsigned long offset, type;
2194         unsigned char count;
2195         unsigned char has_cache;
2196         int err = -EINVAL;
2197
2198         if (non_swap_entry(entry))
2199                 goto out;
2200
2201         type = swp_type(entry);
2202         if (type >= nr_swapfiles)
2203                 goto bad_file;
2204         p = swap_info[type];
2205         offset = swp_offset(entry);
2206
2207         spin_lock(&swap_lock);
2208         if (unlikely(offset >= p->max))
2209                 goto unlock_out;
2210
2211         count = p->swap_map[offset];
2212         has_cache = count & SWAP_HAS_CACHE;
2213         count &= ~SWAP_HAS_CACHE;
2214         err = 0;
2215
2216         if (usage == SWAP_HAS_CACHE) {
2217
2218                 /* set SWAP_HAS_CACHE if there is no cache and entry is used */
2219                 if (!has_cache && count)
2220                         has_cache = SWAP_HAS_CACHE;
2221                 else if (has_cache)             /* someone else added cache */
2222                         err = -EEXIST;
2223                 else                            /* no users remaining */
2224                         err = -ENOENT;
2225
2226         } else if (count || has_cache) {
2227
2228                 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2229                         count += usage;
2230                 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2231                         err = -EINVAL;
2232                 else if (swap_count_continued(p, offset, count))
2233                         count = COUNT_CONTINUED;
2234                 else
2235                         err = -ENOMEM;
2236         } else
2237                 err = -ENOENT;                  /* unused swap entry */
2238
2239         p->swap_map[offset] = count | has_cache;
2240
2241 unlock_out:
2242         spin_unlock(&swap_lock);
2243 out:
2244         return err;
2245
2246 bad_file:
2247         printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2248         goto out;
2249 }
2250
2251 /*
2252  * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2253  * (in which case its reference count is never incremented).
2254  */
2255 void swap_shmem_alloc(swp_entry_t entry)
2256 {
2257         __swap_duplicate(entry, SWAP_MAP_SHMEM);
2258 }
2259
2260 /*
2261  * Increase reference count of swap entry by 1.
2262  * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2263  * but could not be atomically allocated.  Returns 0, just as if it succeeded,
2264  * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2265  * might occur if a page table entry has got corrupted.
2266  */
2267 int swap_duplicate(swp_entry_t entry)
2268 {
2269         int err = 0;
2270
2271         while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2272                 err = add_swap_count_continuation(entry, GFP_ATOMIC);
2273         return err;
2274 }
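/*
 * Editorial note (not part of the original source): sketch of the caller-side
 * pattern for the -ENOMEM case above; the parenthesised steps are paraphrased,
 * not literal code.  Under a held page table lock only the GFP_ATOMIC attempt
 * inside swap_duplicate() is possible; on failure the caller drops the lock
 * and retries the continuation with GFP_KERNEL:
 *
 *      entry = pte_to_swp_entry(pte);
 *      if (swap_duplicate(entry) < 0) {
 *              (drop the page table lock)
 *              if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
 *                      return -ENOMEM;
 *              (retake the lock and redo this pte)
 *      }
 */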
2275
2276 /*
2277  * @entry: swap entry for which we allocate swap cache.
2278  *
2279  * Called when allocating swap cache for an existing swap entry.
2280  * Returns 0 on success, or an error code otherwise:
2281  * -EEXIST means a swap cache already exists for this entry.
2282  * Note: the return codes differ from swap_duplicate().
2283  */
2284 int swapcache_prepare(swp_entry_t entry)
2285 {
2286         return __swap_duplicate(entry, SWAP_HAS_CACHE);
2287 }
2288
2289 struct swap_info_struct *page_swap_info(struct page *page)
2290 {
2291         swp_entry_t swap = { .val = page_private(page) };
2292         BUG_ON(!PageSwapCache(page));
2293         return swap_info[swp_type(swap)];
2294 }
2295
2296 /*
2297  * out-of-line __page_file_ methods to avoid include hell.
2298  */
2299 struct address_space *__page_file_mapping(struct page *page)
2300 {
2301         VM_BUG_ON(!PageSwapCache(page));
2302         return page_swap_info(page)->swap_file->f_mapping;
2303 }
2304 EXPORT_SYMBOL_GPL(__page_file_mapping);
2305
2306 pgoff_t __page_file_index(struct page *page)
2307 {
2308         swp_entry_t swap = { .val = page_private(page) };
2309         VM_BUG_ON(!PageSwapCache(page));
2310         return swp_offset(swap);
2311 }
2312 EXPORT_SYMBOL_GPL(__page_file_index);
2313
2314 /*
2315  * add_swap_count_continuation - called when a swap count is duplicated
2316  * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2317  * page of the original vmalloc'ed swap_map, to hold the continuation count
2318  * (for that entry and for its neighbouring PAGE_SIZE swap entries).  Called
2319  * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2320  *
2321  * These continuation pages are seldom referenced: the common paths all work
2322  * on the original swap_map, only referring to a continuation page when the
2323  * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2324  *
2325  * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2326  * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2327  * can be called after dropping locks.
2328  */
2329 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2330 {
2331         struct swap_info_struct *si;
2332         struct page *head;
2333         struct page *page;
2334         struct page *list_page;
2335         pgoff_t offset;
2336         unsigned char count;
2337
2338         /*
2339          * When debugging, it's easier to use __GFP_ZERO here; but it's better
2340          * for latency not to zero a page while GFP_ATOMIC and holding locks.
2341          */
2342         page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2343
2344         si = swap_info_get(entry);
2345         if (!si) {
2346                 /*
2347                  * An acceptable race has occurred since the failing
2348                  * __swap_duplicate(): the swap entry has been freed,
2349                  * perhaps even the whole swap_map cleared for swapoff.
2350                  */
2351                 goto outer;
2352         }
2353
2354         offset = swp_offset(entry);
2355         count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2356
2357         if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2358                 /*
2359                  * The higher the swap count, the more likely it is that tasks
2360                  * will race to add swap count continuation: we need to avoid
2361                  * over-provisioning.
2362                  */
2363                 goto out;
2364         }
2365
2366         if (!page) {
2367                 spin_unlock(&swap_lock);
2368                 return -ENOMEM;
2369         }
2370
2371         /*
2372          * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2373          * no architecture is using highmem pages for kernel pagetables: so it
2374          * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2375          */
2376         head = vmalloc_to_page(si->swap_map + offset);
2377         offset &= ~PAGE_MASK;
2378
2379         /*
2380          * Page allocation does not initialize the page's lru field,
2381          * but it does always reset its private field.
2382          */
2383         if (!page_private(head)) {
2384                 BUG_ON(count & COUNT_CONTINUED);
2385                 INIT_LIST_HEAD(&head->lru);
2386                 set_page_private(head, SWP_CONTINUED);
2387                 si->flags |= SWP_CONTINUED;
2388         }
2389
2390         list_for_each_entry(list_page, &head->lru, lru) {
2391                 unsigned char *map;
2392
2393                 /*
2394                  * If the previous map said no continuation, but we've found
2395                  * a continuation page, free our allocation and use this one.
2396                  */
2397                 if (!(count & COUNT_CONTINUED))
2398                         goto out;
2399
2400                 map = kmap_atomic(list_page) + offset;
2401                 count = *map;
2402                 kunmap_atomic(map);
2403
2404                 /*
2405                  * If this continuation count now has some space in it,
2406                  * free our allocation and use this one.
2407                  */
2408                 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2409                         goto out;
2410         }
2411
2412         list_add_tail(&page->lru, &head->lru);
2413         page = NULL;                    /* now it's attached, don't free it */
2414 out:
2415         spin_unlock(&swap_lock);
2416 outer:
2417         if (page)
2418                 __free_page(page);
2419         return 0;
2420 }
2421
2422 /*
2423  * swap_count_continued - when the original swap_map count is incremented
2424  * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2425  * into, carry if so, or else fail until a new continuation page is allocated;
2426  * when the original swap_map count is decremented from 0 with continuation,
2427  * borrow from the continuation and report whether it still holds more.
2428  * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2429  */
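/*
 * Editorial note (not part of the original source): the "digits" at a given
 * offset form a little-endian multi-byte counter.  The swap_map byte is the
 * lowest digit (values up to SWAP_MAP_MAX) and the byte at the same offset in
 * each successive continuation page is the next higher digit (values up to
 * SWAP_CONT_MAX).  Incrementing past a full digit carries into the first
 * non-full higher digit, much like adding 1 to decimal 999; decrementing a
 * zero low digit borrows back from the continuation, like subtracting 1 from
 * 1000.
 */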
2430 static bool swap_count_continued(struct swap_info_struct *si,
2431                                  pgoff_t offset, unsigned char count)
2432 {
2433         struct page *head;
2434         struct page *page;
2435         unsigned char *map;
2436
2437         head = vmalloc_to_page(si->swap_map + offset);
2438         if (page_private(head) != SWP_CONTINUED) {
2439                 BUG_ON(count & COUNT_CONTINUED);
2440                 return false;           /* need to add count continuation */
2441         }
2442
2443         offset &= ~PAGE_MASK;
2444         page = list_entry(head->lru.next, struct page, lru);
2445         map = kmap_atomic(page) + offset;
2446
2447         if (count == SWAP_MAP_MAX)      /* initial increment from swap_map */
2448                 goto init_map;          /* jump over SWAP_CONT_MAX checks */
2449
2450         if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2451                 /*
2452                  * Think of how you add 1 to 999
2453                  */
2454                 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2455                         kunmap_atomic(map);
2456                         page = list_entry(page->lru.next, struct page, lru);
2457                         BUG_ON(page == head);
2458                         map = kmap_atomic(page) + offset;
2459                 }
2460                 if (*map == SWAP_CONT_MAX) {
2461                         kunmap_atomic(map);
2462                         page = list_entry(page->lru.next, struct page, lru);
2463                         if (page == head)
2464                                 return false;   /* add count continuation */
2465                         map = kmap_atomic(page) + offset;
2466 init_map:               *map = 0;               /* we didn't zero the page */
2467                 }
2468                 *map += 1;
2469                 kunmap_atomic(map);
2470                 page = list_entry(page->lru.prev, struct page, lru);
2471                 while (page != head) {
2472                         map = kmap_atomic(page) + offset;
2473                         *map = COUNT_CONTINUED;
2474                         kunmap_atomic(map);
2475                         page = list_entry(page->lru.prev, struct page, lru);
2476                 }
2477                 return true;                    /* incremented */
2478
2479         } else {                                /* decrementing */
2480                 /*
2481                  * Think of how you subtract 1 from 1000
2482                  */
2483                 BUG_ON(count != COUNT_CONTINUED);
2484                 while (*map == COUNT_CONTINUED) {
2485                         kunmap_atomic(map);
2486                         page = list_entry(page->lru.next, struct page, lru);
2487                         BUG_ON(page == head);
2488                         map = kmap_atomic(page) + offset;
2489                 }
2490                 BUG_ON(*map == 0);
2491                 *map -= 1;
2492                 if (*map == 0)
2493                         count = 0;
2494                 kunmap_atomic(map);
2495                 page = list_entry(page->lru.prev, struct page, lru);
2496                 while (page != head) {
2497                         map = kmap_atomic(page) + offset;
2498                         *map = SWAP_CONT_MAX | count;
2499                         count = COUNT_CONTINUED;
2500                         kunmap_atomic(map);
2501                         page = list_entry(page->lru.prev, struct page, lru);
2502                 }
2503                 return count == COUNT_CONTINUED;
2504         }
2505 }
2506
2507 /*
2508  * free_swap_count_continuations - swapoff free all the continuation pages
2509  * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2510  */
2511 static void free_swap_count_continuations(struct swap_info_struct *si)
2512 {
2513         pgoff_t offset;
2514
2515         for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2516                 struct page *head;
2517                 head = vmalloc_to_page(si->swap_map + offset);
2518                 if (page_private(head)) {
2519                         struct list_head *this, *next;
2520                         list_for_each_safe(this, next, &head->lru) {
2521                                 struct page *page;
2522                                 page = list_entry(this, struct page, lru);
2523                                 list_del(this);
2524                                 __free_page(page);
2525                         }
2526                 }
2527         }
2528 }