4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
5 * Swap reorganised 29.12.95, Stephen Tweedie
9 #include <linux/hugetlb.h>
10 #include <linux/mman.h>
11 #include <linux/slab.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/swap.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pagemap.h>
16 #include <linux/namei.h>
17 #include <linux/shmem_fs.h>
18 #include <linux/blkdev.h>
19 #include <linux/random.h>
20 #include <linux/writeback.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/init.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/security.h>
27 #include <linux/backing-dev.h>
28 #include <linux/mutex.h>
29 #include <linux/capability.h>
30 #include <linux/syscalls.h>
31 #include <linux/memcontrol.h>
32 #include <linux/poll.h>
33 #include <linux/oom.h>
34 #include <linux/frontswap.h>
35 #include <linux/swapfile.h>
36 #include <linux/export.h>
38 #include <asm/pgtable.h>
39 #include <asm/tlbflush.h>
40 #include <linux/swapops.h>
41 #include <linux/page_cgroup.h>
43 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
45 static void free_swap_count_continuations(struct swap_info_struct *);
46 static sector_t map_swap_entry(swp_entry_t, struct block_device**);
48 DEFINE_SPINLOCK(swap_lock);
49 static unsigned int nr_swapfiles;
50 atomic_long_t nr_swap_pages;
51 /* protected by swap_lock; reading in vm_swap_full() doesn't need the lock */
52 long total_swap_pages;
53 static int least_priority;
54 static atomic_t highest_priority_index = ATOMIC_INIT(-1);
56 static const char Bad_file[] = "Bad swap file entry ";
57 static const char Unused_file[] = "Unused swap file entry ";
58 static const char Bad_offset[] = "Bad swap offset entry ";
59 static const char Unused_offset[] = "Unused swap offset entry ";
61 struct swap_list_t swap_list = {-1, -1};
63 struct swap_info_struct *swap_info[MAX_SWAPFILES];
65 static DEFINE_MUTEX(swapon_mutex);
67 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
68 /* Activity counter to indicate that a swapon or swapoff has occurred */
69 static atomic_t proc_poll_event = ATOMIC_INIT(0);
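/*
 * A minimal sketch of how the declarations above fit together, under the
 * usual assumptions: swap_list.head indexes the highest-priority device in
 * swap_info[], each swap_info_struct's ->next chains to the next lower
 * priority (terminated by -1), and swap_list.next is the rotor that
 * get_swap_page() advances to round-robin among equal priorities. The
 * helper name is illustrative only; a real walker would hold swap_lock.
 */
static void example_walk_swap_by_priority(void)
{
	int type;

	for (type = swap_list.head; type >= 0; type = swap_info[type]->next)
		printk(KERN_DEBUG "swap type %d prio %d\n",
		       type, swap_info[type]->prio);
}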
71 static inline unsigned char swap_count(unsigned char ent)
73 return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
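/*
 * A minimal sketch of the swap_map byte layout that swap_count() assumes:
 * the low bits hold the map reference count, and SWAP_HAS_CACHE marks a
 * swap-cache reference. The helper below is illustrative only; real code
 * reads swap_map under si->lock, as scan_swap_map() does.
 */
static inline bool example_slot_is_cache_only(struct swap_info_struct *si,
					      unsigned long offset)
{
	unsigned char ent = si->swap_map[offset];

	return (ent & SWAP_HAS_CACHE) && !swap_count(ent);
}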
76 /* returns 1 if swap entry is freed */
78 __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
80 swp_entry_t entry = swp_entry(si->type, offset);
84 page = find_get_page(swap_address_space(entry), entry.val);
88 * This function is called from scan_swap_map(), which is in turn called
89 * by vmscan.c while reclaiming pages, so a page lock may already be held.
90 * We have to use trylock to avoid deadlock. This is a special
91 * case; in normal operation use try_to_free_swap() with an explicit
92 * lock_page().
94 if (trylock_page(page)) {
95 ret = try_to_free_swap(page);
98 page_cache_release(page);
103 * swapon tells the device that all the old swap contents can be discarded,
104 * to allow the swap device to optimize its wear-levelling.
106 static int discard_swap(struct swap_info_struct *si)
108 struct swap_extent *se;
109 sector_t start_block;
113 /* Do not discard the swap header page! */
114 se = &si->first_swap_extent;
115 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
116 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
118 err = blkdev_issue_discard(si->bdev, start_block,
119 nr_blocks, GFP_KERNEL, 0);
125 list_for_each_entry(se, &si->first_swap_extent.list, list) {
126 start_block = se->start_block << (PAGE_SHIFT - 9);
127 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
129 err = blkdev_issue_discard(si->bdev, start_block,
130 nr_blocks, GFP_KERNEL, 0);
136 return err; /* That will often be -EOPNOTSUPP */
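/*
 * A small sketch of the unit conversion used by discard_swap() above,
 * assuming the block layer's 512-byte sectors: a page-granular value
 * becomes a sector count when shifted left by (PAGE_SHIFT - 9), i.e. by
 * 3 with 4K pages, so one page spans 8 sectors. Helper name is
 * illustrative only.
 */
static inline sector_t example_page_to_sector(pgoff_t page_no)
{
	return (sector_t)page_no << (PAGE_SHIFT - 9);
}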
140 * swap allocation tells the device that a cluster of swap can now be discarded,
141 * to allow the swap device to optimize its wear-levelling.
143 static void discard_swap_cluster(struct swap_info_struct *si,
144 pgoff_t start_page, pgoff_t nr_pages)
146 struct swap_extent *se = si->curr_swap_extent;
147 int found_extent = 0;
150 struct list_head *lh;
152 if (se->start_page <= start_page &&
153 start_page < se->start_page + se->nr_pages) {
154 pgoff_t offset = start_page - se->start_page;
155 sector_t start_block = se->start_block + offset;
156 sector_t nr_blocks = se->nr_pages - offset;
158 if (nr_blocks > nr_pages)
159 nr_blocks = nr_pages;
160 start_page += nr_blocks;
161 nr_pages -= nr_blocks;
164 si->curr_swap_extent = se;
166 start_block <<= PAGE_SHIFT - 9;
167 nr_blocks <<= PAGE_SHIFT - 9;
168 if (blkdev_issue_discard(si->bdev, start_block,
169 nr_blocks, GFP_NOIO, 0))
174 se = list_entry(lh, struct swap_extent, list);
178 static int wait_for_discard(void *word)
184 #define SWAPFILE_CLUSTER 256
185 #define LATENCY_LIMIT 256
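/*
 * A worked example of the constants above, assuming 4K pages: a cluster of
 * SWAPFILE_CLUSTER (256) slots covers 256 * 4K = 1MB of contiguous swap,
 * which is the unit the clustering policy in scan_swap_map() below tries
 * to allocate sequentially; LATENCY_LIMIT lets the scan loops reschedule
 * after examining 256 slots at a time. Illustrative only.
 */
static inline unsigned long example_cluster_bytes(void)
{
	return (unsigned long)SWAPFILE_CLUSTER << PAGE_SHIFT;
}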
187 static unsigned long scan_swap_map(struct swap_info_struct *si,
190 unsigned long offset;
191 unsigned long scan_base;
192 unsigned long last_in_cluster = 0;
193 int latency_ration = LATENCY_LIMIT;
194 int found_free_cluster = 0;
197 * We try to cluster swap pages by allocating them sequentially
198 * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
199 * way, however, we resort to first-free allocation, starting
200 * a new cluster. This prevents us from scattering swap pages
201 * all over the entire swap partition, so that we reduce
202 * overall disk seek times between swap pages. -- sct
203 * But we do now try to find an empty cluster. -Andrea
204 * And we let swap pages go all over an SSD partition. Hugh
207 si->flags += SWP_SCANNING;
208 scan_base = offset = si->cluster_next;
210 if (unlikely(!si->cluster_nr--)) {
211 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
212 si->cluster_nr = SWAPFILE_CLUSTER - 1;
215 if (si->flags & SWP_DISCARDABLE) {
217 * Start range check on racing allocations, in case
218 * they overlap the cluster we eventually decide on
219 * (we scan without swap_lock to allow preemption).
220 * It's hardly conceivable that cluster_nr could be
221 * wrapped during our scan, but don't depend on it.
223 if (si->lowest_alloc)
225 si->lowest_alloc = si->max;
226 si->highest_alloc = 0;
228 spin_unlock(&si->lock);
231 * If seek is expensive, start searching for new cluster from
232 * start of partition, to minimize the span of allocated swap.
233 * But if seek is cheap, search from our current position, so
234 * that swap is allocated from all over the partition: if the
235 * Flash Translation Layer only remaps within limited zones,
236 * we don't want to wear out the first zone too quickly.
238 if (!(si->flags & SWP_SOLIDSTATE))
239 scan_base = offset = si->lowest_bit;
240 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
242 /* Locate the first empty (unaligned) cluster */
243 for (; last_in_cluster <= si->highest_bit; offset++) {
244 if (si->swap_map[offset])
245 last_in_cluster = offset + SWAPFILE_CLUSTER;
246 else if (offset == last_in_cluster) {
247 spin_lock(&si->lock);
248 offset -= SWAPFILE_CLUSTER - 1;
249 si->cluster_next = offset;
250 si->cluster_nr = SWAPFILE_CLUSTER - 1;
251 found_free_cluster = 1;
254 if (unlikely(--latency_ration < 0)) {
256 latency_ration = LATENCY_LIMIT;
260 offset = si->lowest_bit;
261 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
263 /* Locate the first empty (unaligned) cluster */
264 for (; last_in_cluster < scan_base; offset++) {
265 if (si->swap_map[offset])
266 last_in_cluster = offset + SWAPFILE_CLUSTER;
267 else if (offset == last_in_cluster) {
268 spin_lock(&si->lock);
269 offset -= SWAPFILE_CLUSTER - 1;
270 si->cluster_next = offset;
271 si->cluster_nr = SWAPFILE_CLUSTER - 1;
272 found_free_cluster = 1;
275 if (unlikely(--latency_ration < 0)) {
277 latency_ration = LATENCY_LIMIT;
282 spin_lock(&si->lock);
283 si->cluster_nr = SWAPFILE_CLUSTER - 1;
284 si->lowest_alloc = 0;
288 if (!(si->flags & SWP_WRITEOK))
290 if (!si->highest_bit)
292 if (offset > si->highest_bit)
293 scan_base = offset = si->lowest_bit;
295 /* reuse swap entry of cache-only swap if not busy. */
296 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
298 spin_unlock(&si->lock);
299 swap_was_freed = __try_to_reclaim_swap(si, offset);
300 spin_lock(&si->lock);
301 /* entry was freed successfully, try to use this again */
304 goto scan; /* check next one */
307 if (si->swap_map[offset])
310 if (offset == si->lowest_bit)
312 if (offset == si->highest_bit)
315 if (si->inuse_pages == si->pages) {
316 si->lowest_bit = si->max;
319 si->swap_map[offset] = usage;
320 si->cluster_next = offset + 1;
321 si->flags -= SWP_SCANNING;
323 if (si->lowest_alloc) {
325 * Only set when SWP_DISCARDABLE, and there's a scan
326 * for a free cluster in progress or just completed.
328 if (found_free_cluster) {
330 * To optimize wear-levelling, discard the
331 * old data of the cluster, taking care not to
332 * discard any of its pages that have already
333 * been allocated by racing tasks (offset has
334 * already stepped over any at the beginning).
336 if (offset < si->highest_alloc &&
337 si->lowest_alloc <= last_in_cluster)
338 last_in_cluster = si->lowest_alloc - 1;
339 si->flags |= SWP_DISCARDING;
340 spin_unlock(&si->lock);
342 if (offset < last_in_cluster)
343 discard_swap_cluster(si, offset,
344 last_in_cluster - offset + 1);
346 spin_lock(&si->lock);
347 si->lowest_alloc = 0;
348 si->flags &= ~SWP_DISCARDING;
350 smp_mb(); /* wake_up_bit advises this */
351 wake_up_bit(&si->flags, ilog2(SWP_DISCARDING));
353 } else if (si->flags & SWP_DISCARDING) {
355 * Delay using pages allocated by racing tasks
356 * until the whole discard has been issued. We
357 * could defer that delay until swap_writepage,
358 * but it's easier to keep this self-contained.
360 spin_unlock(&si->lock);
361 wait_on_bit(&si->flags, ilog2(SWP_DISCARDING),
362 wait_for_discard, TASK_UNINTERRUPTIBLE);
363 spin_lock(&si->lock);
366 * Note pages allocated by racing tasks while the
367 * scan for a free cluster is in progress, so
368 * that the final discard can exclude them.
370 if (offset < si->lowest_alloc)
371 si->lowest_alloc = offset;
372 if (offset > si->highest_alloc)
373 si->highest_alloc = offset;
379 spin_unlock(&si->lock);
380 while (++offset <= si->highest_bit) {
381 if (!si->swap_map[offset]) {
382 spin_lock(&si->lock);
385 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
386 spin_lock(&si->lock);
389 if (unlikely(--latency_ration < 0)) {
391 latency_ration = LATENCY_LIMIT;
394 offset = si->lowest_bit;
395 while (++offset < scan_base) {
396 if (!si->swap_map[offset]) {
397 spin_lock(&si->lock);
400 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
401 spin_lock(&si->lock);
404 if (unlikely(--latency_ration < 0)) {
406 latency_ration = LATENCY_LIMIT;
409 spin_lock(&si->lock);
412 si->flags -= SWP_SCANNING;
416 swp_entry_t get_swap_page(void)
418 struct swap_info_struct *si;
424 spin_lock(&swap_lock);
425 if (atomic_long_read(&nr_swap_pages) <= 0)
427 atomic_long_dec(&nr_swap_pages);
429 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) {
430 hp_index = atomic_xchg(&highest_priority_index, -1);
432 * highest_priority_index records the highest-priority swap
433 * type that most recently freed swap entries. If its priority is
434 * higher than that of the swap_list.next swap type, we use it. It
435 * isn't protected by swap_lock, so it can be a stale value
436 * if the corresponding swap type has been swapped off. We double-check
437 * the flags here. It's even possible the swap type was swapped off
438 * and swapped on again with a changed priority. In such a rare
439 * case, a low-priority swap type might be used, but eventually
440 * the high-priority swap will be used after several rounds of
443 if (hp_index != -1 && hp_index != type &&
444 swap_info[type]->prio < swap_info[hp_index]->prio &&
445 (swap_info[hp_index]->flags & SWP_WRITEOK)) {
447 swap_list.next = type;
450 si = swap_info[type];
453 (!wrapped && si->prio != swap_info[next]->prio)) {
454 next = swap_list.head;
458 spin_lock(&si->lock);
459 if (!si->highest_bit) {
460 spin_unlock(&si->lock);
463 if (!(si->flags & SWP_WRITEOK)) {
464 spin_unlock(&si->lock);
468 swap_list.next = next;
470 spin_unlock(&swap_lock);
471 /* This is called for allocating swap entry for cache */
472 offset = scan_swap_map(si, SWAP_HAS_CACHE);
473 spin_unlock(&si->lock);
475 return swp_entry(type, offset);
476 spin_lock(&swap_lock);
477 next = swap_list.next;
480 atomic_long_inc(&nr_swap_pages);
482 spin_unlock(&swap_lock);
483 return (swp_entry_t) {0};
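/*
 * A minimal caller-side sketch, with an illustrative helper name: failure
 * is reported as entry.val == 0, and a successful allocation comes back
 * already marked SWAP_HAS_CACHE, so a caller that cannot attach a page to
 * the slot is expected to hand it back with swapcache_free().
 */
static int example_reserve_swap_slot(swp_entry_t *out)
{
	swp_entry_t entry = get_swap_page();

	if (!entry.val)
		return -ENOMEM;	/* every active swap device is full */
	*out = entry;		/* caller must add a page or free the slot */
	return 0;
}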
486 /* The only caller of this function is now the suspend routine */
487 swp_entry_t get_swap_page_of_type(int type)
489 struct swap_info_struct *si;
492 si = swap_info[type];
493 spin_lock(&si->lock);
494 if (si && (si->flags & SWP_WRITEOK)) {
495 atomic_long_dec(&nr_swap_pages);
496 /* This is called for allocating swap entry, not cache */
497 offset = scan_swap_map(si, 1);
499 spin_unlock(&si->lock);
500 return swp_entry(type, offset);
502 atomic_long_inc(&nr_swap_pages);
504 spin_unlock(&si->lock);
505 return (swp_entry_t) {0};
508 static struct swap_info_struct *swap_info_get(swp_entry_t entry)
510 struct swap_info_struct *p;
511 unsigned long offset, type;
515 type = swp_type(entry);
516 if (type >= nr_swapfiles)
519 if (!(p->flags & SWP_USED))
521 offset = swp_offset(entry);
522 if (offset >= p->max)
524 if (!p->swap_map[offset])
530 printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
533 printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
536 printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
539 printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
545 * This swap type has just freed a swap entry; check whether it is the
546 * highest-priority swap type to have done so. get_swap_page() uses
547 * highest_priority_index to find the highest-priority swap type. The
548 * swap_info_struct.lock can't protect us if there are multiple swap types
549 * active, so we use atomic_cmpxchg.
551 static void set_highest_priority_index(int type)
553 int old_hp_index, new_hp_index;
556 old_hp_index = atomic_read(&highest_priority_index);
557 if (old_hp_index != -1 &&
558 swap_info[old_hp_index]->prio >= swap_info[type]->prio)
561 } while (atomic_cmpxchg(&highest_priority_index,
562 old_hp_index, new_hp_index) != old_hp_index);
565 static unsigned char swap_entry_free(struct swap_info_struct *p,
566 swp_entry_t entry, unsigned char usage)
568 unsigned long offset = swp_offset(entry);
570 unsigned char has_cache;
572 count = p->swap_map[offset];
573 has_cache = count & SWAP_HAS_CACHE;
574 count &= ~SWAP_HAS_CACHE;
576 if (usage == SWAP_HAS_CACHE) {
577 VM_BUG_ON(!has_cache);
579 } else if (count == SWAP_MAP_SHMEM) {
581 * Or we could insist on shmem.c using a special
582 * swap_shmem_free() and free_shmem_swap_and_cache()...
585 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
586 if (count == COUNT_CONTINUED) {
587 if (swap_count_continued(p, offset, count))
588 count = SWAP_MAP_MAX | COUNT_CONTINUED;
590 count = SWAP_MAP_MAX;
596 mem_cgroup_uncharge_swap(entry);
598 usage = count | has_cache;
599 p->swap_map[offset] = usage;
601 /* free if no reference */
603 if (offset < p->lowest_bit)
604 p->lowest_bit = offset;
605 if (offset > p->highest_bit)
606 p->highest_bit = offset;
607 set_highest_priority_index(p->type);
608 atomic_long_inc(&nr_swap_pages);
610 frontswap_invalidate_page(p->type, offset);
611 if (p->flags & SWP_BLKDEV) {
612 struct gendisk *disk = p->bdev->bd_disk;
613 if (disk->fops->swap_slot_free_notify)
614 disk->fops->swap_slot_free_notify(p->bdev,
623 * Caller has made sure that the swapdevice corresponding to entry
624 * is still around or has not been recycled.
626 void swap_free(swp_entry_t entry)
628 struct swap_info_struct *p;
630 p = swap_info_get(entry);
632 swap_entry_free(p, entry, 1);
633 spin_unlock(&p->lock);
638 * Called after dropping swapcache to decrease refcnt to swap entries.
640 void swapcache_free(swp_entry_t entry, struct page *page)
642 struct swap_info_struct *p;
645 p = swap_info_get(entry);
647 count = swap_entry_free(p, entry, SWAP_HAS_CACHE);
649 mem_cgroup_uncharge_swapcache(page, entry, count != 0);
650 spin_unlock(&p->lock);
655 * How many references to page are currently swapped out?
656 * This does not give an exact answer when swap count is continued,
657 * but does include the high COUNT_CONTINUED flag to allow for that.
659 int page_swapcount(struct page *page)
662 struct swap_info_struct *p;
665 entry.val = page_private(page);
666 p = swap_info_get(entry);
668 count = swap_count(p->swap_map[swp_offset(entry)]);
669 spin_unlock(&p->lock);
675 * We can write to an anon page without COW if there are no other references
676 * to it. And as a side-effect, free up its swap: because the old content
677 * on disk will never be read, and seeking back there to write new content
678 * later would only waste time away from clustering.
680 int reuse_swap_page(struct page *page)
684 VM_BUG_ON(!PageLocked(page));
685 if (unlikely(PageKsm(page)))
687 count = page_mapcount(page);
688 if (count <= 1 && PageSwapCache(page)) {
689 count += page_swapcount(page);
690 if (count == 1 && !PageWriteback(page)) {
691 delete_from_swap_cache(page);
699 * If swap is getting full, or if there are no more mappings of this page,
700 * then try_to_free_swap is called to free its swap space.
702 int try_to_free_swap(struct page *page)
704 VM_BUG_ON(!PageLocked(page));
706 if (!PageSwapCache(page))
708 if (PageWriteback(page))
710 if (page_swapcount(page))
714 * Once hibernation has begun to create its image of memory,
715 * there's a danger that one of the calls to try_to_free_swap()
716 * - most probably a call from __try_to_reclaim_swap() while
717 * hibernation is allocating its own swap pages for the image,
718 * but conceivably even a call from memory reclaim - will free
719 * the swap from a page which has already been recorded in the
720 * image as a clean swapcache page, and then reuse its swap for
721 * another page of the image. On waking from hibernation, the
722 * original page might be freed under memory pressure, then
723 * later read back in from swap, now with the wrong data.
725 * Hibernation suspends storage while it is writing the image
726 * to disk so check that here.
728 if (pm_suspended_storage())
731 delete_from_swap_cache(page);
737 * Free the swap entry like above, but also try to
738 * free the page cache entry if it is the last user.
740 int free_swap_and_cache(swp_entry_t entry)
742 struct swap_info_struct *p;
743 struct page *page = NULL;
745 if (non_swap_entry(entry))
748 p = swap_info_get(entry);
750 if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
751 page = find_get_page(swap_address_space(entry),
753 if (page && !trylock_page(page)) {
754 page_cache_release(page);
758 spin_unlock(&p->lock);
762 * Not mapped elsewhere, or swap space full? Free it!
763 * Also recheck PageSwapCache now page is locked (above).
765 if (PageSwapCache(page) && !PageWriteback(page) &&
766 (!page_mapped(page) || vm_swap_full())) {
767 delete_from_swap_cache(page);
771 page_cache_release(page);
776 #ifdef CONFIG_HIBERNATION
778 * Find the swap type that corresponds to given device (if any).
780 * @offset - number of the PAGE_SIZE-sized block of the device, starting
781 * from 0, in which the swap header is expected to be located.
783 * This is needed for the suspend to disk (aka swsusp).
785 int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
787 struct block_device *bdev = NULL;
791 bdev = bdget(device);
793 spin_lock(&swap_lock);
794 for (type = 0; type < nr_swapfiles; type++) {
795 struct swap_info_struct *sis = swap_info[type];
797 if (!(sis->flags & SWP_WRITEOK))
802 *bdev_p = bdgrab(sis->bdev);
804 spin_unlock(&swap_lock);
807 if (bdev == sis->bdev) {
808 struct swap_extent *se = &sis->first_swap_extent;
810 if (se->start_block == offset) {
812 *bdev_p = bdgrab(sis->bdev);
814 spin_unlock(&swap_lock);
820 spin_unlock(&swap_lock);
828 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
829 * corresponding to given index in swap_info (swap type).
831 sector_t swapdev_block(int type, pgoff_t offset)
833 struct block_device *bdev;
835 if ((unsigned int)type >= nr_swapfiles)
837 if (!(swap_info[type]->flags & SWP_WRITEOK))
839 return map_swap_entry(swp_entry(type, offset), &bdev);
843 * Return either the total number of swap pages of given type, or the number
844 * of free pages of that type (depending on @free)
846 * This is needed for software suspend
848 unsigned int count_swap_pages(int type, int free)
852 spin_lock(&swap_lock);
853 if ((unsigned int)type < nr_swapfiles) {
854 struct swap_info_struct *sis = swap_info[type];
856 spin_lock(&sis->lock);
857 if (sis->flags & SWP_WRITEOK) {
860 n -= sis->inuse_pages;
862 spin_unlock(&sis->lock);
864 spin_unlock(&swap_lock);
867 #endif /* CONFIG_HIBERNATION */
870 * No need to decide whether this PTE shares the swap entry with others,
871 * just let do_wp_page work it out if a write is requested later - to
872 * force COW, vm_page_prot omits write permission from any private vma.
874 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
875 unsigned long addr, swp_entry_t entry, struct page *page)
877 struct page *swapcache;
878 struct mem_cgroup *memcg;
884 page = ksm_might_need_to_copy(page, vma, addr);
888 if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
889 GFP_KERNEL, &memcg)) {
894 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
895 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
896 mem_cgroup_cancel_charge_swapin(memcg);
901 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
902 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
904 set_pte_at(vma->vm_mm, addr, pte,
905 pte_mkold(mk_pte(page, vma->vm_page_prot)));
906 if (page == swapcache)
907 page_add_anon_rmap(page, vma, addr);
908 else /* ksm created a completely new copy */
909 page_add_new_anon_rmap(page, vma, addr);
910 mem_cgroup_commit_charge_swapin(page, memcg);
913 * Move the page to the active list so it is not
914 * immediately swapped out again after swapon.
918 pte_unmap_unlock(pte, ptl);
920 if (page != swapcache) {
927 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
928 unsigned long addr, unsigned long end,
929 swp_entry_t entry, struct page *page)
931 pte_t swp_pte = swp_entry_to_pte(entry);
936 * We don't actually need pte lock while scanning for swp_pte: since
937 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
938 * page table while we're scanning; though it could get zapped, and on
939 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
940 * of unmatched parts which look like swp_pte, so unuse_pte must
941 * recheck under pte lock. Scanning without pte lock lets it be
942 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
944 pte = pte_offset_map(pmd, addr);
947 * swapoff spends a _lot_ of time in this loop!
948 * Test inline before going to call unuse_pte.
950 if (unlikely(pte_same(*pte, swp_pte))) {
952 ret = unuse_pte(vma, pmd, addr, entry, page);
955 pte = pte_offset_map(pmd, addr);
957 } while (pte++, addr += PAGE_SIZE, addr != end);
963 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
964 unsigned long addr, unsigned long end,
965 swp_entry_t entry, struct page *page)
971 pmd = pmd_offset(pud, addr);
973 next = pmd_addr_end(addr, end);
974 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
976 ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
979 } while (pmd++, addr = next, addr != end);
983 static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
984 unsigned long addr, unsigned long end,
985 swp_entry_t entry, struct page *page)
991 pud = pud_offset(pgd, addr);
993 next = pud_addr_end(addr, end);
994 if (pud_none_or_clear_bad(pud))
996 ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
999 } while (pud++, addr = next, addr != end);
1003 static int unuse_vma(struct vm_area_struct *vma,
1004 swp_entry_t entry, struct page *page)
1007 unsigned long addr, end, next;
1010 if (page_anon_vma(page)) {
1011 addr = page_address_in_vma(page, vma);
1012 if (addr == -EFAULT)
1015 end = addr + PAGE_SIZE;
1017 addr = vma->vm_start;
1021 pgd = pgd_offset(vma->vm_mm, addr);
1023 next = pgd_addr_end(addr, end);
1024 if (pgd_none_or_clear_bad(pgd))
1026 ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
1029 } while (pgd++, addr = next, addr != end);
1033 static int unuse_mm(struct mm_struct *mm,
1034 swp_entry_t entry, struct page *page)
1036 struct vm_area_struct *vma;
1039 if (!down_read_trylock(&mm->mmap_sem)) {
1041 * Activate page so shrink_inactive_list is unlikely to unmap
1042 * its ptes while lock is dropped, so swapoff can make progress.
1044 activate_page(page);
1046 down_read(&mm->mmap_sem);
1049 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1050 if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
1053 up_read(&mm->mmap_sem);
1054 return (ret < 0)? ret: 0;
1058 * Scan swap_map (or frontswap_map if frontswap parameter is true)
1059 * from current position to next entry still in use.
1060 * Recycle to start on reaching the end, returning 0 when empty.
1062 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1063 unsigned int prev, bool frontswap)
1065 unsigned int max = si->max;
1066 unsigned int i = prev;
1067 unsigned char count;
1070 * No need for swap_lock here: we're just looking
1071 * for whether an entry is in use, not modifying it; false
1072 * hits are okay, and sys_swapoff() has already prevented new
1073 * allocations from this area (while holding swap_lock).
1082 * No entries in use at top of swap_map,
1083 * loop back to start and recheck there.
1090 if (frontswap_test(si, i))
1095 count = si->swap_map[i];
1096 if (count && swap_count(count) != SWAP_MAP_BAD)
1103 * We completely avoid races by reading each swap page in advance,
1104 * and then search for the process using it. All the necessary
1105 * page table adjustments can then be made atomically.
1107 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
1108 * pages_to_unuse==0 means all pages; ignored if frontswap is false
1110 int try_to_unuse(unsigned int type, bool frontswap,
1111 unsigned long pages_to_unuse)
1113 struct swap_info_struct *si = swap_info[type];
1114 struct mm_struct *start_mm;
1115 unsigned char *swap_map;
1116 unsigned char swcount;
1123 * When searching mms for an entry, a good strategy is to
1124 * start at the first mm we freed the previous entry from
1125 * (though actually we don't notice whether we or coincidence
1126 * freed the entry). Initialize this start_mm with a hold.
1128 * A simpler strategy would be to start at the last mm we
1129 * freed the previous entry from; but that would take less
1130 * advantage of mmlist ordering, which clusters forked mms
1131 * together, child after parent. If we race with dup_mmap(), we
1132 * prefer to resolve parent before child, lest we miss entries
1133 * duplicated after we scanned child: using last mm would invert
1136 start_mm = &init_mm;
1137 atomic_inc(&init_mm.mm_users);
1140 * Keep on scanning until all entries have gone. Usually,
1141 * one pass through swap_map is enough, but not necessarily:
1142 * there are races when an instance of an entry might be missed.
1144 while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1145 if (signal_pending(current)) {
1151 * Get a page for the entry, using the existing swap
1152 * cache page if there is one. Otherwise, get a clean
1153 * page and read the swap into it.
1155 swap_map = &si->swap_map[i];
1156 entry = swp_entry(type, i);
1157 page = read_swap_cache_async(entry,
1158 GFP_HIGHUSER_MOVABLE, NULL, 0);
1161 * Either swap_duplicate() failed because entry
1162 * has been freed independently, and will not be
1163 * reused since sys_swapoff() already disabled
1164 * allocation from here, or alloc_page() failed.
1173 * Don't hold on to start_mm if it looks like exiting.
1175 if (atomic_read(&start_mm->mm_users) == 1) {
1177 start_mm = &init_mm;
1178 atomic_inc(&init_mm.mm_users);
1182 * Wait for and lock page. When do_swap_page races with
1183 * try_to_unuse, do_swap_page can handle the fault much
1184 * faster than try_to_unuse can locate the entry. This
1185 * apparently redundant "wait_on_page_locked" lets try_to_unuse
1186 * defer to do_swap_page in such a case - in some tests,
1187 * do_swap_page and try_to_unuse repeatedly compete.
1189 wait_on_page_locked(page);
1190 wait_on_page_writeback(page);
1192 wait_on_page_writeback(page);
1195 * Remove all references to entry.
1197 swcount = *swap_map;
1198 if (swap_count(swcount) == SWAP_MAP_SHMEM) {
1199 retval = shmem_unuse(entry, page);
1200 /* page has already been unlocked and released */
1205 if (swap_count(swcount) && start_mm != &init_mm)
1206 retval = unuse_mm(start_mm, entry, page);
1208 if (swap_count(*swap_map)) {
1209 int set_start_mm = (*swap_map >= swcount);
1210 struct list_head *p = &start_mm->mmlist;
1211 struct mm_struct *new_start_mm = start_mm;
1212 struct mm_struct *prev_mm = start_mm;
1213 struct mm_struct *mm;
1215 atomic_inc(&new_start_mm->mm_users);
1216 atomic_inc(&prev_mm->mm_users);
1217 spin_lock(&mmlist_lock);
1218 while (swap_count(*swap_map) && !retval &&
1219 (p = p->next) != &start_mm->mmlist) {
1220 mm = list_entry(p, struct mm_struct, mmlist);
1221 if (!atomic_inc_not_zero(&mm->mm_users))
1223 spin_unlock(&mmlist_lock);
1229 swcount = *swap_map;
1230 if (!swap_count(swcount)) /* any usage ? */
1232 else if (mm == &init_mm)
1235 retval = unuse_mm(mm, entry, page);
1237 if (set_start_mm && *swap_map < swcount) {
1238 mmput(new_start_mm);
1239 atomic_inc(&mm->mm_users);
1243 spin_lock(&mmlist_lock);
1245 spin_unlock(&mmlist_lock);
1248 start_mm = new_start_mm;
1252 page_cache_release(page);
1257 * If a reference remains (rare), we would like to leave
1258 * the page in the swap cache; but try_to_unmap could
1259 * then re-duplicate the entry once we drop page lock,
1260 * so we might loop indefinitely; also, that page could
1261 * not be swapped out to other storage meanwhile. So:
1262 * delete from cache even if there's another reference,
1263 * after ensuring that the data has been saved to disk -
1264 * since if the reference remains (rarer), it will be
1265 * read from disk into another page. Splitting into two
1266 * pages would be incorrect if swap supported "shared
1267 * private" pages, but they are handled by tmpfs files.
1269 * Given how unuse_vma() targets one particular offset
1270 * in an anon_vma, once the anon_vma has been determined,
1271 * this splitting happens to be just what is needed to
1272 * handle where KSM pages have been swapped out: re-reading
1273 * is unnecessarily slow, but we can fix that later on.
1275 if (swap_count(*swap_map) &&
1276 PageDirty(page) && PageSwapCache(page)) {
1277 struct writeback_control wbc = {
1278 .sync_mode = WB_SYNC_NONE,
1281 swap_writepage(page, &wbc);
1283 wait_on_page_writeback(page);
1287 * It is conceivable that a racing task removed this page from
1288 * swap cache just before we acquired the page lock at the top,
1289 * or while we dropped it in unuse_mm(). The page might even
1290 * be back in swap cache on another swap area: that we must not
1291 * delete, since it may not have been written out to swap yet.
1293 if (PageSwapCache(page) &&
1294 likely(page_private(page) == entry.val))
1295 delete_from_swap_cache(page);
1298 * So we could skip searching mms once swap count went
1299 * to 1, we did not mark any present ptes as dirty: must
1300 * mark page dirty so shrink_page_list will preserve it.
1304 page_cache_release(page);
1307 * Make sure that we aren't completely killing
1308 * interactive performance.
1311 if (frontswap && pages_to_unuse > 0) {
1312 if (!--pages_to_unuse)
1322 * After a successful try_to_unuse, if no swap is now in use, we know
1323 * we can empty the mmlist. swap_lock must be held on entry and exit.
1324 * Note that mmlist_lock nests inside swap_lock, and an mm must be
1325 * added to the mmlist just after page_duplicate - before would be racy.
1327 static void drain_mmlist(void)
1329 struct list_head *p, *next;
1332 for (type = 0; type < nr_swapfiles; type++)
1333 if (swap_info[type]->inuse_pages)
1335 spin_lock(&mmlist_lock);
1336 list_for_each_safe(p, next, &init_mm.mmlist)
1338 spin_unlock(&mmlist_lock);
1342 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
1343 * corresponds to page offset for the specified swap entry.
1344 * Note that the return type of this function is sector_t, but it returns the page
1345 * offset into the bdev, not the sector offset.
1347 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
1349 struct swap_info_struct *sis;
1350 struct swap_extent *start_se;
1351 struct swap_extent *se;
1354 sis = swap_info[swp_type(entry)];
1357 offset = swp_offset(entry);
1358 start_se = sis->curr_swap_extent;
1362 struct list_head *lh;
1364 if (se->start_page <= offset &&
1365 offset < (se->start_page + se->nr_pages)) {
1366 return se->start_block + (offset - se->start_page);
1369 se = list_entry(lh, struct swap_extent, list);
1370 sis->curr_swap_extent = se;
1371 BUG_ON(se == start_se); /* It *must* be present */
1376 * Returns the page offset into bdev for the specified page's swap entry.
1378 sector_t map_swap_page(struct page *page, struct block_device **bdev)
1381 entry.val = page_private(page);
1382 return map_swap_entry(entry, bdev);
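/*
 * A minimal sketch of the (type, offset) packing that map_swap_entry() and
 * map_swap_page() above rely on: swp_entry() folds a swap device index and
 * a page offset into one swp_entry_t, and swp_type()/swp_offset() recover
 * them. Illustrative only.
 */
static void example_swp_entry_round_trip(void)
{
	swp_entry_t entry = swp_entry(1, 42);	/* device type 1, slot 42 */

	BUG_ON(swp_type(entry) != 1);
	BUG_ON(swp_offset(entry) != 42);
}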
1386 * Free all of a swapdev's extent information
1388 static void destroy_swap_extents(struct swap_info_struct *sis)
1390 while (!list_empty(&sis->first_swap_extent.list)) {
1391 struct swap_extent *se;
1393 se = list_entry(sis->first_swap_extent.list.next,
1394 struct swap_extent, list);
1395 list_del(&se->list);
1399 if (sis->flags & SWP_FILE) {
1400 struct file *swap_file = sis->swap_file;
1401 struct address_space *mapping = swap_file->f_mapping;
1403 sis->flags &= ~SWP_FILE;
1404 mapping->a_ops->swap_deactivate(swap_file);
1409 * Add a block range (and the corresponding page range) into this swapdev's
1410 * extent list. The extent list is kept sorted in page order.
1412 * This function rather assumes that it is called in ascending page order.
1415 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
1416 unsigned long nr_pages, sector_t start_block)
1418 struct swap_extent *se;
1419 struct swap_extent *new_se;
1420 struct list_head *lh;
1422 if (start_page == 0) {
1423 se = &sis->first_swap_extent;
1424 sis->curr_swap_extent = se;
1426 se->nr_pages = nr_pages;
1427 se->start_block = start_block;
1430 lh = sis->first_swap_extent.list.prev; /* Highest extent */
1431 se = list_entry(lh, struct swap_extent, list);
1432 BUG_ON(se->start_page + se->nr_pages != start_page);
1433 if (se->start_block + se->nr_pages == start_block) {
1435 se->nr_pages += nr_pages;
1441 * No merge. Insert a new extent, preserving ordering.
1443 new_se = kmalloc(sizeof(*se), GFP_KERNEL);
1446 new_se->start_page = start_page;
1447 new_se->nr_pages = nr_pages;
1448 new_se->start_block = start_block;
1450 list_add_tail(&new_se->list, &sis->first_swap_extent.list);
1455 * A `swap extent' is a simple thing which maps a contiguous range of pages
1456 * onto a contiguous range of disk blocks. An ordered list of swap extents
1457 * is built at swapon time and is then used at swap_writepage/swap_readpage
1458 * time for locating where on disk a page belongs.
1460 * If the swapfile is an S_ISBLK block device, a single extent is installed.
1461 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
1462 * swap files identically.
1464 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
1465 * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
1466 * swapfiles are handled *identically* after swapon time.
1468 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
1469 * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If
1470 * some stray blocks are found which do not fall within the PAGE_SIZE alignment
1471 * requirements, they are simply tossed out - we will never use those blocks
1474 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This
1475 * prevents root from shooting her foot off by ftruncating an in-use swapfile,
1476 * which will scribble on the fs.
1478 * The amount of disk space which a single swap extent represents varies.
1479 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
1480 * extents in the list. To avoid much list walking, we cache the previous
1481 * search location in `curr_swap_extent', and start new searches from there.
1482 * This is extremely effective. The average number of iterations in
1483 * map_swap_page() has been measured at about 0.3 per page. - akpm.
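/*
 * A minimal lookup sketch matching the extent layout described above: the
 * extent covering a page offset supplies its block as start_block plus the
 * offset within the extent. This is an uncached variant of what
 * map_swap_entry() does (the real code starts from curr_swap_extent); the
 * helper name is illustrative only.
 */
static sector_t example_extent_lookup(struct swap_info_struct *sis,
				      pgoff_t offset)
{
	struct swap_extent *se = &sis->first_swap_extent;

	/* first_swap_extent heads a circular list of any further extents */
	do {
		if (offset >= se->start_page &&
		    offset < se->start_page + se->nr_pages)
			return se->start_block + (offset - se->start_page);
		se = list_entry(se->list.next, struct swap_extent, list);
	} while (se != &sis->first_swap_extent);

	return 0;	/* not reached for a valid offset */
}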
1485 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
1487 struct file *swap_file = sis->swap_file;
1488 struct address_space *mapping = swap_file->f_mapping;
1489 struct inode *inode = mapping->host;
1492 if (S_ISBLK(inode->i_mode)) {
1493 ret = add_swap_extent(sis, 0, sis->max, 0);
1498 if (mapping->a_ops->swap_activate) {
1499 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
1501 sis->flags |= SWP_FILE;
1502 ret = add_swap_extent(sis, 0, sis->max, 0);
1508 return generic_swapfile_activate(sis, swap_file, span);
1511 static void _enable_swap_info(struct swap_info_struct *p, int prio,
1512 unsigned char *swap_map,
1513 unsigned long *frontswap_map)
1520 p->prio = --least_priority;
1521 p->swap_map = swap_map;
1522 frontswap_map_set(p, frontswap_map);
1523 p->flags |= SWP_WRITEOK;
1524 atomic_long_add(p->pages, &nr_swap_pages);
1525 total_swap_pages += p->pages;
1527 /* insert swap space into swap_list: */
1529 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) {
1530 if (p->prio >= swap_info[i]->prio)
1536 swap_list.head = swap_list.next = p->type;
1538 swap_info[prev]->next = p->type;
1541 static void enable_swap_info(struct swap_info_struct *p, int prio,
1542 unsigned char *swap_map,
1543 unsigned long *frontswap_map)
1545 spin_lock(&swap_lock);
1546 spin_lock(&p->lock);
1547 _enable_swap_info(p, prio, swap_map, frontswap_map);
1548 frontswap_init(p->type);
1549 spin_unlock(&p->lock);
1550 spin_unlock(&swap_lock);
1553 static void reinsert_swap_info(struct swap_info_struct *p)
1555 spin_lock(&swap_lock);
1556 spin_lock(&p->lock);
1557 _enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
1558 spin_unlock(&p->lock);
1559 spin_unlock(&swap_lock);
1562 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1564 struct swap_info_struct *p = NULL;
1565 unsigned char *swap_map;
1566 struct file *swap_file, *victim;
1567 struct address_space *mapping;
1568 struct inode *inode;
1569 struct filename *pathname;
1573 if (!capable(CAP_SYS_ADMIN))
1576 BUG_ON(!current->mm);
1578 pathname = getname(specialfile);
1579 if (IS_ERR(pathname))
1580 return PTR_ERR(pathname);
1582 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
1583 err = PTR_ERR(victim);
1587 mapping = victim->f_mapping;
1589 spin_lock(&swap_lock);
1590 for (type = swap_list.head; type >= 0; type = swap_info[type]->next) {
1591 p = swap_info[type];
1592 if (p->flags & SWP_WRITEOK) {
1593 if (p->swap_file->f_mapping == mapping)
1600 spin_unlock(&swap_lock);
1603 if (!security_vm_enough_memory_mm(current->mm, p->pages))
1604 vm_unacct_memory(p->pages);
1607 spin_unlock(&swap_lock);
1611 swap_list.head = p->next;
1613 swap_info[prev]->next = p->next;
1614 if (type == swap_list.next) {
1615 /* just pick something that's safe... */
1616 swap_list.next = swap_list.head;
1618 spin_lock(&p->lock);
1620 for (i = p->next; i >= 0; i = swap_info[i]->next)
1621 swap_info[i]->prio = p->prio--;
1624 atomic_long_sub(p->pages, &nr_swap_pages);
1625 total_swap_pages -= p->pages;
1626 p->flags &= ~SWP_WRITEOK;
1627 spin_unlock(&p->lock);
1628 spin_unlock(&swap_lock);
1630 set_current_oom_origin();
1631 err = try_to_unuse(type, false, 0); /* force all pages to be unused */
1632 clear_current_oom_origin();
1635 /* re-insert swap space into swap_list */
1636 reinsert_swap_info(p);
1640 destroy_swap_extents(p);
1641 if (p->flags & SWP_CONTINUED)
1642 free_swap_count_continuations(p);
1644 mutex_lock(&swapon_mutex);
1645 spin_lock(&swap_lock);
1646 spin_lock(&p->lock);
1649 /* wait for anyone still in scan_swap_map */
1650 p->highest_bit = 0; /* cuts scans short */
1651 while (p->flags >= SWP_SCANNING) {
1652 spin_unlock(&p->lock);
1653 spin_unlock(&swap_lock);
1654 schedule_timeout_uninterruptible(1);
1655 spin_lock(&swap_lock);
1656 spin_lock(&p->lock);
1659 swap_file = p->swap_file;
1660 p->swap_file = NULL;
1662 swap_map = p->swap_map;
1665 frontswap_invalidate_area(type);
1666 spin_unlock(&p->lock);
1667 spin_unlock(&swap_lock);
1668 mutex_unlock(&swapon_mutex);
1670 vfree(frontswap_map_get(p));
1671 /* Destroy swap account information */
1672 swap_cgroup_swapoff(type);
1674 inode = mapping->host;
1675 if (S_ISBLK(inode->i_mode)) {
1676 struct block_device *bdev = I_BDEV(inode);
1677 set_blocksize(bdev, p->old_block_size);
1678 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1680 mutex_lock(&inode->i_mutex);
1681 inode->i_flags &= ~S_SWAPFILE;
1682 mutex_unlock(&inode->i_mutex);
1684 filp_close(swap_file, NULL);
1686 atomic_inc(&proc_poll_event);
1687 wake_up_interruptible(&proc_poll_wait);
1690 filp_close(victim, NULL);
1696 #ifdef CONFIG_PROC_FS
1697 static unsigned swaps_poll(struct file *file, poll_table *wait)
1699 struct seq_file *seq = file->private_data;
1701 poll_wait(file, &proc_poll_wait, wait);
1703 if (seq->poll_event != atomic_read(&proc_poll_event)) {
1704 seq->poll_event = atomic_read(&proc_poll_event);
1705 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
1708 return POLLIN | POLLRDNORM;
1712 static void *swap_start(struct seq_file *swap, loff_t *pos)
1714 struct swap_info_struct *si;
1718 mutex_lock(&swapon_mutex);
1721 return SEQ_START_TOKEN;
1723 for (type = 0; type < nr_swapfiles; type++) {
1724 smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1725 si = swap_info[type];
1726 if (!(si->flags & SWP_USED) || !si->swap_map)
1735 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
1737 struct swap_info_struct *si = v;
1740 if (v == SEQ_START_TOKEN)
1743 type = si->type + 1;
1745 for (; type < nr_swapfiles; type++) {
1746 smp_rmb(); /* read nr_swapfiles before swap_info[type] */
1747 si = swap_info[type];
1748 if (!(si->flags & SWP_USED) || !si->swap_map)
1757 static void swap_stop(struct seq_file *swap, void *v)
1759 mutex_unlock(&swapon_mutex);
1762 static int swap_show(struct seq_file *swap, void *v)
1764 struct swap_info_struct *si = v;
1768 if (si == SEQ_START_TOKEN) {
1769 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n");
1773 file = si->swap_file;
1774 len = seq_path(swap, &file->f_path, " \t\n\\");
1775 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
1776 len < 40 ? 40 - len : 1, " ",
1777 S_ISBLK(file_inode(file)->i_mode) ?
1778 "partition" : "file\t",
1779 si->pages << (PAGE_SHIFT - 10),
1780 si->inuse_pages << (PAGE_SHIFT - 10),
1785 static const struct seq_operations swaps_op = {
1786 .start = swap_start,
1792 static int swaps_open(struct inode *inode, struct file *file)
1794 struct seq_file *seq;
1797 ret = seq_open(file, &swaps_op);
1801 seq = file->private_data;
1802 seq->poll_event = atomic_read(&proc_poll_event);
1806 static const struct file_operations proc_swaps_operations = {
1809 .llseek = seq_lseek,
1810 .release = seq_release,
1814 static int __init procswaps_init(void)
1816 proc_create("swaps", 0, NULL, &proc_swaps_operations);
1819 __initcall(procswaps_init);
1820 #endif /* CONFIG_PROC_FS */
1822 #ifdef MAX_SWAPFILES_CHECK
1823 static int __init max_swapfiles_check(void)
1825 MAX_SWAPFILES_CHECK();
1828 late_initcall(max_swapfiles_check);
1831 static struct swap_info_struct *alloc_swap_info(void)
1833 struct swap_info_struct *p;
1836 p = kzalloc(sizeof(*p), GFP_KERNEL);
1838 return ERR_PTR(-ENOMEM);
1840 spin_lock(&swap_lock);
1841 for (type = 0; type < nr_swapfiles; type++) {
1842 if (!(swap_info[type]->flags & SWP_USED))
1845 if (type >= MAX_SWAPFILES) {
1846 spin_unlock(&swap_lock);
1848 return ERR_PTR(-EPERM);
1850 if (type >= nr_swapfiles) {
1852 swap_info[type] = p;
1854 * Write swap_info[type] before nr_swapfiles, in case a
1855 * racing procfs swap_start() or swap_next() is reading them.
1856 * (We never shrink nr_swapfiles, we never free this entry.)
1862 p = swap_info[type];
1864 * Do not memset this entry: a racing procfs swap_next()
1865 * would be relying on p->type to remain valid.
1868 INIT_LIST_HEAD(&p->first_swap_extent.list);
1869 p->flags = SWP_USED;
1871 spin_unlock(&swap_lock);
1872 spin_lock_init(&p->lock);
1877 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
1881 if (S_ISBLK(inode->i_mode)) {
1882 p->bdev = bdgrab(I_BDEV(inode));
1883 error = blkdev_get(p->bdev,
1884 FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1890 p->old_block_size = block_size(p->bdev);
1891 error = set_blocksize(p->bdev, PAGE_SIZE);
1894 p->flags |= SWP_BLKDEV;
1895 } else if (S_ISREG(inode->i_mode)) {
1896 p->bdev = inode->i_sb->s_bdev;
1897 mutex_lock(&inode->i_mutex);
1898 if (IS_SWAPFILE(inode))
1906 static unsigned long read_swap_header(struct swap_info_struct *p,
1907 union swap_header *swap_header,
1908 struct inode *inode)
1911 unsigned long maxpages;
1912 unsigned long swapfilepages;
1914 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
1915 printk(KERN_ERR "Unable to find swap-space signature\n");
1919 /* swap partition endianness hack... */
1920 if (swab32(swap_header->info.version) == 1) {
1921 swab32s(&swap_header->info.version);
1922 swab32s(&swap_header->info.last_page);
1923 swab32s(&swap_header->info.nr_badpages);
1924 for (i = 0; i < swap_header->info.nr_badpages; i++)
1925 swab32s(&swap_header->info.badpages[i]);
1927 /* Check the swap header's sub-version */
1928 if (swap_header->info.version != 1) {
1930 "Unable to handle swap header version %d\n",
1931 swap_header->info.version);
1936 p->cluster_next = 1;
1940 * Find out how many pages are allowed for a single swap
1941 * device. There are two limiting factors: 1) the number
1942 * of bits for the swap offset in the swp_entry_t type, and
1943 * 2) the number of bits in the swap pte as defined by the
1944 * different architectures. In order to find the
1945 * largest possible bit mask, a swap entry with swap type 0
1946 * and swap offset ~0UL is created, encoded to a swap pte,
1947 * decoded to a swp_entry_t again, and finally the swap
1948 * offset is extracted. This will mask all the bits from
1949 * the initial ~0UL mask that can't be encoded in either
1950 * the swp_entry_t or the architecture definition of a
1953 maxpages = swp_offset(pte_to_swp_entry(
1954 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
1955 if (maxpages > swap_header->info.last_page) {
1956 maxpages = swap_header->info.last_page + 1;
1957 /* p->max is an unsigned int: don't overflow it */
1958 if ((unsigned int)maxpages == 0)
1959 maxpages = UINT_MAX;
1961 p->highest_bit = maxpages - 1;
1965 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
1966 if (swapfilepages && maxpages > swapfilepages) {
1968 "Swap area shorter than signature indicates\n");
1971 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
1973 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1979 static int setup_swap_map_and_extents(struct swap_info_struct *p,
1980 union swap_header *swap_header,
1981 unsigned char *swap_map,
1982 unsigned long maxpages,
1986 unsigned int nr_good_pages;
1989 nr_good_pages = maxpages - 1; /* omit header page */
1991 for (i = 0; i < swap_header->info.nr_badpages; i++) {
1992 unsigned int page_nr = swap_header->info.badpages[i];
1993 if (page_nr == 0 || page_nr > swap_header->info.last_page)
1995 if (page_nr < maxpages) {
1996 swap_map[page_nr] = SWAP_MAP_BAD;
2001 if (nr_good_pages) {
2002 swap_map[0] = SWAP_MAP_BAD;
2004 p->pages = nr_good_pages;
2005 nr_extents = setup_swap_extents(p, span);
2008 nr_good_pages = p->pages;
2010 if (!nr_good_pages) {
2011 printk(KERN_WARNING "Empty swap-file\n");
2018 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2020 struct swap_info_struct *p;
2021 struct filename *name;
2022 struct file *swap_file = NULL;
2023 struct address_space *mapping;
2027 union swap_header *swap_header;
2030 unsigned long maxpages;
2031 unsigned char *swap_map = NULL;
2032 unsigned long *frontswap_map = NULL;
2033 struct page *page = NULL;
2034 struct inode *inode = NULL;
2036 if (swap_flags & ~SWAP_FLAGS_VALID)
2039 if (!capable(CAP_SYS_ADMIN))
2042 p = alloc_swap_info();
2046 name = getname(specialfile);
2048 error = PTR_ERR(name);
2052 swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
2053 if (IS_ERR(swap_file)) {
2054 error = PTR_ERR(swap_file);
2059 p->swap_file = swap_file;
2060 mapping = swap_file->f_mapping;
2062 for (i = 0; i < nr_swapfiles; i++) {
2063 struct swap_info_struct *q = swap_info[i];
2065 if (q == p || !q->swap_file)
2067 if (mapping == q->swap_file->f_mapping) {
2073 inode = mapping->host;
2074 /* If S_ISREG(inode->i_mode), claim_swapfile() will do mutex_lock(&inode->i_mutex) */
2075 error = claim_swapfile(p, inode);
2076 if (unlikely(error))
2080 * Read the swap header.
2082 if (!mapping->a_ops->readpage) {
2086 page = read_mapping_page(mapping, 0, swap_file);
2088 error = PTR_ERR(page);
2091 swap_header = kmap(page);
2093 maxpages = read_swap_header(p, swap_header, inode);
2094 if (unlikely(!maxpages)) {
2099 /* OK, set up the swap map and apply the bad block list */
2100 swap_map = vzalloc(maxpages);
2106 error = swap_cgroup_swapon(p->type, maxpages);
2110 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
2112 if (unlikely(nr_extents < 0)) {
2116 /* frontswap enabled? set up bit-per-page map for frontswap */
2117 if (frontswap_enabled)
2118 frontswap_map = vzalloc(maxpages / sizeof(long));
2121 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
2122 p->flags |= SWP_SOLIDSTATE;
2123 p->cluster_next = 1 + (random32() % p->highest_bit);
2125 if ((swap_flags & SWAP_FLAG_DISCARD) && discard_swap(p) == 0)
2126 p->flags |= SWP_DISCARDABLE;
2129 mutex_lock(&swapon_mutex);
2131 if (swap_flags & SWAP_FLAG_PREFER)
2133 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2134 enable_swap_info(p, prio, swap_map, frontswap_map);
2136 printk(KERN_INFO "Adding %uk swap on %s. "
2137 "Priority:%d extents:%d across:%lluk %s%s%s\n",
2138 p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
2139 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2140 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2141 (p->flags & SWP_DISCARDABLE) ? "D" : "",
2142 (frontswap_map) ? "FS" : "");
2144 mutex_unlock(&swapon_mutex);
2145 atomic_inc(&proc_poll_event);
2146 wake_up_interruptible(&proc_poll_wait);
2148 if (S_ISREG(inode->i_mode))
2149 inode->i_flags |= S_SWAPFILE;
2153 if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
2154 set_blocksize(p->bdev, p->old_block_size);
2155 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2157 destroy_swap_extents(p);
2158 swap_cgroup_swapoff(p->type);
2159 spin_lock(&swap_lock);
2160 p->swap_file = NULL;
2162 spin_unlock(&swap_lock);
2165 if (inode && S_ISREG(inode->i_mode)) {
2166 mutex_unlock(&inode->i_mutex);
2169 filp_close(swap_file, NULL);
2172 if (page && !IS_ERR(page)) {
2174 page_cache_release(page);
2178 if (inode && S_ISREG(inode->i_mode))
2179 mutex_unlock(&inode->i_mutex);
2183 void si_swapinfo(struct sysinfo *val)
2186 unsigned long nr_to_be_unused = 0;
2188 spin_lock(&swap_lock);
2189 for (type = 0; type < nr_swapfiles; type++) {
2190 struct swap_info_struct *si = swap_info[type];
2192 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
2193 nr_to_be_unused += si->inuse_pages;
2195 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
2196 val->totalswap = total_swap_pages + nr_to_be_unused;
2197 spin_unlock(&swap_lock);
2201 * Verify that a swap entry is valid and increment its swap map count.
2203 * Returns an error code in the following cases:
2205 * - swp_entry is invalid -> EINVAL
2206 * - swp_entry is a migration entry -> EINVAL
2207 * - swap-cache reference is requested but there is already one. -> EEXIST
2208 * - swap-cache reference is requested but the entry is not used. -> ENOENT
2209 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
2211 static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
2213 struct swap_info_struct *p;
2214 unsigned long offset, type;
2215 unsigned char count;
2216 unsigned char has_cache;
2219 if (non_swap_entry(entry))
2222 type = swp_type(entry);
2223 if (type >= nr_swapfiles)
2225 p = swap_info[type];
2226 offset = swp_offset(entry);
2228 spin_lock(&p->lock);
2229 if (unlikely(offset >= p->max))
2232 count = p->swap_map[offset];
2233 has_cache = count & SWAP_HAS_CACHE;
2234 count &= ~SWAP_HAS_CACHE;
2237 if (usage == SWAP_HAS_CACHE) {
2239 /* set SWAP_HAS_CACHE if there is no cache and entry is used */
2240 if (!has_cache && count)
2241 has_cache = SWAP_HAS_CACHE;
2242 else if (has_cache) /* someone else added cache */
2244 else /* no users remaining */
2247 } else if (count || has_cache) {
2249 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
2251 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
2253 else if (swap_count_continued(p, offset, count))
2254 count = COUNT_CONTINUED;
2258 err = -ENOENT; /* unused swap entry */
2260 p->swap_map[offset] = count | has_cache;
2263 spin_unlock(&p->lock);
2268 printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
2273 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
2274 * (in which case its reference count is never incremented).
2276 void swap_shmem_alloc(swp_entry_t entry)
2278 __swap_duplicate(entry, SWAP_MAP_SHMEM);
2282 * Increase reference count of swap entry by 1.
2283 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
2284 * but could not be atomically allocated. Returns 0, just as if it succeeded,
2285 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
2286 * might occur if a page table entry has got corrupted.
2288 int swap_duplicate(swp_entry_t entry)
2292 while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
2293 err = add_swap_count_continuation(entry, GFP_ATOMIC);
2298 * @entry: swap entry for which we allocate swap cache.
2300 * Called when allocating swap cache for an existing swap entry.
2301 * This can return error codes; it returns 0 on success.
2302 * -EBUSY means a swap cache already exists for this entry.
2303 * Note: the return code differs from swap_duplicate().
2305 int swapcache_prepare(swp_entry_t entry)
2307 return __swap_duplicate(entry, SWAP_HAS_CACHE);
2310 struct swap_info_struct *page_swap_info(struct page *page)
2312 swp_entry_t swap = { .val = page_private(page) };
2313 BUG_ON(!PageSwapCache(page));
2314 return swap_info[swp_type(swap)];
2318 * out-of-line __page_file_ methods to avoid include hell.
2320 struct address_space *__page_file_mapping(struct page *page)
2322 VM_BUG_ON(!PageSwapCache(page));
2323 return page_swap_info(page)->swap_file->f_mapping;
2325 EXPORT_SYMBOL_GPL(__page_file_mapping);
2327 pgoff_t __page_file_index(struct page *page)
2329 swp_entry_t swap = { .val = page_private(page) };
2330 VM_BUG_ON(!PageSwapCache(page));
2331 return swp_offset(swap);
2333 EXPORT_SYMBOL_GPL(__page_file_index);
2336 * add_swap_count_continuation - called when a swap count is duplicated
2337 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
2338 * page of the original vmalloc'ed swap_map, to hold the continuation count
2339 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
2340 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
2342 * These continuation pages are seldom referenced: the common paths all work
2343 * on the original swap_map, only referring to a continuation page when the
2344 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
2346 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
2347 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
2348 * can be called after dropping locks.
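/*
 * A worked decimal analogy for the scheme above: the swap_map byte is the
 * units digit (0..SWAP_MAP_MAX) and each continuation page holds one higher
 * digit for the same slot. Incrementing past SWAP_MAP_MAX wraps the low
 * byte to COUNT_CONTINUED (digit 0, "more digits exist") and carries one
 * into the first continuation byte, just as 999 + 1 becomes 1000; a carry
 * out of a saturated continuation byte propagates to the next page.
 * Decrementing a COUNT_CONTINUED slot borrows back the other way, the
 * "subtract 1 from 1000" case handled in swap_count_continued() below.
 */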
2350 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2352 struct swap_info_struct *si;
2355 struct page *list_page;
2357 unsigned char count;
2360 * When debugging, it's easier to use __GFP_ZERO here; but it's better
2361 * for latency not to zero a page while GFP_ATOMIC and holding locks.
2363 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
2365 si = swap_info_get(entry);
2368 * An acceptable race has occurred since the failing
2369 * __swap_duplicate(): the swap entry has been freed,
2370 * perhaps even the whole swap_map cleared for swapoff.
2375 offset = swp_offset(entry);
2376 count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
2378 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
2380 * The higher the swap count, the more likely it is that tasks
2381 * will race to add swap count continuation: we need to avoid
2382 * over-provisioning.
2388 spin_unlock(&si->lock);
2393 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
2394 * no architecture is using highmem pages for kernel pagetables: so it
2395 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps.
2397 head = vmalloc_to_page(si->swap_map + offset);
2398 offset &= ~PAGE_MASK;
2401 * Page allocation does not initialize the page's lru field,
2402 * but it does always reset its private field.
2404 if (!page_private(head)) {
2405 BUG_ON(count & COUNT_CONTINUED);
2406 INIT_LIST_HEAD(&head->lru);
2407 set_page_private(head, SWP_CONTINUED);
2408 si->flags |= SWP_CONTINUED;
2411 list_for_each_entry(list_page, &head->lru, lru) {
2415 * If the previous map said no continuation, but we've found
2416 * a continuation page, free our allocation and use this one.
2418 if (!(count & COUNT_CONTINUED))
2421 map = kmap_atomic(list_page) + offset;
2426 * If this continuation count now has some space in it,
2427 * free our allocation and use this one.
2429 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
2433 list_add_tail(&page->lru, &head->lru);
2434 page = NULL; /* now it's attached, don't free it */
2436 spin_unlock(&si->lock);
2444 * swap_count_continued - when the original swap_map count is incremented
2445 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
2446 * into, carry if so, or else fail until a new continuation page is allocated;
2447 * when the original swap_map count is decremented from 0 with continuation,
2448 * borrow from the continuation and report whether it still holds more.
2449 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock.
2451 static bool swap_count_continued(struct swap_info_struct *si,
2452 pgoff_t offset, unsigned char count)
2458 head = vmalloc_to_page(si->swap_map + offset);
2459 if (page_private(head) != SWP_CONTINUED) {
2460 BUG_ON(count & COUNT_CONTINUED);
2461 return false; /* need to add count continuation */
2464 offset &= ~PAGE_MASK;
2465 page = list_entry(head->lru.next, struct page, lru);
2466 map = kmap_atomic(page) + offset;
2468 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
2469 goto init_map; /* jump over SWAP_CONT_MAX checks */
2471 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
2473 * Think of how you add 1 to 999
2475 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2477 page = list_entry(page->lru.next, struct page, lru);
2478 BUG_ON(page == head);
2479 map = kmap_atomic(page) + offset;
2481 if (*map == SWAP_CONT_MAX) {
2483 page = list_entry(page->lru.next, struct page, lru);
2485 return false; /* add count continuation */
2486 map = kmap_atomic(page) + offset;
2487 init_map: *map = 0; /* we didn't zero the page */
2491 page = list_entry(page->lru.prev, struct page, lru);
2492 while (page != head) {
2493 map = kmap_atomic(page) + offset;
2494 *map = COUNT_CONTINUED;
2496 page = list_entry(page->lru.prev, struct page, lru);
2498 return true; /* incremented */
2500 } else { /* decrementing */
2502 * Think of how you subtract 1 from 1000
2504 BUG_ON(count != COUNT_CONTINUED);
2505 while (*map == COUNT_CONTINUED) {
2507 page = list_entry(page->lru.next, struct page, lru);
2508 BUG_ON(page == head);
2509 map = kmap_atomic(page) + offset;
2516 page = list_entry(page->lru.prev, struct page, lru);
2517 while (page != head) {
2518 map = kmap_atomic(page) + offset;
2519 *map = SWAP_CONT_MAX | count;
2520 count = COUNT_CONTINUED;
2522 page = list_entry(page->lru.prev, struct page, lru);
2524 return count == COUNT_CONTINUED;
2529 * free_swap_count_continuations - swapoff frees all the continuation pages
2530 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
2532 static void free_swap_count_continuations(struct swap_info_struct *si)
2536 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
2538 head = vmalloc_to_page(si->swap_map + offset);
2539 if (page_private(head)) {
2540 struct list_head *this, *next;
2541 list_for_each_safe(this, next, &head->lru) {
2543 page = list_entry(this, struct page, lru);