// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
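
/*
 * A rough worked example of the packing above (assuming PAGE_SHIFT == 12,
 * so SWAP_RA_WIN_SHIFT == 6): bits 0-5 hold the hit count, bits 6-11 hold
 * the readahead window, and the remaining high bits hold the page-aligned
 * fault address.  SWAP_RA_VAL(0x7f001000, 8, 3) therefore encodes to
 * 0x7f001000 | (8 << 6) | 3 == 0x7f001203, and SWAP_RA_WIN()/SWAP_RA_HITS()
 * recover 8 and 3 from it.
 */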

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(folio_page(folio, i), entry.val + i);
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}
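
/*
 * The shadowp out-parameter above reports any value entry found in the
 * slots being overwritten: such entries are workingset shadows left behind
 * by a prior eviction, and the swapin path feeds them to
 * workingset_refault() to detect thrashing.
 */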

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		set_page_private(folio_page(folio, i), 0);
		xas_next(&xas);
	}
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE page. The
	 * page's pte could have the dirty bit cleared but the folio's
	 * SwapBacked flag is still set because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. For such
	 * a folio, unmap will not set the dirty bit, so folio reclaim
	 * will not write the folio out. This can cause data corruption
	 * when the folio is swapped in later. Always setting the dirty
	 * flag for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio_swap_entry(folio);
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
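
/*
 * Why the shift dance above: the swap cache for one device is split into
 * multiple address_spaces, one per SWAP_ADDRESS_SPACE_PAGES slots (with the
 * usual SWAP_ADDRESS_SPACE_SHIFT of 14 that is 16384 slots, i.e. 64MB of
 * swap at 4K pages), so shadow clearing has to walk each chunk's xarray
 * separately and then jump to the start of the next chunk.
 */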

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	lru_add_drain();
	for (int i = 0; i < nr; i++)
		free_swap_cache(encoded_page_ptr(pages[i]));
	release_pages(pages, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}
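
/*
 * The hit accounting above is what drives the readahead window: hits on
 * PG_readahead folios either bump the per-VMA counter packed by
 * SWAP_RA_VAL() (vma readahead) or the global swapin_readahead_hits
 * (cluster readahead), and __swapin_nr_pages() later turns those hits into
 * the next window size.
 */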

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() if nothing is found.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swp_offset(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
						swp_offset(entry));
		if (!IS_ERR(folio)) {
			page = folio_file_page(folio, swp_offset(entry));
			goto got_page;
		}

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	page = &folio->page;
got_page:
	put_swap_device(si);
	return page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_readpage() holds the
 * swap cache folio lock.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, bool do_poll,
				   struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll, plug);

	return retpage;
}
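
/*
 * Rough usage sketch (hypothetical caller, not taken from this file): a
 * fault path that missed in swap_cache_get_folio() could do
 *
 *	page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
 *				     vma, vmf->address, true, NULL);
 *	if (page)
 *		lock_page(page);	// wait for the read, then check uptodate
 *
 * In practice the swapin path typically goes through swapin_readahead()
 * below rather than calling this helper directly.
 */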

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
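
/*
 * A rough worked example of the heuristic above: with 5 recent hits the
 * request is 5 + 2 = 7 pages, rounded up to the next power of two (8) and
 * then clamped to max_pages; with no hits and a non-adjacent offset it
 * collapses to a single page, but it never drops below half of the
 * previous window.
 */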

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}
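
/*
 * For a sense of scale (illustrative numbers): with the default
 * vm.page_cluster of 3 and a window of 8 from swapin_nr_pages(), mask is 7,
 * so a fault on swap offset 21 reads the aligned cluster of offsets 16-23
 * in one plugged batch, with offset 0 always skipped because it holds the
 * swap header.
 */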

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}
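
/*
 * Sizing example (illustrative): a 4GB swap device has 1048576 4K slots, so
 * with SWAP_ADDRESS_SPACE_PAGES == 16384 this allocates
 * DIV_ROUND_UP(1048576, 16384) == 64 address_spaces, each covering a 64MB
 * chunk of the device, which spreads xarray lock contention.
 */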

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

#define SWAP_RA_ORDER_CEILING	5

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
};

static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
	unsigned long start, end;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return;

	if (fpfn == pfn + 1) {
		lpfn = fpfn;
		rpfn = fpfn + win;
	} else if (pfn == fpfn + 1) {
		lpfn = fpfn - win + 1;
		rpfn = fpfn + 1;
	} else {
		unsigned int left = (win - 1) / 2;

		lpfn = fpfn - left;
		rpfn = fpfn + win - left;
	}
	start = max3(lpfn, PFN_DOWN(vma->vm_start),
		     PFN_DOWN(faddr & PMD_MASK));
	end = min3(rpfn, PFN_DOWN(vma->vm_end),
		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
}
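
/*
 * Window placement example (illustrative, in units of virtual pfns): with a
 * window of 8, a forward-sequential fault (fpfn == pfn + 1) reads
 * [fpfn, fpfn + 8), a backward one reads (fpfn - 8, fpfn], and a random
 * fault centres the window at roughly [fpfn - 3, fpfn + 5), before
 * everything is clipped to the VMA and to the PMD containing the faulting
 * address.
 */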

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte = NULL, pentry;
	unsigned long addr;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	addr = vmf->address - (ra_info.offset * PAGE_SIZE);

	blk_start_plug(&plug);
	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead blocks using cluster-based (i.e. physical
 * disk based) or vma-based (i.e. virtual addresses around the faulting
 * address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
	       swap_vma_readahead(entry, gfp_mask, vmf) :
	       swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif
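
/*
 * The attribute registered above shows up as
 * /sys/kernel/mm/swap/vma_ra_enabled (mm_kobj is /sys/kernel/mm); for
 * example, "echo false > /sys/kernel/mm/swap/vma_ra_enabled" disables
 * vma-based readahead and falls back to cluster readahead.
 */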