/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
static struct vfsmount *shm_mnt;

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};
struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
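
/*
 * Example (illustrative figures, not taken from this file): on a machine
 * with 4 GiB of RAM and 4 KiB pages, totalram_pages() is roughly one
 * million pages, so an unconfigured tmpfs mount defaults to about 2 GiB of
 * data ("size=50%") and to roughly half a million inodes at most.
 */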
static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}
static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
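
/*
 * Worked example (illustrative): with 4 KiB pages, VM_ACCT(10000) is
 * PAGE_ALIGN(10000) >> PAGE_SHIFT = 3 pages.  A size-accounted object
 * (shmem_acct_size) charges those 3 pages to overcommit up front, while a
 * VM_NORESERVE object charges one page at a time through shmem_acct_block
 * as blocks are actually allocated.
 */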
static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}
	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}
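
/*
 * Note that the two layers above are distinct: shmem_acct_block() charges
 * the system-wide overcommit accounting (and only for VM_NORESERVE
 * objects), while sbinfo->used_blocks enforces the per-mount "size=" /
 * "nr_blocks=" limit through a percpu counter; exceeding the latter is
 * what makes further allocations fail with -ENOSPC.
 */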
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				spin_unlock(&sbinfo->stat_lock);
			sbinfo->free_inodes--;
		ino = sbinfo->next_ino++;
		if (unlikely(is_zero_ino(ino)))
			ino = sbinfo->next_ino++;
		if (unlikely(!sbinfo->full_inums &&
			/*
			 * Emulate get_next_ino uint wraparound for
			 */
			if (IS_ENABLED(CONFIG_64BIT))
				pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
					__func__, MINOR(sb->s_dev));
			sbinfo->next_ino = 1;
			ino = sbinfo->next_ino++;
		spin_unlock(&sbinfo->stat_lock);
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
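
/*
 * With SHMEM_INO_BATCH == 1024, an SB_KERNMOUNT superblock hands each CPU a
 * private window of 1024 inode numbers: stat_lock is only taken once per
 * 1024 allocations on that CPU, when the window is refilled from
 * sbinfo->next_ino.  User-visible tmpfs mounts instead take stat_lock on
 * every allocation so that "nr_inodes=" and inode32/inode64 are honoured.
 */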
/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
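
/*
 * Example: if info->alloced is 10 pages, info->swapped is 2 and the page
 * cache currently holds 5 pages for this inode, then 3 clean hole pages
 * were reclaimed behind our back; shmem_recalc_inode() drops them from
 * i_blocks and returns them to the per-mount and overcommit accounting.
 */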
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}
/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}
/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */
#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
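
/*
 * For reference (documented tmpfs interfaces, not code in this file): the
 * per-mount policy is chosen at mount time, e.g.
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 * while the special deny/force overrides above are only reachable through
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */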
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	}
}
#endif
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct shmem_inode_info *info;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
			list_del_init(&info->shrinklist);

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);

		list_move(&info->shrinklist, &list);
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);

	list_for_each_safe(pos, next, &list) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 */
		if (!trylock_page(page)) {

		ret = split_huge_page(page);

		/* If split failed leave the inode on the list */

		list_del_init(&info->shrinklist);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);
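
/*
 * In outline: pass one walks sbinfo->shrinklist under shrinklist_lock and
 * sorts inodes into "nothing to gain" (i_size already huge-page aligned)
 * and "worth splitting" lists; pass two, with the lock dropped, trylocks
 * and split_huge_page()s the tail page straddling i_size, and only then
 * drops the inode from the shrink list.  Inodes that could not be locked
 * or split are spliced back onto sbinfo->shrinklist for a later attempt.
 */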
static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long nr = compound_nr(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;

	if (!PageSwapCache(page)) {
		error = mem_cgroup_charge(page, charge_mm, gfp);
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_FALLBACK);
			count_vm_event(THP_FILE_FALLBACK_CHARGE);
	cgroup_throttle_swaprate(page, gfp);

		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		xas_store(&xas, page);
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
#ifdef CONFIG_FINEGRAINED_THP
			if (thp_nr_pages(page) == HPAGE_CONT_PTE_NR)
				__inc_node_page_state(page, NR_SHMEM_64KB_THPS);
#endif /* CONFIG_FINEGRAINED_THP */
			__inc_node_page_state(page, NR_SHMEM_THPS);
		mapping->nrpages += nr;
		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SHMEM, nr);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);

	page->mapping = NULL;
	page_ref_sub(page, nr);
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
	struct address_space *mapping = page->mapping;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	free_swap_and_cache(radix_to_swp_entry(radswap));
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned long swapped = 0;

	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
		if (xa_is_value(page))
		if (need_resched()) {

	return swapped << PAGE_SHIFT;

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 */
	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
	pgoff_t indices[PAGEVEC_SIZE];

	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) should never need to split a huge page;
 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
 * head, and then succeeded to trylock on tail.
 *
 * A split can only succeed when there are no additional references on the
 * huge page: so the split below relies upon find_get_entries() having stopped
 * when it found a subpage of the huge page, without getting further references.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
	if (!PageTransCompound(page))
		return true;

	/* Just proceed to delete a huge page wholly within the range punched */
#ifdef CONFIG_FINEGRAINED_THP
	if (PageHead(page) &&
	    page->index >= start && page->index + thp_nr_pages(page) <= end)
		return true;
#else /* CONFIG_FINEGRAINED_THP */
	if (PageHead(page) &&
	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
		return true;
#endif /* CONFIG_FINEGRAINED_THP */

	/* Try to split huge page, so we can truly punch the hole or truncate */
	return split_huge_page(page) >= 0;
}
/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
			     bool unfalloc)
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;

		end = -1;	/* unsigned, so actually very big */

	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (xa_is_value(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))

			if ((!unfalloc || !PageUptodate(page)) &&
			    page_mapping(page) == mapping) {
				VM_BUG_ON_PAGE(PageWriteback(page), page);
				if (shmem_punch_compound(page, start, end))
					truncate_inode_page(mapping, page);
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
			unsigned int top = PAGE_SIZE;
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);

		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);

	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
			/* But if truncating, restart to make sure all gone */
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (xa_is_value(page)) {
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */

			if (!unfalloc || !PageUptodate(page)) {
				if (page_mapping(page) != mapping) {
					/* Page was replaced by swap: retry */

				VM_BUG_ON_PAGE(PageWriteback(page), page);
				if (shmem_punch_compound(page, start, end))
					truncate_inode_page(mapping, page);
				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
					/* Wipe the page and don't get stuck */
					clear_highpage(page);
					flush_dcache_page(page);
					set_page_dirty(page);
#ifdef CONFIG_FINEGRAINED_THP
					    round_up(start, thp_nr_pages(page)))
#else /* CONFIG_FINEGRAINED_THP */
					    round_up(start, HPAGE_PMD_NR))
#endif /* CONFIG_FINEGRAINED_THP */

		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
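
/*
 * Worked example (with 4 KiB pages): punching bytes 1000..6999 gives
 * start = 1, end = 1, so no whole page is removed by the loops above;
 * instead partial_start zeroes bytes 1000..4095 of the first page and
 * partial_end zeroes bytes 0..2903 of the second, each fetched with
 * shmem_getpage(..., SGP_READ) in shmem_undo_range().
 */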
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
1080 static int shmem_getattr(const struct path *path, struct kstat *stat,
1081 u32 request_mask, unsigned int query_flags)
1083 struct inode *inode = path->dentry->d_inode;
1084 struct shmem_inode_info *info = SHMEM_I(inode);
1085 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);
1087 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1088 spin_lock_irq(&info->lock);
1089 shmem_recalc_inode(inode);
1090 spin_unlock_irq(&info->lock);
1092 generic_fillattr(inode, stat);
1094 if (is_huge_enabled(sb_info))
1095 stat->blksize = HPAGE_PMD_SIZE;
1100 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
1102 struct inode *inode = d_inode(dentry);
1103 struct shmem_inode_info *info = SHMEM_I(inode);
1104 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1107 error = setattr_prepare(dentry, attr);
1111 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
1112 loff_t oldsize = inode->i_size;
1113 loff_t newsize = attr->ia_size;
1115 /* protected by i_mutex */
1116 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1117 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1120 if (newsize != oldsize) {
1121 error = shmem_reacct_size(SHMEM_I(inode)->flags,
1125 i_size_write(inode, newsize);
1126 inode->i_ctime = inode->i_mtime = current_time(inode);
1128 if (newsize <= oldsize) {
1129 loff_t holebegin = round_up(newsize, PAGE_SIZE);
1130 if (oldsize > holebegin)
1131 unmap_mapping_range(inode->i_mapping,
1134 shmem_truncate_range(inode,
1135 newsize, (loff_t)-1);
1136 /* unmap again to remove racily COWed private pages */
1137 if (oldsize > holebegin)
1138 unmap_mapping_range(inode->i_mapping,
1142 * Part of the huge page can be beyond i_size: subject
1143 * to shrink under memory pressure.
1145 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
1146 spin_lock(&sbinfo->shrinklist_lock);
1148 * _careful to defend against unlocked access to
1149 * ->shrink_list in shmem_unused_huge_shrink()
1151 if (list_empty_careful(&info->shrinklist)) {
1152 list_add_tail(&info->shrinklist,
1153 &sbinfo->shrinklist);
1154 sbinfo->shrinklist_len++;
1156 spin_unlock(&sbinfo->shrinklist_lock);
1161 setattr_copy(inode, attr);
1162 if (attr->ia_valid & ATTR_MODE)
1163 error = posix_acl_chmod(inode, inode->i_mode);
1167 static void shmem_evict_inode(struct inode *inode)
1169 struct shmem_inode_info *info = SHMEM_I(inode);
1170 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1172 if (inode->i_mapping->a_ops == &shmem_aops) {
1173 shmem_unacct_size(info->flags, inode->i_size);
1175 shmem_truncate_range(inode, 0, (loff_t)-1);
1176 if (!list_empty(&info->shrinklist)) {
1177 spin_lock(&sbinfo->shrinklist_lock);
1178 if (!list_empty(&info->shrinklist)) {
1179 list_del_init(&info->shrinklist);
1180 sbinfo->shrinklist_len--;
1182 spin_unlock(&sbinfo->shrinklist_lock);
1184 while (!list_empty(&info->swaplist)) {
1185 /* Wait while shmem_unuse() is scanning this inode... */
1186 wait_var_event(&info->stop_eviction,
1187 !atomic_read(&info->stop_eviction));
1188 mutex_lock(&shmem_swaplist_mutex);
1189 /* ...but beware of the race if we peeked too early */
1190 if (!atomic_read(&info->stop_eviction))
1191 list_del_init(&info->swaplist);
1192 mutex_unlock(&shmem_swaplist_mutex);
1196 simple_xattrs_free(&info->xattrs);
1197 WARN_ON(inode->i_blocks);
1198 shmem_free_inode(inode->i_sb);
1202 extern struct swap_info_struct *swap_info[];
1204 static int shmem_find_swap_entries(struct address_space *mapping,
1205 pgoff_t start, unsigned int nr_entries,
1206 struct page **entries, pgoff_t *indices,
1207 unsigned int type, bool frontswap)
1209 XA_STATE(xas, &mapping->i_pages, start);
1212 unsigned int ret = 0;
1218 xas_for_each(&xas, page, ULONG_MAX) {
1219 if (xas_retry(&xas, page))
1222 if (!xa_is_value(page))
1225 entry = radix_to_swp_entry(page);
1226 if (swp_type(entry) != type)
1229 !frontswap_test(swap_info[type], swp_offset(entry)))
1232 indices[ret] = xas.xa_index;
1233 entries[ret] = page;
1235 if (need_resched()) {
1239 if (++ret == nr_entries)
1248 * Move the swapped pages for an inode to page cache. Returns the count
1249 * of pages swapped in, or the error in case of failure.
1251 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
1257 struct address_space *mapping = inode->i_mapping;
1259 for (i = 0; i < pvec.nr; i++) {
1260 struct page *page = pvec.pages[i];
1262 if (!xa_is_value(page))
1264 error = shmem_swapin_page(inode, indices[i],
1266 mapping_gfp_mask(mapping),
1273 if (error == -ENOMEM)
1277 return error ? error : ret;
1281 * If swap found in inode, free it and move page from swapcache to filecache.
1283 static int shmem_unuse_inode(struct inode *inode, unsigned int type,
1284 bool frontswap, unsigned long *fs_pages_to_unuse)
1286 struct address_space *mapping = inode->i_mapping;
1288 struct pagevec pvec;
1289 pgoff_t indices[PAGEVEC_SIZE];
1290 bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
1293 pagevec_init(&pvec);
1295 unsigned int nr_entries = PAGEVEC_SIZE;
1297 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
1298 nr_entries = *fs_pages_to_unuse;
1300 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
1301 pvec.pages, indices,
1308 ret = shmem_unuse_swap_entries(inode, pvec, indices);
1312 if (frontswap_partial) {
1313 *fs_pages_to_unuse -= ret;
1314 if (*fs_pages_to_unuse == 0) {
1315 ret = FRONTSWAP_PAGES_UNUSED;
1320 start = indices[pvec.nr - 1];
1327 * Read all the shared memory data that resides in the swap
1328 * device 'type' back into memory, so the swap device can be
1331 int shmem_unuse(unsigned int type, bool frontswap,
1332 unsigned long *fs_pages_to_unuse)
1334 struct shmem_inode_info *info, *next;
1337 if (list_empty(&shmem_swaplist))
1340 mutex_lock(&shmem_swaplist_mutex);
1341 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1342 if (!info->swapped) {
1343 list_del_init(&info->swaplist);
1347 * Drop the swaplist mutex while searching the inode for swap;
1348 * but before doing so, make sure shmem_evict_inode() will not
1349 * remove placeholder inode from swaplist, nor let it be freed
1350 * (igrab() would protect from unlink, but not from unmount).
1352 atomic_inc(&info->stop_eviction);
1353 mutex_unlock(&shmem_swaplist_mutex);
1355 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1359 mutex_lock(&shmem_swaplist_mutex);
1360 next = list_next_entry(info, swaplist);
1362 list_del_init(&info->swaplist);
1363 if (atomic_dec_and_test(&info->stop_eviction))
1364 wake_up_var(&info->stop_eviction);
1368 mutex_unlock(&shmem_swaplist_mutex);
1374 * Move the page from the page cache to the swap cache.
1376 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1378 struct shmem_inode_info *info;
1379 struct address_space *mapping;
1380 struct inode *inode;
1384 VM_BUG_ON_PAGE(PageCompound(page), page);
1385 BUG_ON(!PageLocked(page));
1386 mapping = page->mapping;
1387 index = page->index;
1388 inode = mapping->host;
1389 info = SHMEM_I(inode);
1390 if (info->flags & VM_LOCKED)
1392 if (!total_swap_pages)
1396 * Our capabilities prevent regular writeback or sync from ever calling
1397 * shmem_writepage; but a stacking filesystem might use ->writepage of
1398 * its underlying filesystem, in which case tmpfs should write out to
1399 * swap only in response to memory pressure, and not for the writeback
1402 if (!wbc->for_reclaim) {
1403 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
1408 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
1409 * value into swapfile.c, the only way we can correctly account for a
1410 * fallocated page arriving here is now to initialize it and write it.
1412 * That's okay for a page already fallocated earlier, but if we have
1413 * not yet completed the fallocation, then (a) we want to keep track
1414 * of this page in case we have to undo it, and (b) it may not be a
1415 * good idea to continue anyway, once we're pushing into swap. So
1416 * reactivate the page, and let shmem_fallocate() quit when too many.
1418 if (!PageUptodate(page)) {
1419 if (inode->i_private) {
1420 struct shmem_falloc *shmem_falloc;
1421 spin_lock(&inode->i_lock);
1422 shmem_falloc = inode->i_private;
1424 !shmem_falloc->waitq &&
1425 index >= shmem_falloc->start &&
1426 index < shmem_falloc->next)
1427 shmem_falloc->nr_unswapped++;
1429 shmem_falloc = NULL;
1430 spin_unlock(&inode->i_lock);
1434 clear_highpage(page);
1435 flush_dcache_page(page);
1436 SetPageUptodate(page);
1439 swap = get_swap_page(page);
1444 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1445 * if it's not already there. Do it now before the page is
1446 * moved to swap cache, when its pagelock no longer protects
1447 * the inode from eviction. But don't unlock the mutex until
1448 * we've incremented swapped, because shmem_unuse_inode() will
1449 * prune a !swapped inode from the swaplist under this mutex.
1451 mutex_lock(&shmem_swaplist_mutex);
1452 if (list_empty(&info->swaplist))
1453 list_add(&info->swaplist, &shmem_swaplist);
1455 if (add_to_swap_cache(page, swap,
1456 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
1458 spin_lock_irq(&info->lock);
1459 shmem_recalc_inode(inode);
1461 spin_unlock_irq(&info->lock);
1463 swap_shmem_alloc(swap);
1464 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1466 mutex_unlock(&shmem_swaplist_mutex);
1467 BUG_ON(page_mapped(page));
1468 swap_writepage(page, wbc);
1472 mutex_unlock(&shmem_swaplist_mutex);
1473 put_swap_page(page, swap);
1475 set_page_dirty(page);
1476 if (wbc->for_reclaim)
1477 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
1482 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1483 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1487 if (!mpol || mpol->mode == MPOL_DEFAULT)
1488 return; /* show nothing */
1490 mpol_to_str(buffer, sizeof(buffer), mpol);
1492 seq_printf(seq, ",mpol=%s", buffer);
1495 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1497 struct mempolicy *mpol = NULL;
1499 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
1500 mpol = sbinfo->mpol;
1502 spin_unlock(&sbinfo->stat_lock);
1506 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1507 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1510 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1514 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1516 #define vm_policy vm_private_data
1519 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1520 struct shmem_inode_info *info, pgoff_t index)
1522 /* Create a pseudo vma that just contains the policy */
1523 vma_init(vma, NULL);
1524 /* Bias interleave by inode number to distribute better across nodes */
1525 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1526 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1529 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1531 /* Drop reference taken by mpol_shared_policy_lookup() */
1532 mpol_cond_put(vma->vm_policy);
1535 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1536 struct shmem_inode_info *info, pgoff_t index)
1538 struct vm_area_struct pvma;
1540 struct vm_fault vmf;
1542 shmem_pseudo_vma_init(&pvma, info, index);
1545 page = swap_cluster_readahead(swap, gfp, &vmf);
1546 shmem_pseudo_vma_destroy(&pvma);
1551 #ifdef CONFIG_FINEGRAINED_THP
1552 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1553 struct shmem_inode_info *info, pgoff_t index, int page_nr)
1554 #else /* CONFIG_FINEGRAINED_THP */
1555 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1556 struct shmem_inode_info *info, pgoff_t index)
1557 #endif/* CONFIG_FINEGRAINED_THP */
1559 struct vm_area_struct pvma;
1560 struct address_space *mapping = info->vfs_inode.i_mapping;
1564 #ifdef CONFIG_FINEGRAINED_THP
1565 hindex = round_down(index, page_nr);
1566 if (xa_find(&mapping->i_pages, &hindex, hindex + page_nr - 1,
1569 #else /* CONFIG_FINEGRAINED_THP */
1570 hindex = round_down(index, HPAGE_PMD_NR);
1571 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
1574 #endif /* CONFIG_FINEGRAINED_THP */
1576 shmem_pseudo_vma_init(&pvma, info, hindex);
1577 #ifdef CONFIG_FINEGRAINED_THP
1578 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1579 page_nr == HPAGE_PMD_NR ? HPAGE_PMD_ORDER : HPAGE_CONT_PTE_ORDER,
1580 &pvma, 0, numa_node_id(), true);
1581 #else /* CONFIG_FINEGRAINED_THP */
1582 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1583 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1584 #endif /* CONFIG_FINEGRAINED_THP */
1585 shmem_pseudo_vma_destroy(&pvma);
1587 prep_transhuge_page(page);
1589 count_vm_event(THP_FILE_FALLBACK);
1593 static struct page *shmem_alloc_page(gfp_t gfp,
1594 struct shmem_inode_info *info, pgoff_t index)
1596 struct vm_area_struct pvma;
1599 shmem_pseudo_vma_init(&pvma, info, index);
1600 page = alloc_page_vma(gfp, &pvma, 0);
1601 shmem_pseudo_vma_destroy(&pvma);
1606 #ifdef CONFIG_FINEGRAINED_THP
1607 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1608 struct inode *inode,
1609 pgoff_t index, bool huge, int page_nr)
1610 #else /* CONFIG_FINEGRAINED_THP */
1611 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1612 struct inode *inode,
1613 pgoff_t index, bool huge)
1614 #endif /* CONFIG_FINEGRAINED_THP */
1616 struct shmem_inode_info *info = SHMEM_I(inode);
1621 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1623 #ifdef CONFIG_FINEGRAINED_THP
1624 nr = huge ? page_nr : 1;
1626 nr = huge ? HPAGE_PMD_NR : 1;
1629 if (!shmem_inode_acct_block(inode, nr))
1633 #ifdef CONFIG_FINEGRAINED_THP
1634 page = shmem_alloc_hugepage(gfp, info, index, nr);
1636 page = shmem_alloc_hugepage(gfp, info, index);
1639 page = shmem_alloc_page(gfp, info, index);
1641 __SetPageLocked(page);
1642 __SetPageSwapBacked(page);
1647 shmem_inode_unacct_blocks(inode, nr);
1649 return ERR_PTR(err);
1653 * When a page is moved from swapcache to shmem filecache (either by the
1654 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1655 * shmem_unuse_inode()), it may have been read in earlier from swap, in
1656 * ignorance of the mapping it belongs to. If that mapping has special
1657 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1658 * we may need to copy to a suitable page before moving to filecache.
1660 * In a future release, this may well be extended to respect cpuset and
1661 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1662 * but for now it is a simple matter of zone.
1664 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1666 return page_zonenum(page) > gfp_zone(gfp);
1669 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1670 struct shmem_inode_info *info, pgoff_t index)
1672 struct page *oldpage, *newpage;
1673 struct address_space *swap_mapping;
1679 entry.val = page_private(oldpage);
1680 swap_index = swp_offset(entry);
1681 swap_mapping = page_mapping(oldpage);
1684 * We have arrived here because our zones are constrained, so don't
1685 * limit chance of success by further cpuset and node constraints.
1687 gfp &= ~GFP_CONSTRAINT_MASK;
1688 newpage = shmem_alloc_page(gfp, info, index);
1693 copy_highpage(newpage, oldpage);
1694 flush_dcache_page(newpage);
1696 __SetPageLocked(newpage);
1697 __SetPageSwapBacked(newpage);
1698 SetPageUptodate(newpage);
1699 set_page_private(newpage, entry.val);
1700 SetPageSwapCache(newpage);
1703 * Our caller will very soon move newpage out of swapcache, but it's
1704 * a nice clean interface for us to replace oldpage by newpage there.
1706 xa_lock_irq(&swap_mapping->i_pages);
1707 error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
1709 mem_cgroup_migrate(oldpage, newpage);
1710 __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
1711 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
1713 xa_unlock_irq(&swap_mapping->i_pages);
1715 if (unlikely(error)) {
1717 * Is this possible? I think not, now that our callers check
1718 * both PageSwapCache and page_private after getting page lock;
1719 * but be defensive. Reverse old to newpage for clear and free.
1723 lru_cache_add(newpage);
1727 ClearPageSwapCache(oldpage);
1728 set_page_private(oldpage, 0);
1730 unlock_page(oldpage);
1737 * Swap in the page pointed to by *pagep.
1738 * Caller has to make sure that *pagep contains a valid swapped page.
1739 * Returns 0 and the page in pagep if success. On failure, returns the
1740 * error code and NULL in *pagep.
1742 static int shmem_swapin_page(struct inode *inode, pgoff_t index,
1743 struct page **pagep, enum sgp_type sgp,
1744 gfp_t gfp, struct vm_area_struct *vma,
1745 vm_fault_t *fault_type)
1747 struct address_space *mapping = inode->i_mapping;
1748 struct shmem_inode_info *info = SHMEM_I(inode);
1749 struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
1754 VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
1755 swap = radix_to_swp_entry(*pagep);
1758 /* Look it up and read it in.. */
1759 page = lookup_swap_cache(swap, NULL, 0);
1761 /* Or update major stats only when swapin succeeds?? */
1763 *fault_type |= VM_FAULT_MAJOR;
1764 count_vm_event(PGMAJFAULT);
1765 count_memcg_event_mm(charge_mm, PGMAJFAULT);
1767 /* Here we actually start the io */
1768 page = shmem_swapin(swap, gfp, info, index);
1775 /* We have to do this with page locked to prevent races */
1777 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1778 !shmem_confirm_swap(mapping, index, swap)) {
1782 if (!PageUptodate(page)) {
1786 wait_on_page_writeback(page);
1789 * Some architectures may have to restore extra metadata to the
1790 * physical page after reading from swap.
1792 arch_swap_restore(swap, page);
1794 if (shmem_should_replace_page(page, gfp)) {
1795 error = shmem_replace_page(&page, gfp, info, index);
1800 error = shmem_add_to_page_cache(page, mapping, index,
1801 swp_to_radix_entry(swap), gfp,
1806 spin_lock_irq(&info->lock);
1808 shmem_recalc_inode(inode);
1809 spin_unlock_irq(&info->lock);
1811 if (sgp == SGP_WRITE)
1812 mark_page_accessed(page);
1814 delete_from_swap_cache(page);
1815 set_page_dirty(page);
1821 if (!shmem_confirm_swap(mapping, index, swap))
1833 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1835 * If we allocate a new one we do not mark it dirty. That's up to the
1836 * vm. If we swap it in we mark it dirty since we also free the swap
1837 * entry since a page cannot live in both the swap and page cache.
1839 * vmf and fault_type are only supplied by shmem_fault:
1840 * otherwise they are NULL.
1842 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1843 struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1844 struct vm_area_struct *vma, struct vm_fault *vmf,
1845 vm_fault_t *fault_type)
1847 struct address_space *mapping = inode->i_mapping;
1848 struct shmem_inode_info *info = SHMEM_I(inode);
1849 struct shmem_sb_info *sbinfo;
1850 struct mm_struct *charge_mm;
1852 enum sgp_type sgp_huge = sgp;
1853 pgoff_t hindex = index;
1857 #ifdef CONFIG_FINEGRAINED_THP
1858 int nr_pages = HPAGE_PMD_NR;
1861 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1863 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1866 if (sgp <= SGP_CACHE &&
1867 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1871 sbinfo = SHMEM_SB(inode->i_sb);
1872 charge_mm = vma ? vma->vm_mm : current->mm;
1874 page = find_lock_entry(mapping, index);
1875 if (xa_is_value(page)) {
1876 error = shmem_swapin_page(inode, index, &page,
1877 sgp, gfp, vma, fault_type);
1878 if (error == -EEXIST)
1886 hindex = page->index;
1887 if (page && sgp == SGP_WRITE)
1888 mark_page_accessed(page);
1890 #ifdef CONFIG_FINEGRAINED_THP
1892 nr_pages = thp_nr_pages(page);
1895 /* fallocated page? */
1896 if (page && !PageUptodate(page)) {
1897 if (sgp != SGP_READ)
1904 if (page || sgp == SGP_READ)
1908 * Fast cache lookup did not find it:
1909 * bring it back from swap or allocate.
1912 if (vma && userfaultfd_missing(vma)) {
1913 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
1917 /* shmem_symlink() */
1918 if (mapping->a_ops != &shmem_aops)
1920 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1922 if (shmem_huge == SHMEM_HUGE_FORCE)
1924 switch (sbinfo->huge) {
1925 case SHMEM_HUGE_NEVER:
1927 case SHMEM_HUGE_WITHIN_SIZE: {
1930 #ifdef CONFIG_FINEGRAINED_THP
1931 off = round_up(index, nr_pages);
1933 off = round_up(index, HPAGE_PMD_NR);
1935 i_size = round_up(i_size_read(inode), PAGE_SIZE);
1936 #ifdef CONFIG_FINEGRAINED_THP
1937 if (i_size >= nr_pages * PAGE_SIZE &&
1938 i_size >> PAGE_SHIFT >= off)
1941 if (i_size >= HPAGE_PMD_SIZE &&
1942 i_size >> PAGE_SHIFT >= off)
1948 case SHMEM_HUGE_ADVISE:
1949 if (sgp_huge == SGP_HUGE)
1951 /* TODO: implement fadvise() hints */
1956 #ifdef CONFIG_FINEGRAINED_THP
1957 page = shmem_alloc_and_acct_page(gfp, inode, index, true, nr_pages);
1959 page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1963 #ifdef CONFIG_FINEGRAINED_THP
1964 page = shmem_alloc_and_acct_page(gfp, inode,
1967 page = shmem_alloc_and_acct_page(gfp, inode,
1974 error = PTR_ERR(page);
1976 if (error != -ENOSPC)
1979 * Try to reclaim some space by splitting a huge page
1980 * beyond i_size on the filesystem.
1985 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1986 if (ret == SHRINK_STOP)
1994 if (PageTransHuge(page))
1995 #ifdef CONFIG_FINEGRAINED_THP
1996 hindex = round_down(index, nr_pages);
1998 hindex = round_down(index, HPAGE_PMD_NR);
2003 if (sgp == SGP_WRITE)
2004 __SetPageReferenced(page);
2006 error = shmem_add_to_page_cache(page, mapping, hindex,
2007 NULL, gfp & GFP_RECLAIM_MASK,
2011 lru_cache_add(page);
2013 spin_lock_irq(&info->lock);
2014 info->alloced += compound_nr(page);
2015 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
2016 shmem_recalc_inode(inode);
2017 spin_unlock_irq(&info->lock);
2020 #ifdef CONFIG_FINEGRAINED_THP
2021 if (PageTransHuge(page) &&
2022 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2023 hindex + nr_pages - 1) {
2025 * Part of the huge page is beyond i_size: subject
2026 * to shrink under memory pressure.
2028 spin_lock(&sbinfo->shrinklist_lock);
2030 * _careful to defend against unlocked access to
2031 * ->shrink_list in shmem_unused_huge_shrink()
2033 if (list_empty_careful(&info->shrinklist)) {
2034 list_add_tail(&info->shrinklist,
2035 &sbinfo->shrinklist);
2036 sbinfo->shrinklist_len++;
2038 spin_unlock(&sbinfo->shrinklist_lock);
2040 #else /* CONFIG_FINEGRAINED_THP */
2041 if (PageTransHuge(page) &&
2042 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
2043 hindex + HPAGE_PMD_NR - 1) {
2045 * Part of the huge page is beyond i_size: subject
2046 * to shrink under memory pressure.
2048 spin_lock(&sbinfo->shrinklist_lock);
2050 * _careful to defend against unlocked access to
2051 * ->shrink_list in shmem_unused_huge_shrink()
2053 if (list_empty_careful(&info->shrinklist)) {
2054 list_add_tail(&info->shrinklist,
2055 &sbinfo->shrinklist);
2056 sbinfo->shrinklist_len++;
2058 spin_unlock(&sbinfo->shrinklist_lock);
2060 #endif /* CONFIG_FINEGRAINED_THP */
2062 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
2064 if (sgp == SGP_FALLOC)
2068 * Let SGP_WRITE caller clear ends if write does not fill page;
2069 * but SGP_FALLOC on a page fallocated earlier must initialize
2070 * it now, lest undo on failure cancel our earlier guarantee.
2072 if (sgp != SGP_WRITE && !PageUptodate(page)) {
2075 for (i = 0; i < compound_nr(page); i++) {
2076 clear_highpage(page + i);
2077 flush_dcache_page(page + i);
2079 SetPageUptodate(page);
2082 /* Perhaps the file has been truncated since we checked */
2083 if (sgp <= SGP_CACHE &&
2084 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
2086 ClearPageDirty(page);
2087 delete_from_page_cache(page);
2088 spin_lock_irq(&info->lock);
2089 shmem_recalc_inode(inode);
2090 spin_unlock_irq(&info->lock);
2096 *pagep = page + index - hindex;
2103 shmem_inode_unacct_blocks(inode, compound_nr(page));
2105 if (PageTransHuge(page)) {
2115 if (error == -ENOSPC && !once++) {
2116 spin_lock_irq(&info->lock);
2117 shmem_recalc_inode(inode);
2118 spin_unlock_irq(&info->lock);
2121 if (error == -EEXIST)
2127 * This is like autoremove_wake_function, but it removes the wait queue
2128 * entry unconditionally - even if something else had already woken the
2131 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2133 int ret = default_wake_function(wait, mode, sync, key);
2134 list_del_init(&wait->entry);
static vm_fault_t shmem_fault(struct vm_fault *vmf)
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
	vm_fault_t ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex. So refrain from
	 * faulting pages into the hole while it's being punched. Although
	 * shmem_undo_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_mutex in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    shmem_falloc->waitq &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);

			ret = VM_FAULT_NOPAGE;
			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
				ret = VM_FAULT_RETRY;

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);

			/*
			 * shmem_falloc_waitq points into the shmem_fallocate()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);
		}
		spin_unlock(&inode->i_lock);
	}

	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
	else if (vma->vm_flags & VM_HUGEPAGE)

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
				gfp, vma, vmf, &ret);
		return vmf_error(err);
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long uaddr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
	unsigned long (*get_area)(struct file *,
		unsigned long, unsigned long, unsigned long, unsigned long);
	unsigned long offset;
	unsigned long inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	if (len > TASK_SIZE)

	get_area = current->mm->get_unmapped_area;
	addr = get_area(file, uaddr, len, pgoff, flags);

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
	if (IS_ERR_VALUE(addr))
	if (addr & ~PAGE_MASK)
	if (addr > TASK_SIZE - len)

	if (shmem_huge == SHMEM_HUGE_DENY)
	if (len < HPAGE_PMD_SIZE)
	if (flags & MAP_FIXED)

	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint and we allocated area there
	 * successfully, respect that as before.
	 */
	if (shmem_huge != SHMEM_HUGE_FORCE) {
		struct super_block *sb;

			VM_BUG_ON(file->f_op != &shmem_file_operations);
			sb = file_inode(file)->i_sb;
			/*
			 * Called directly from mm/mmap.c, or drivers/char/mem.c
			 * for "/dev/zero", to create a shared anonymous object.
			 */
			if (IS_ERR(shm_mnt))
			sb = shm_mnt->mnt_sb;
		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
	}

	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)

	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
	if (inflated_len < len)

	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
	if (IS_ERR_VALUE(inflated_addr))
	if (inflated_addr & ~PAGE_MASK)

	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += HPAGE_PMD_SIZE;

	if (inflated_addr > TASK_SIZE - len)
	return inflated_addr;
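
/*
 * Worked example (illustrative): with 2 MiB huge pages, a 4 MiB MAP_SHARED
 * request at pgoff 16 (file offset 64 KiB) has offset = 64 KiB within a
 * huge page.  We ask the arch for inflated_len = 4 MiB + 2 MiB - 4 KiB and
 * then slide the returned address forward so that it is congruent to 64 KiB
 * modulo HPAGE_PMD_SIZE, letting the file be mapped with whole huge pages.
 */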
2311 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2313 struct inode *inode = file_inode(vma->vm_file);
2314 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2317 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2320 struct inode *inode = file_inode(vma->vm_file);
2323 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2324 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2328 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2330 struct inode *inode = file_inode(file);
2331 struct shmem_inode_info *info = SHMEM_I(inode);
2332 int retval = -ENOMEM;
2335 * What serializes the accesses to info->flags?
2336 * ipc_lock_object() when called from shmctl_do_lock(),
2337 * no serialization needed when called from shm_destroy().
2339 if (lock && !(info->flags & VM_LOCKED)) {
2340 if (!user_shm_lock(inode->i_size, user))
2342 info->flags |= VM_LOCKED;
2343 mapping_set_unevictable(file->f_mapping);
2345 if (!lock && (info->flags & VM_LOCKED) && user) {
2346 user_shm_unlock(inode->i_size, user);
2347 info->flags &= ~VM_LOCKED;
2348 mapping_clear_unevictable(file->f_mapping);
2356 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2358 struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2360 if (info->seals & F_SEAL_FUTURE_WRITE) {
2362 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
2363 * "future write" seal active.
2365 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
2369 * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
2370 * MAP_SHARED and read-only, take care to not allow mprotect to
2371 * revert protections on such mappings. Do this only for shared
2372 * mappings. For private mappings, we don't need to mask
2373 * VM_MAYWRITE, as we still want them to be COW-writable.
2375 if (vma->vm_flags & VM_SHARED)
2376 vma->vm_flags &= ~(VM_MAYWRITE);
2379 /* arm64 - allow memory tagging on RAM-based files */
2380 vma->vm_flags |= VM_MTE_ALLOWED;
2382 file_accessed(file);
2383 vma->vm_ops = &shmem_vm_ops;
2384 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2385 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2386 (vma->vm_end & HPAGE_PMD_MASK)) {
2387 khugepaged_enter(vma, vma->vm_flags);
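/*
 * Illustrative userspace sketch (not part of this file): how the
 * F_SEAL_FUTURE_WRITE checks above behave.  Existing mappings are left
 * alone; new shared writable mmaps are refused.
 *
 *	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
 *	mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);	// succeeds
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED, fd, 0);			// fails with EPERM
 */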
2392 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2393 umode_t mode, dev_t dev, unsigned long flags)
2395 struct inode *inode;
2396 struct shmem_inode_info *info;
2397 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2400 if (shmem_reserve_inode(sb, &ino))
2403 inode = new_inode(sb);
2406 inode_init_owner(inode, dir, mode);
2407 inode->i_blocks = 0;
2408 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2409 inode->i_generation = prandom_u32();
2410 info = SHMEM_I(inode);
2411 memset(info, 0, (char *)inode - (char *)info);
2412 spin_lock_init(&info->lock);
2413 atomic_set(&info->stop_eviction, 0);
2414 info->seals = F_SEAL_SEAL;
2415 info->flags = flags & VM_NORESERVE;
2416 INIT_LIST_HEAD(&info->shrinklist);
2417 INIT_LIST_HEAD(&info->swaplist);
2418 simple_xattrs_init(&info->xattrs);
2419 cache_no_acl(inode);
2421 switch (mode & S_IFMT) {
2423 inode->i_op = &shmem_special_inode_operations;
2424 init_special_inode(inode, mode, dev);
2427 inode->i_mapping->a_ops = &shmem_aops;
2428 inode->i_op = &shmem_inode_operations;
2429 inode->i_fop = &shmem_file_operations;
2430 mpol_shared_policy_init(&info->policy,
2431 shmem_get_sbmpol(sbinfo));
2435 /* Some things misbehave if size == 0 on a directory */
2436 inode->i_size = 2 * BOGO_DIRENT_SIZE;
2437 inode->i_op = &shmem_dir_inode_operations;
2438 inode->i_fop = &simple_dir_operations;
2442 * Must not load anything in the rbtree,
2443 * mpol_free_shared_policy will not be called.
2445 mpol_shared_policy_init(&info->policy, NULL);
2449 lockdep_annotate_inode_mutex_key(inode);
2451 shmem_free_inode(sb);
2455 bool shmem_mapping(struct address_space *mapping)
2457 return mapping->a_ops == &shmem_aops;
2460 static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
2462 struct vm_area_struct *dst_vma,
2463 unsigned long dst_addr,
2464 unsigned long src_addr,
2466 struct page **pagep)
2468 struct inode *inode = file_inode(dst_vma->vm_file);
2469 struct shmem_inode_info *info = SHMEM_I(inode);
2470 struct address_space *mapping = inode->i_mapping;
2471 gfp_t gfp = mapping_gfp_mask(mapping);
2472 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
2476 pte_t _dst_pte, *dst_pte;
2478 pgoff_t offset, max_off;
2481 if (!shmem_inode_acct_block(inode, 1))
2485 page = shmem_alloc_page(gfp, info, pgoff);
2487 goto out_unacct_blocks;
2489 if (!zeropage) { /* mcopy_atomic */
2490 page_kaddr = kmap_atomic(page);
2491 ret = copy_from_user(page_kaddr,
2492 (const void __user *)src_addr,
2493 PAGE_SIZE);
2494 kunmap_atomic(page_kaddr);
2496 /* fallback to copy_from_user outside mmap_lock */
2497 if (unlikely(ret)) {
2499 shmem_inode_unacct_blocks(inode, 1);
2500 /* don't free the page */
2503 } else { /* mfill_zeropage_atomic */
2504 clear_highpage(page);
2511 VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2512 __SetPageLocked(page);
2513 __SetPageSwapBacked(page);
2514 __SetPageUptodate(page);
2517 offset = linear_page_index(dst_vma, dst_addr);
2518 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2519 if (unlikely(offset >= max_off))
2522 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2523 gfp & GFP_RECLAIM_MASK, dst_mm);
2527 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2528 if (dst_vma->vm_flags & VM_WRITE)
2529 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
2532 * We don't set the pte dirty if the vma has no
2533 * VM_WRITE permission, so mark the page dirty or it
2534 * could be freed from under us. We could do it
2535 * unconditionally before unlock_page(), but doing it
2536 * only if VM_WRITE is not set is faster.
2538 set_page_dirty(page);
2541 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
2544 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2545 if (unlikely(offset >= max_off))
2546 goto out_release_unlock;
2549 if (!pte_none(*dst_pte))
2550 goto out_release_unlock;
2552 lru_cache_add(page);
2554 spin_lock_irq(&info->lock);
2556 inode->i_blocks += BLOCKS_PER_PAGE;
2557 shmem_recalc_inode(inode);
2558 spin_unlock_irq(&info->lock);
2560 inc_mm_counter(dst_mm, mm_counter_file(page));
2561 page_add_file_rmap(page, false);
2562 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
2564 /* No need to invalidate - it was non-present before */
2565 update_mmu_cache(dst_vma, dst_addr, dst_pte);
2566 pte_unmap_unlock(dst_pte, ptl);
2572 pte_unmap_unlock(dst_pte, ptl);
2573 ClearPageDirty(page);
2574 delete_from_page_cache(page);
2579 shmem_inode_unacct_blocks(inode, 1);
2583 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
2585 struct vm_area_struct *dst_vma,
2586 unsigned long dst_addr,
2587 unsigned long src_addr,
2588 struct page **pagep)
2590 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2591 dst_addr, src_addr, false, pagep);
2594 int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
2596 struct vm_area_struct *dst_vma,
2597 unsigned long dst_addr)
2599 struct page *page = NULL;
2601 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
2602 dst_addr, 0, true, &page);
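/*
 * Illustrative userspace sketch (not part of this file): resolving a
 * missing-page fault on a shmem-backed MAP_SHARED area with
 * UFFDIO_COPY, which is what drives shmem_mcopy_atomic_pte() above.
 * "area", "len", "fault_addr" and "src_buf" are hypothetical; error
 * handling and the fault-reading loop are omitted.
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
 *	struct uffdio_api api = { .api = UFFD_API };
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)area, .len = len },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~4095UL,	// assumes 4KB pages
 *		.src = (unsigned long)src_buf,
 *		.len = 4096,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);	// allocates and maps the page
 */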
2606 static const struct inode_operations shmem_symlink_inode_operations;
2607 static const struct inode_operations shmem_short_symlink_operations;
2609 #ifdef CONFIG_TMPFS_XATTR
2610 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2612 #define shmem_initxattrs NULL
2616 shmem_write_begin(struct file *file, struct address_space *mapping,
2617 loff_t pos, unsigned len, unsigned flags,
2618 struct page **pagep, void **fsdata)
2620 struct inode *inode = mapping->host;
2621 struct shmem_inode_info *info = SHMEM_I(inode);
2622 pgoff_t index = pos >> PAGE_SHIFT;
2624 /* i_mutex is held by caller */
2625 if (unlikely(info->seals & (F_SEAL_GROW |
2626 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
2627 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2629 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2633 return shmem_getpage(inode, index, pagep, SGP_WRITE);
2637 shmem_write_end(struct file *file, struct address_space *mapping,
2638 loff_t pos, unsigned len, unsigned copied,
2639 struct page *page, void *fsdata)
2641 struct inode *inode = mapping->host;
2643 if (pos + copied > inode->i_size)
2644 i_size_write(inode, pos + copied);
2646 if (!PageUptodate(page)) {
2647 struct page *head = compound_head(page);
2648 if (PageTransCompound(page)) {
2650 #ifdef CONFIG_FINEGRAINED_THP
2651 for (i = 0; i < thp_nr_pages(page); i++) {
2652 if (head + i == page)
2654 clear_highpage(head + i);
2655 flush_dcache_page(head + i);
2657 #else /* CONFIG_FINEGRAINED_THP */
2658 for (i = 0; i < HPAGE_PMD_NR; i++) {
2659 if (head + i == page)
2661 clear_highpage(head + i);
2662 flush_dcache_page(head + i);
2664 #endif /* CONFIG_FINEGRAINED_THP */
2666 if (copied < PAGE_SIZE) {
2667 unsigned from = pos & (PAGE_SIZE - 1);
2668 zero_user_segments(page, 0, from,
2669 from + copied, PAGE_SIZE);
2671 SetPageUptodate(head);
2673 set_page_dirty(page);
2680 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2682 struct file *file = iocb->ki_filp;
2683 struct inode *inode = file_inode(file);
2684 struct address_space *mapping = inode->i_mapping;
2686 unsigned long offset;
2687 enum sgp_type sgp = SGP_READ;
2690 loff_t *ppos = &iocb->ki_pos;
2693 * Might this read be for a stacking filesystem? Then when reading
2694 * holes of a sparse file, we actually need to allocate those pages,
2695 * and even mark them dirty, so it cannot exceed the max_blocks limit.
2697 if (!iter_is_iovec(to))
2700 index = *ppos >> PAGE_SHIFT;
2701 offset = *ppos & ~PAGE_MASK;
2704 struct page *page = NULL;
2706 unsigned long nr, ret;
2707 loff_t i_size = i_size_read(inode);
2709 end_index = i_size >> PAGE_SHIFT;
2710 if (index > end_index)
2712 if (index == end_index) {
2713 nr = i_size & ~PAGE_MASK;
2718 error = shmem_getpage(inode, index, &page, sgp);
2720 if (error == -EINVAL)
2725 if (sgp == SGP_CACHE)
2726 set_page_dirty(page);
2731 * We must evaluate after, since reads (unlike writes)
2732 * are called without i_mutex protection against truncate
2735 i_size = i_size_read(inode);
2736 end_index = i_size >> PAGE_SHIFT;
2737 if (index == end_index) {
2738 nr = i_size & ~PAGE_MASK;
2749 * If users can be writing to this page using arbitrary
2750 * virtual addresses, take care about potential aliasing
2751 * before reading the page on the kernel side.
2753 if (mapping_writably_mapped(mapping))
2754 flush_dcache_page(page);
2756 * Mark the page accessed if we read the beginning.
2759 mark_page_accessed(page);
2761 page = ZERO_PAGE(0);
2766 * Ok, we have the page, and it's up-to-date, so
2767 * now we can copy it to user space...
2769 ret = copy_page_to_iter(page, offset, nr, to);
2772 index += offset >> PAGE_SHIFT;
2773 offset &= ~PAGE_MASK;
2776 if (!iov_iter_count(to))
2785 *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2786 file_accessed(file);
2787 return retval ? retval : error;
2791 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2793 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2794 pgoff_t index, pgoff_t end, int whence)
2797 struct pagevec pvec;
2798 pgoff_t indices[PAGEVEC_SIZE];
2802 pagevec_init(&pvec);
2803 pvec.nr = 1; /* start small: we may be there already */
2805 pvec.nr = find_get_entries(mapping, index,
2806 pvec.nr, pvec.pages, indices);
2808 if (whence == SEEK_DATA)
2812 for (i = 0; i < pvec.nr; i++, index++) {
2813 if (index < indices[i]) {
2814 if (whence == SEEK_HOLE) {
2820 page = pvec.pages[i];
2821 if (page && !xa_is_value(page)) {
2822 if (!PageUptodate(page))
2826 (page && whence == SEEK_DATA) ||
2827 (!page && whence == SEEK_HOLE)) {
2832 pagevec_remove_exceptionals(&pvec);
2833 pagevec_release(&pvec);
2834 pvec.nr = PAGEVEC_SIZE;
2840 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2842 struct address_space *mapping = file->f_mapping;
2843 struct inode *inode = mapping->host;
2847 if (whence != SEEK_DATA && whence != SEEK_HOLE)
2848 return generic_file_llseek_size(file, offset, whence,
2849 MAX_LFS_FILESIZE, i_size_read(inode));
2851 /* We're holding i_mutex so we can access i_size directly */
2853 if (offset < 0 || offset >= inode->i_size)
2856 start = offset >> PAGE_SHIFT;
2857 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2858 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2859 new_offset <<= PAGE_SHIFT;
2860 if (new_offset > offset) {
2861 if (new_offset < inode->i_size)
2862 offset = new_offset;
2863 else if (whence == SEEK_DATA)
2866 offset = inode->i_size;
2871 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2872 inode_unlock(inode);
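/*
 * Illustrative userspace sketch (not part of this file): probing a
 * sparse tmpfs file with the SEEK_DATA/SEEK_HOLE support above.
 * Offsets come back page-aligned; assumes 4KB pages.
 *
 *	int fd = open("/dev/shm/sparse", O_RDWR | O_CREAT | O_TRUNC, 0600);
 *	ftruncate(fd, 1 << 20);			// 1MB of hole
 *	pwrite(fd, "x", 1, 512 * 1024);		// instantiate one page
 *	off_t data = lseek(fd, 0, SEEK_DATA);	// 524288
 *	off_t hole = lseek(fd, data, SEEK_HOLE);// 528384 (end of that page)
 */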
2876 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2879 struct inode *inode = file_inode(file);
2880 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2881 struct shmem_inode_info *info = SHMEM_I(inode);
2882 struct shmem_falloc shmem_falloc;
2883 pgoff_t start, index, end;
2886 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2891 if (mode & FALLOC_FL_PUNCH_HOLE) {
2892 struct address_space *mapping = file->f_mapping;
2893 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2894 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2895 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
2897 /* protected by i_mutex */
2898 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2903 shmem_falloc.waitq = &shmem_falloc_waitq;
2904 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
2905 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2906 spin_lock(&inode->i_lock);
2907 inode->i_private = &shmem_falloc;
2908 spin_unlock(&inode->i_lock);
2910 if ((u64)unmap_end > (u64)unmap_start)
2911 unmap_mapping_range(mapping, unmap_start,
2912 1 + unmap_end - unmap_start, 0);
2913 shmem_truncate_range(inode, offset, offset + len - 1);
2914 /* No need to unmap again: hole-punching leaves COWed pages */
2916 spin_lock(&inode->i_lock);
2917 inode->i_private = NULL;
2918 wake_up_all(&shmem_falloc_waitq);
2919 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
2920 spin_unlock(&inode->i_lock);
2925 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2926 error = inode_newsize_ok(inode, offset + len);
2930 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2935 start = offset >> PAGE_SHIFT;
2936 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2937 /* Try to avoid a swapstorm if len is impossible to satisfy */
2938 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2943 shmem_falloc.waitq = NULL;
2944 shmem_falloc.start = start;
2945 shmem_falloc.next = start;
2946 shmem_falloc.nr_falloced = 0;
2947 shmem_falloc.nr_unswapped = 0;
2948 spin_lock(&inode->i_lock);
2949 inode->i_private = &shmem_falloc;
2950 spin_unlock(&inode->i_lock);
2952 for (index = start; index < end; index++) {
2956 * Good, the fallocate(2) manpage permits EINTR: we may have
2957 * been interrupted because we are using up too much memory.
2959 if (signal_pending(current))
2961 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2964 error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2966 /* Remove the !PageUptodate pages we added */
2967 if (index > start) {
2968 shmem_undo_range(inode,
2969 (loff_t)start << PAGE_SHIFT,
2970 ((loff_t)index << PAGE_SHIFT) - 1, true);
2976 * Inform shmem_writepage() how far we have reached.
2977 * No need for lock or barrier: we have the page lock.
2979 shmem_falloc.next++;
2980 if (!PageUptodate(page))
2981 shmem_falloc.nr_falloced++;
2984 * If !PageUptodate, leave it that way so that freeable pages
2985 * can be recognized if we need to rollback on error later.
2986 * But set_page_dirty so that memory pressure will swap rather
2987 * than free the pages we are allocating (and SGP_CACHE pages
2988 * might still be clean: we now need to mark those dirty too).
2990 set_page_dirty(page);
2996 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2997 i_size_write(inode, offset + len);
2998 inode->i_ctime = current_time(inode);
3000 spin_lock(&inode->i_lock);
3001 inode->i_private = NULL;
3002 spin_unlock(&inode->i_lock);
3004 inode_unlock(inode);
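/*
 * Illustrative userspace sketch (not part of this file): the two
 * fallocate(2) modes handled above, on a tmpfs file.
 *
 *	int fd = open("/dev/shm/f", O_RDWR | O_CREAT, 0600);
 *	fallocate(fd, 0, 0, 8 << 20);			// preallocate 8MB
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  1 << 20, 1 << 20);			// drop pages in [1MB, 2MB)
 */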
3008 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
3010 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
3012 buf->f_type = TMPFS_MAGIC;
3013 buf->f_bsize = PAGE_SIZE;
3014 buf->f_namelen = NAME_MAX;
3015 if (sbinfo->max_blocks) {
3016 buf->f_blocks = sbinfo->max_blocks;
3018 buf->f_bfree = sbinfo->max_blocks -
3019 percpu_counter_sum(&sbinfo->used_blocks);
3021 if (sbinfo->max_inodes) {
3022 buf->f_files = sbinfo->max_inodes;
3023 buf->f_ffree = sbinfo->free_inodes;
3025 /* else leave those fields 0 like simple_statfs */
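/*
 * Illustrative userspace sketch (not part of this file): what the
 * statfs(2) fields filled in above look like from userspace; block and
 * inode totals stay 0 on an unlimited mount.
 *
 *	struct statfs st;
 *	statfs("/dev/shm", &st);
 *	// st.f_type   == TMPFS_MAGIC (0x01021994)
 *	// st.f_bsize  == PAGE_SIZE
 *	// st.f_blocks == the mount's size= limit in pages
 *	// st.f_bfree  == pages not yet allocated to files
 */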
3030 * File creation. Allocate an inode, and we're done..
3033 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
3035 struct inode *inode;
3036 int error = -ENOSPC;
3038 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
3040 error = simple_acl_create(dir, inode);
3043 error = security_inode_init_security(inode, dir,
3045 shmem_initxattrs, NULL);
3046 if (error && error != -EOPNOTSUPP)
3050 dir->i_size += BOGO_DIRENT_SIZE;
3051 dir->i_ctime = dir->i_mtime = current_time(dir);
3052 d_instantiate(dentry, inode);
3053 dget(dentry); /* Extra count - pin the dentry in core */
3062 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
3064 struct inode *inode;
3065 int error = -ENOSPC;
3067 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
3069 error = security_inode_init_security(inode, dir,
3071 shmem_initxattrs, NULL);
3072 if (error && error != -EOPNOTSUPP)
3074 error = simple_acl_create(dir, inode);
3077 d_tmpfile(dentry, inode);
3085 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
3089 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
3095 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
3098 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
3104 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
3106 struct inode *inode = d_inode(old_dentry);
3110 * No ordinary (disk based) filesystem counts links as inodes;
3111 * but each new link needs a new dentry, pinning lowmem, and
3112 * tmpfs dentries cannot be pruned until they are unlinked.
3113 * But if an O_TMPFILE file is linked into the tmpfs, the
3114 * first link must skip that, to get the accounting right.
3116 if (inode->i_nlink) {
3117 ret = shmem_reserve_inode(inode->i_sb, NULL);
3122 dir->i_size += BOGO_DIRENT_SIZE;
3123 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3125 ihold(inode); /* New dentry reference */
3126 dget(dentry); /* Extra pinning count for the created dentry */
3127 d_instantiate(dentry, inode);
3132 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
3134 struct inode *inode = d_inode(dentry);
3136 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
3137 shmem_free_inode(inode->i_sb);
3139 dir->i_size -= BOGO_DIRENT_SIZE;
3140 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
3142 dput(dentry); /* Undo the count from "create" - this does all the work */
3146 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
3148 if (!simple_empty(dentry))
3151 drop_nlink(d_inode(dentry));
3153 return shmem_unlink(dir, dentry);
3156 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
3158 bool old_is_dir = d_is_dir(old_dentry);
3159 bool new_is_dir = d_is_dir(new_dentry);
3161 if (old_dir != new_dir && old_is_dir != new_is_dir) {
3163 drop_nlink(old_dir);
3166 drop_nlink(new_dir);
3170 old_dir->i_ctime = old_dir->i_mtime =
3171 new_dir->i_ctime = new_dir->i_mtime =
3172 d_inode(old_dentry)->i_ctime =
3173 d_inode(new_dentry)->i_ctime = current_time(old_dir);
3178 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
3180 struct dentry *whiteout;
3183 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
3187 error = shmem_mknod(old_dir, whiteout,
3188 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
3194 * Cheat and hash the whiteout while the old dentry is still in
3195 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
3197 * d_lookup() will consistently find one of them at this point,
3198 * not sure which one, but that isn't even important.
3205 * The VFS layer already does all the dentry stuff for rename,
3206 * we just have to decrement the usage count for the target if
3207 it exists so that the VFS layer correctly frees it when it gets overwritten.
3210 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3212 struct inode *inode = d_inode(old_dentry);
3213 int they_are_dirs = S_ISDIR(inode->i_mode);
3215 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3218 if (flags & RENAME_EXCHANGE)
3219 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3221 if (!simple_empty(new_dentry))
3224 if (flags & RENAME_WHITEOUT) {
3227 error = shmem_whiteout(old_dir, old_dentry);
3232 if (d_really_is_positive(new_dentry)) {
3233 (void) shmem_unlink(new_dir, new_dentry);
3234 if (they_are_dirs) {
3235 drop_nlink(d_inode(new_dentry));
3236 drop_nlink(old_dir);
3238 } else if (they_are_dirs) {
3239 drop_nlink(old_dir);
3243 old_dir->i_size -= BOGO_DIRENT_SIZE;
3244 new_dir->i_size += BOGO_DIRENT_SIZE;
3245 old_dir->i_ctime = old_dir->i_mtime =
3246 new_dir->i_ctime = new_dir->i_mtime =
3247 inode->i_ctime = current_time(old_dir);
3251 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3255 struct inode *inode;
3258 len = strlen(symname) + 1;
3259 if (len > PAGE_SIZE)
3260 return -ENAMETOOLONG;
3262 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
3267 error = security_inode_init_security(inode, dir, &dentry->d_name,
3268 shmem_initxattrs, NULL);
3269 if (error && error != -EOPNOTSUPP) {
3274 inode->i_size = len-1;
3275 if (len <= SHORT_SYMLINK_LEN) {
3276 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3277 if (!inode->i_link) {
3281 inode->i_op = &shmem_short_symlink_operations;
3283 inode_nohighmem(inode);
3284 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3289 inode->i_mapping->a_ops = &shmem_aops;
3290 inode->i_op = &shmem_symlink_inode_operations;
3291 memcpy(page_address(page), symname, len);
3292 SetPageUptodate(page);
3293 set_page_dirty(page);
3297 dir->i_size += BOGO_DIRENT_SIZE;
3298 dir->i_ctime = dir->i_mtime = current_time(dir);
3299 d_instantiate(dentry, inode);
3304 static void shmem_put_link(void *arg)
3306 mark_page_accessed(arg);
3310 static const char *shmem_get_link(struct dentry *dentry,
3311 struct inode *inode,
3312 struct delayed_call *done)
3314 struct page *page = NULL;
3317 page = find_get_page(inode->i_mapping, 0);
3319 return ERR_PTR(-ECHILD);
3320 if (!PageUptodate(page)) {
3322 return ERR_PTR(-ECHILD);
3325 error = shmem_getpage(inode, 0, &page, SGP_READ);
3327 return ERR_PTR(error);
3330 set_delayed_call(done, shmem_put_link, page);
3331 return page_address(page);
3334 #ifdef CONFIG_TMPFS_XATTR
3336 * Superblocks without xattr inode operations may get some security.* xattr
3337 * support from the LSM "for free". As soon as we have any other xattrs
3338 * like ACLs, we also need to implement the security.* handlers at
3339 * filesystem level, though.
3343 * Callback for security_inode_init_security() for acquiring xattrs.
3345 static int shmem_initxattrs(struct inode *inode,
3346 const struct xattr *xattr_array,
3349 struct shmem_inode_info *info = SHMEM_I(inode);
3350 const struct xattr *xattr;
3351 struct simple_xattr *new_xattr;
3354 for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3355 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3359 len = strlen(xattr->name) + 1;
3360 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3362 if (!new_xattr->name) {
3367 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3368 XATTR_SECURITY_PREFIX_LEN);
3369 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3372 simple_xattr_list_add(&info->xattrs, new_xattr);
3378 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3379 struct dentry *unused, struct inode *inode,
3380 const char *name, void *buffer, size_t size)
3382 struct shmem_inode_info *info = SHMEM_I(inode);
3384 name = xattr_full_name(handler, name);
3385 return simple_xattr_get(&info->xattrs, name, buffer, size);
3388 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3389 struct dentry *unused, struct inode *inode,
3390 const char *name, const void *value,
3391 size_t size, int flags)
3393 struct shmem_inode_info *info = SHMEM_I(inode);
3395 name = xattr_full_name(handler, name);
3396 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3399 static const struct xattr_handler shmem_security_xattr_handler = {
3400 .prefix = XATTR_SECURITY_PREFIX,
3401 .get = shmem_xattr_handler_get,
3402 .set = shmem_xattr_handler_set,
3405 static const struct xattr_handler shmem_trusted_xattr_handler = {
3406 .prefix = XATTR_TRUSTED_PREFIX,
3407 .get = shmem_xattr_handler_get,
3408 .set = shmem_xattr_handler_set,
3411 static const struct xattr_handler *shmem_xattr_handlers[] = {
3412 #ifdef CONFIG_TMPFS_POSIX_ACL
3413 &posix_acl_access_xattr_handler,
3414 &posix_acl_default_xattr_handler,
3416 &shmem_security_xattr_handler,
3417 &shmem_trusted_xattr_handler,
3421 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3423 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3424 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3426 #endif /* CONFIG_TMPFS_XATTR */
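/*
 * Illustrative userspace sketch (not part of this file): the handlers
 * above back getxattr(2)/setxattr(2) for the security.* and trusted.*
 * namespaces on tmpfs; trusted.* needs CAP_SYS_ADMIN, and
 * security.selinux is only present when SELinux is active.
 *
 *	char label[256];
 *	getxattr("/dev/shm/f", "security.selinux", label, sizeof(label));
 *	setxattr("/dev/shm/f", "trusted.example", "value", 5, 0);
 */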
3428 static const struct inode_operations shmem_short_symlink_operations = {
3429 .get_link = simple_get_link,
3430 #ifdef CONFIG_TMPFS_XATTR
3431 .listxattr = shmem_listxattr,
3435 static const struct inode_operations shmem_symlink_inode_operations = {
3436 .get_link = shmem_get_link,
3437 #ifdef CONFIG_TMPFS_XATTR
3438 .listxattr = shmem_listxattr,
3442 static struct dentry *shmem_get_parent(struct dentry *child)
3444 return ERR_PTR(-ESTALE);
3447 static int shmem_match(struct inode *ino, void *vfh)
3451 inum = (inum << 32) | fh[1];
3452 return ino->i_ino == inum && fh[0] == ino->i_generation;
3455 /* Find any alias of inode, but prefer a hashed alias */
3456 static struct dentry *shmem_find_alias(struct inode *inode)
3458 struct dentry *alias = d_find_alias(inode);
3460 return alias ?: d_find_any_alias(inode);
3464 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3465 struct fid *fid, int fh_len, int fh_type)
3467 struct inode *inode;
3468 struct dentry *dentry = NULL;
3475 inum = (inum << 32) | fid->raw[1];
3477 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3478 shmem_match, fid->raw);
3480 dentry = shmem_find_alias(inode);
3487 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3488 struct inode *parent)
3492 return FILEID_INVALID;
3495 if (inode_unhashed(inode)) {
3496 /* Unfortunately insert_inode_hash is not idempotent,
3497 * so as we hash inodes here rather than at creation
3498 * time, we need a lock to ensure we only try to do it once.
3501 static DEFINE_SPINLOCK(lock);
3503 if (inode_unhashed(inode))
3504 __insert_inode_hash(inode,
3505 inode->i_ino + inode->i_generation);
3509 fh[0] = inode->i_generation;
3510 fh[1] = inode->i_ino;
3511 fh[2] = ((__u64)inode->i_ino) >> 32;
3517 static const struct export_operations shmem_export_ops = {
3518 .get_parent = shmem_get_parent,
3519 .encode_fh = shmem_encode_fh,
3520 .fh_to_dentry = shmem_fh_to_dentry,
3536 static const struct constant_table shmem_param_enums_huge[] = {
3537 {"never", SHMEM_HUGE_NEVER },
3538 {"always", SHMEM_HUGE_ALWAYS },
3539 {"within_size", SHMEM_HUGE_WITHIN_SIZE },
3540 {"advise", SHMEM_HUGE_ADVISE },
3544 const struct fs_parameter_spec shmem_fs_parameters[] = {
3545 fsparam_u32 ("gid", Opt_gid),
3546 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge),
3547 fsparam_u32oct("mode", Opt_mode),
3548 fsparam_string("mpol", Opt_mpol),
3549 fsparam_string("nr_blocks", Opt_nr_blocks),
3550 fsparam_string("nr_inodes", Opt_nr_inodes),
3551 fsparam_string("size", Opt_size),
3552 fsparam_u32 ("uid", Opt_uid),
3553 fsparam_flag ("inode32", Opt_inode32),
3554 fsparam_flag ("inode64", Opt_inode64),
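/*
 * Illustrative userspace sketch (not part of this file): the parameters
 * above supplied as a classic mount(2) option string (the fsopen()/
 * fsconfig() API feeds the same parser one key at a time).
 *
 *	mount("tmpfs", "/mnt/t", "tmpfs", 0,
 *	      "size=1G,nr_inodes=10k,mode=1777,huge=within_size,inode64");
 */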
3558 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
3560 struct shmem_options *ctx = fc->fs_private;
3561 struct fs_parse_result result;
3562 unsigned long long size;
3566 opt = fs_parse(fc, shmem_fs_parameters, param, &result);
3572 size = memparse(param->string, &rest);
3574 size <<= PAGE_SHIFT;
3575 size *= totalram_pages();
3581 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
3582 ctx->seen |= SHMEM_SEEN_BLOCKS;
3585 ctx->blocks = memparse(param->string, &rest);
3588 ctx->seen |= SHMEM_SEEN_BLOCKS;
3591 ctx->inodes = memparse(param->string, &rest);
3594 ctx->seen |= SHMEM_SEEN_INODES;
3597 ctx->mode = result.uint_32 & 07777;
3600 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
3601 if (!uid_valid(ctx->uid))
3605 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
3606 if (!gid_valid(ctx->gid))
3610 ctx->huge = result.uint_32;
3611 if (ctx->huge != SHMEM_HUGE_NEVER &&
3612 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
3613 has_transparent_hugepage()))
3614 goto unsupported_parameter;
3615 ctx->seen |= SHMEM_SEEN_HUGE;
3618 if (IS_ENABLED(CONFIG_NUMA)) {
3619 mpol_put(ctx->mpol);
3621 if (mpol_parse_str(param->string, &ctx->mpol))
3625 goto unsupported_parameter;
3627 ctx->full_inums = false;
3628 ctx->seen |= SHMEM_SEEN_INUMS;
3631 if (sizeof(ino_t) < 8) {
3633 "Cannot use inode64 with <64bit inums in kernel\n");
3635 ctx->full_inums = true;
3636 ctx->seen |= SHMEM_SEEN_INUMS;
3641 unsupported_parameter:
3642 return invalfc(fc, "Unsupported parameter '%s'", param->key);
3644 return invalfc(fc, "Bad value for '%s'", param->key);
3647 static int shmem_parse_options(struct fs_context *fc, void *data)
3649 char *options = data;
3652 int err = security_sb_eat_lsm_opts(options, &fc->security);
3657 while (options != NULL) {
3658 char *this_char = options;
3661 * NUL-terminate this option: unfortunately,
3662 * mount options form a comma-separated list,
3663 * but mpol's nodelist may also contain commas.
3665 options = strchr(options, ',');
3666 if (options == NULL)
3669 if (!isdigit(*options)) {
3675 char *value = strchr(this_char,'=');
3681 len = strlen(value);
3683 err = vfs_parse_fs_string(fc, this_char, value, len);
3692 * Reconfigure a shmem filesystem.
3694 * Note that we disallow change from limited->unlimited blocks/inodes while any
3695 * are in use; but we must separately disallow unlimited->limited, because in
3696 * that case we have no record of how much is already in use.
3698 static int shmem_reconfigure(struct fs_context *fc)
3700 struct shmem_options *ctx = fc->fs_private;
3701 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
3702 unsigned long inodes;
3705 spin_lock(&sbinfo->stat_lock);
3706 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3707 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
3708 if (!sbinfo->max_blocks) {
3709 err = "Cannot retroactively limit size";
3712 if (percpu_counter_compare(&sbinfo->used_blocks,
3714 err = "Too small a size for current use";
3718 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
3719 if (!sbinfo->max_inodes) {
3720 err = "Cannot retroactively limit inodes";
3723 if (ctx->inodes < inodes) {
3724 err = "Too few inodes for current use";
3729 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
3730 sbinfo->next_ino > UINT_MAX) {
3731 err = "Current inum too high to switch to 32-bit inums";
3735 if (ctx->seen & SHMEM_SEEN_HUGE)
3736 sbinfo->huge = ctx->huge;
3737 if (ctx->seen & SHMEM_SEEN_INUMS)
3738 sbinfo->full_inums = ctx->full_inums;
3739 if (ctx->seen & SHMEM_SEEN_BLOCKS)
3740 sbinfo->max_blocks = ctx->blocks;
3741 if (ctx->seen & SHMEM_SEEN_INODES) {
3742 sbinfo->max_inodes = ctx->inodes;
3743 sbinfo->free_inodes = ctx->inodes - inodes;
3747 * Preserve previous mempolicy unless mpol remount option was specified.
3750 mpol_put(sbinfo->mpol);
3751 sbinfo->mpol = ctx->mpol; /* transfers initial ref */
3754 spin_unlock(&sbinfo->stat_lock);
3757 spin_unlock(&sbinfo->stat_lock);
3758 return invalfc(fc, "%s", err);
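/*
 * Illustrative userspace sketch (not part of this file): a remount that
 * goes through shmem_reconfigure() above.  Raising the limits succeeds;
 * shrinking below current use is refused with the messages above.
 *
 *	mount(NULL, "/dev/shm", NULL, MS_REMOUNT, "size=2G,nr_inodes=200k");
 */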
3761 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3763 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3765 if (sbinfo->max_blocks != shmem_default_max_blocks())
3766 seq_printf(seq, ",size=%luk",
3767 sbinfo->max_blocks << (PAGE_SHIFT - 10));
3768 if (sbinfo->max_inodes != shmem_default_max_inodes())
3769 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3770 if (sbinfo->mode != (0777 | S_ISVTX))
3771 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3772 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3773 seq_printf(seq, ",uid=%u",
3774 from_kuid_munged(&init_user_ns, sbinfo->uid));
3775 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3776 seq_printf(seq, ",gid=%u",
3777 from_kgid_munged(&init_user_ns, sbinfo->gid));
3780 * Showing inode{64,32} might be useful even if it's the system default,
3781 * since then people don't have to resort to checking both here and
3782 * /proc/config.gz to confirm 64-bit inums were successfully applied
3783 * (which may not even exist if IKCONFIG_PROC isn't enabled).
3785 * We hide it when inode64 isn't the default and we are using 32-bit
3786 * inodes, since that probably just means the feature isn't even under consideration.
3791 * +------------------+-----------------+-----------------+
3792 * |                  | TMPFS_INODE64=y | TMPFS_INODE64=n |
3793 * +------------------+-----------------+-----------------+
3794 * | full_inums=true  | show            | show            |
3795 * | full_inums=false | show            | hide            |
3796 * +------------------+-----------------+-----------------+
3799 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
3800 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
3801 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3802 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3804 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3806 shmem_show_mpol(seq, sbinfo->mpol);
3810 #endif /* CONFIG_TMPFS */
3812 static void shmem_put_super(struct super_block *sb)
3814 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3816 free_percpu(sbinfo->ino_batch);
3817 percpu_counter_destroy(&sbinfo->used_blocks);
3818 mpol_put(sbinfo->mpol);
3820 sb->s_fs_info = NULL;
3823 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
3825 struct shmem_options *ctx = fc->fs_private;
3826 struct inode *inode;
3827 struct shmem_sb_info *sbinfo;
3830 /* Round up to L1_CACHE_BYTES to resist false sharing */
3831 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3832 L1_CACHE_BYTES), GFP_KERNEL);
3836 sb->s_fs_info = sbinfo;
3840 * Per default we only allow half of the physical ram per
3841 * tmpfs instance, limiting inodes to one per page of lowmem;
3842 * but the internal instance is left unlimited.
3844 if (!(sb->s_flags & SB_KERNMOUNT)) {
3845 if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
3846 ctx->blocks = shmem_default_max_blocks();
3847 if (!(ctx->seen & SHMEM_SEEN_INODES))
3848 ctx->inodes = shmem_default_max_inodes();
3849 if (!(ctx->seen & SHMEM_SEEN_INUMS))
3850 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
3852 sb->s_flags |= SB_NOUSER;
3854 sb->s_export_op = &shmem_export_ops;
3855 sb->s_flags |= SB_NOSEC;
3857 sb->s_flags |= SB_NOUSER;
3859 sbinfo->max_blocks = ctx->blocks;
3860 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
3861 if (sb->s_flags & SB_KERNMOUNT) {
3862 sbinfo->ino_batch = alloc_percpu(ino_t);
3863 if (!sbinfo->ino_batch)
3866 sbinfo->uid = ctx->uid;
3867 sbinfo->gid = ctx->gid;
3868 sbinfo->full_inums = ctx->full_inums;
3869 sbinfo->mode = ctx->mode;
3870 sbinfo->huge = ctx->huge;
3871 sbinfo->mpol = ctx->mpol;
3874 spin_lock_init(&sbinfo->stat_lock);
3875 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3877 spin_lock_init(&sbinfo->shrinklist_lock);
3878 INIT_LIST_HEAD(&sbinfo->shrinklist);
3880 sb->s_maxbytes = MAX_LFS_FILESIZE;
3881 sb->s_blocksize = PAGE_SIZE;
3882 sb->s_blocksize_bits = PAGE_SHIFT;
3883 sb->s_magic = TMPFS_MAGIC;
3884 sb->s_op = &shmem_ops;
3885 sb->s_time_gran = 1;
3886 #ifdef CONFIG_TMPFS_XATTR
3887 sb->s_xattr = shmem_xattr_handlers;
3889 #ifdef CONFIG_TMPFS_POSIX_ACL
3890 sb->s_flags |= SB_POSIXACL;
3892 uuid_gen(&sb->s_uuid);
3894 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3897 inode->i_uid = sbinfo->uid;
3898 inode->i_gid = sbinfo->gid;
3899 sb->s_root = d_make_root(inode);
3905 shmem_put_super(sb);
3909 static int shmem_get_tree(struct fs_context *fc)
3911 return get_tree_nodev(fc, shmem_fill_super);
3914 static void shmem_free_fc(struct fs_context *fc)
3916 struct shmem_options *ctx = fc->fs_private;
3919 mpol_put(ctx->mpol);
3924 static const struct fs_context_operations shmem_fs_context_ops = {
3925 .free = shmem_free_fc,
3926 .get_tree = shmem_get_tree,
3928 .parse_monolithic = shmem_parse_options,
3929 .parse_param = shmem_parse_one,
3930 .reconfigure = shmem_reconfigure,
3934 static struct kmem_cache *shmem_inode_cachep;
3936 static struct inode *shmem_alloc_inode(struct super_block *sb)
3938 struct shmem_inode_info *info;
3939 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3942 return &info->vfs_inode;
3945 static void shmem_free_in_core_inode(struct inode *inode)
3947 if (S_ISLNK(inode->i_mode))
3948 kfree(inode->i_link);
3949 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3952 static void shmem_destroy_inode(struct inode *inode)
3954 if (S_ISREG(inode->i_mode))
3955 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3958 static void shmem_init_inode(void *foo)
3960 struct shmem_inode_info *info = foo;
3961 inode_init_once(&info->vfs_inode);
3964 static void shmem_init_inodecache(void)
3966 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3967 sizeof(struct shmem_inode_info),
3968 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3971 static void shmem_destroy_inodecache(void)
3973 kmem_cache_destroy(shmem_inode_cachep);
3976 static const struct address_space_operations shmem_aops = {
3977 .writepage = shmem_writepage,
3978 .set_page_dirty = __set_page_dirty_no_writeback,
3980 .write_begin = shmem_write_begin,
3981 .write_end = shmem_write_end,
3983 #ifdef CONFIG_MIGRATION
3984 .migratepage = migrate_page,
3986 .error_remove_page = generic_error_remove_page,
3989 static const struct file_operations shmem_file_operations = {
3991 .get_unmapped_area = shmem_get_unmapped_area,
3993 .llseek = shmem_file_llseek,
3994 .read_iter = shmem_file_read_iter,
3995 .write_iter = generic_file_write_iter,
3996 .fsync = noop_fsync,
3997 .splice_read = generic_file_splice_read,
3998 .splice_write = iter_file_splice_write,
3999 .fallocate = shmem_fallocate,
4003 static const struct inode_operations shmem_inode_operations = {
4004 .getattr = shmem_getattr,
4005 .setattr = shmem_setattr,
4006 #ifdef CONFIG_TMPFS_XATTR
4007 .listxattr = shmem_listxattr,
4008 .set_acl = simple_set_acl,
4012 static const struct inode_operations shmem_dir_inode_operations = {
4014 .create = shmem_create,
4015 .lookup = simple_lookup,
4017 .unlink = shmem_unlink,
4018 .symlink = shmem_symlink,
4019 .mkdir = shmem_mkdir,
4020 .rmdir = shmem_rmdir,
4021 .mknod = shmem_mknod,
4022 .rename = shmem_rename2,
4023 .tmpfile = shmem_tmpfile,
4025 #ifdef CONFIG_TMPFS_XATTR
4026 .listxattr = shmem_listxattr,
4028 #ifdef CONFIG_TMPFS_POSIX_ACL
4029 .setattr = shmem_setattr,
4030 .set_acl = simple_set_acl,
4034 static const struct inode_operations shmem_special_inode_operations = {
4035 #ifdef CONFIG_TMPFS_XATTR
4036 .listxattr = shmem_listxattr,
4038 #ifdef CONFIG_TMPFS_POSIX_ACL
4039 .setattr = shmem_setattr,
4040 .set_acl = simple_set_acl,
4044 static const struct super_operations shmem_ops = {
4045 .alloc_inode = shmem_alloc_inode,
4046 .free_inode = shmem_free_in_core_inode,
4047 .destroy_inode = shmem_destroy_inode,
4049 .statfs = shmem_statfs,
4050 .show_options = shmem_show_options,
4052 .evict_inode = shmem_evict_inode,
4053 .drop_inode = generic_delete_inode,
4054 .put_super = shmem_put_super,
4055 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4056 .nr_cached_objects = shmem_unused_huge_count,
4057 .free_cached_objects = shmem_unused_huge_scan,
4061 static const struct vm_operations_struct shmem_vm_ops = {
4062 .fault = shmem_fault,
4063 .map_pages = filemap_map_pages,
4065 .set_policy = shmem_set_policy,
4066 .get_policy = shmem_get_policy,
4070 int shmem_init_fs_context(struct fs_context *fc)
4072 struct shmem_options *ctx;
4074 ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
4078 ctx->mode = 0777 | S_ISVTX;
4079 ctx->uid = current_fsuid();
4080 ctx->gid = current_fsgid();
4082 fc->fs_private = ctx;
4083 fc->ops = &shmem_fs_context_ops;
4087 static struct file_system_type shmem_fs_type = {
4088 .owner = THIS_MODULE,
4090 .init_fs_context = shmem_init_fs_context,
4092 .parameters = shmem_fs_parameters,
4094 .kill_sb = kill_litter_super,
4095 .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT,
4098 int __init shmem_init(void)
4102 shmem_init_inodecache();
4104 error = register_filesystem(&shmem_fs_type);
4106 pr_err("Could not register tmpfs\n");
4110 shm_mnt = kern_mount(&shmem_fs_type);
4111 if (IS_ERR(shm_mnt)) {
4112 error = PTR_ERR(shm_mnt);
4113 pr_err("Could not kern_mount tmpfs\n");
4117 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4118 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
4119 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4121 shmem_huge = 0; /* just in case it was patched */
4126 unregister_filesystem(&shmem_fs_type);
4128 shmem_destroy_inodecache();
4129 shm_mnt = ERR_PTR(error);
4133 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
4134 static ssize_t shmem_enabled_show(struct kobject *kobj,
4135 struct kobj_attribute *attr, char *buf)
4137 static const int values[] = {
4139 SHMEM_HUGE_WITHIN_SIZE,
4147 for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
4148 const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
4150 count += sprintf(buf + count, fmt,
4151 shmem_format_huge(values[i]));
4153 buf[count - 1] = '\n';
4157 static ssize_t shmem_enabled_store(struct kobject *kobj,
4158 struct kobj_attribute *attr, const char *buf, size_t count)
4163 if (count + 1 > sizeof(tmp))
4165 memcpy(tmp, buf, count);
4167 if (count && tmp[count - 1] == '\n')
4168 tmp[count - 1] = '\0';
4170 huge = shmem_parse_huge(tmp);
4171 if (huge == -EINVAL)
4173 if (!has_transparent_hugepage() &&
4174 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
4178 if (shmem_huge > SHMEM_HUGE_DENY)
4179 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
4183 struct kobj_attribute shmem_enabled_attr =
4184 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
4185 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
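/*
 * Illustrative userspace sketch (not part of this file): changing the
 * global policy through the attribute defined above,
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.
 *
 *	int fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
 *		      O_WRONLY);
 *	write(fd, "within_size", 11);	// also: always, advise, never,
 *					//       deny, force
 *	close(fd);
 */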
4187 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4188 bool shmem_huge_enabled(struct vm_area_struct *vma)
4190 struct inode *inode = file_inode(vma->vm_file);
4191 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
4195 if ((vma->vm_flags & VM_NOHUGEPAGE) ||
4196 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
4198 if (shmem_huge == SHMEM_HUGE_FORCE)
4200 if (shmem_huge == SHMEM_HUGE_DENY)
4202 switch (sbinfo->huge) {
4203 case SHMEM_HUGE_NEVER:
4205 case SHMEM_HUGE_ALWAYS:
4207 case SHMEM_HUGE_WITHIN_SIZE:
4208 off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
4209 i_size = round_up(i_size_read(inode), PAGE_SIZE);
4210 if (i_size >= HPAGE_PMD_SIZE &&
4211 i_size >> PAGE_SHIFT >= off)
4213 #ifdef CONFIG_FINEGRAINED_THP
4214 off = round_up(vma->vm_pgoff, HPAGE_CONT_PTE_NR);
4215 if (i_size >= HPAGE_CONT_PTE_SIZE &&
4216 i_size >> PAGE_SHIFT >= off)
4218 #endif /* CONFIG_FINEGRAINED_THP */
4220 case SHMEM_HUGE_ADVISE:
4221 /* TODO: implement fadvise() hints */
4222 return (vma->vm_flags & VM_HUGEPAGE);
4228 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4230 #else /* !CONFIG_SHMEM */
4233 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
4235 * This is intended for small systems where the benefits of the full
4236 * shmem code (swap-backed and resource-limited) are outweighed by
4237 * their complexity. On systems without swap this code should be
4238 * effectively equivalent, but much lighter weight.
4241 static struct file_system_type shmem_fs_type = {
4243 .init_fs_context = ramfs_init_fs_context,
4244 .parameters = ramfs_fs_parameters,
4245 .kill_sb = kill_litter_super,
4246 .fs_flags = FS_USERNS_MOUNT,
4249 int __init shmem_init(void)
4251 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
4253 shm_mnt = kern_mount(&shmem_fs_type);
4254 BUG_ON(IS_ERR(shm_mnt));
4259 int shmem_unuse(unsigned int type, bool frontswap,
4260 unsigned long *fs_pages_to_unuse)
4265 int shmem_lock(struct file *file, int lock, struct user_struct *user)
4270 void shmem_unlock_mapping(struct address_space *mapping)
4275 unsigned long shmem_get_unmapped_area(struct file *file,
4276 unsigned long addr, unsigned long len,
4277 unsigned long pgoff, unsigned long flags)
4279 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
4283 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
4285 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
4287 EXPORT_SYMBOL_GPL(shmem_truncate_range);
4289 #define shmem_vm_ops generic_file_vm_ops
4290 #define shmem_file_operations ramfs_file_operations
4291 #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
4292 #define shmem_acct_size(flags, size) 0
4293 #define shmem_unacct_size(flags, size) do {} while (0)
4295 #endif /* CONFIG_SHMEM */
4299 static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
4300 unsigned long flags, unsigned int i_flags)
4302 struct inode *inode;
4306 return ERR_CAST(mnt);
4308 if (size < 0 || size > MAX_LFS_FILESIZE)
4309 return ERR_PTR(-EINVAL);
4311 if (shmem_acct_size(flags, size))
4312 return ERR_PTR(-ENOMEM);
4314 inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
4316 if (unlikely(!inode)) {
4317 shmem_unacct_size(flags, size);
4318 return ERR_PTR(-ENOSPC);
4320 inode->i_flags |= i_flags;
4321 inode->i_size = size;
4322 clear_nlink(inode); /* It is unlinked */
4323 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4325 res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
4326 &shmem_file_operations);
4333 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4334 * kernel internal. There will be NO LSM permission checks against the
4335 * underlying inode. So users of this interface must do LSM checks at a
4336 * higher layer. The users are the big_key and shm implementations. LSM
4337 * checks are provided at the key or shm level rather than the inode.
4338 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4339 * @size: size to be set for the file
4340 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4342 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4344 return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
4348 * shmem_file_setup - get an unlinked file living in tmpfs
4349 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4350 * @size: size to be set for the file
4351 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4353 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4355 return __shmem_file_setup(shm_mnt, name, size, flags, 0);
4357 EXPORT_SYMBOL_GPL(shmem_file_setup);
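/*
 * Illustrative kernel-side sketch (not part of this file): a driver
 * backing an object with an unlinked tmpfs file, much as the GEM code
 * does.  "obj" and its members are hypothetical.
 *
 *	obj->filp = shmem_file_setup("driver-object", obj->size, VM_NORESERVE);
 *	if (IS_ERR(obj->filp))
 *		return PTR_ERR(obj->filp);
 *	...
 *	fput(obj->filp);	// last reference gone, pages are freed
 */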
4360 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
4361 * @mnt: the tmpfs mount where the file will be created
4362 * @name: name for dentry (to be seen in /proc/<pid>/maps)
4363 * @size: size to be set for the file
4364 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4366 struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
4367 loff_t size, unsigned long flags)
4369 return __shmem_file_setup(mnt, name, size, flags, 0);
4371 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
4374 * shmem_zero_setup - setup a shared anonymous mapping
4375 * @vma: the vma to be mmapped is prepared by do_mmap
4377 int shmem_zero_setup(struct vm_area_struct *vma)
4380 loff_t size = vma->vm_end - vma->vm_start;
4383 * Cloning a new file under mmap_lock leads to a lock ordering conflict
4384 * between XFS directory reading and selinux: since this file is only
4385 * accessible to the user through its mapping, use S_PRIVATE flag to
4386 * bypass file security, in the same way as shmem_kernel_file_setup().
4388 file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
4390 return PTR_ERR(file);
4394 vma->vm_file = file;
4395 vma->vm_ops = &shmem_vm_ops;
4397 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
4398 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4399 (vma->vm_end & HPAGE_PMD_MASK)) {
4400 khugepaged_enter(vma, vma->vm_flags);
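/*
 * Illustrative userspace sketch (not part of this file): both of these
 * mappings are set up by shmem_zero_setup() above, so the shared zero
 * memory is backed by a swappable shmem object.
 *
 *	mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */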
4407 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4408 * @mapping: the page's address_space
4409 * @index: the page index
4410 * @gfp: the page allocator flags to use if allocating
4412 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4413 * with any new page allocations done using the specified allocation flags.
4414 * But read_cache_page_gfp() uses the ->readpage() method: which does not
4415 * suit tmpfs, since it may have pages in swapcache, and needs to find those
4416 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4418 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4419 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4421 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4422 pgoff_t index, gfp_t gfp)
4425 struct inode *inode = mapping->host;
4429 BUG_ON(mapping->a_ops != &shmem_aops);
4430 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4431 gfp, NULL, NULL, NULL);
4433 page = ERR_PTR(error);
4439 * The tiny !SHMEM case uses ramfs without swap
4441 return read_cache_page_gfp(mapping, index, gfp);
4444 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
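/*
 * Illustrative kernel-side sketch (not part of this file): how a caller
 * such as a GPU driver populates its backing store page by page,
 * preferring failure over OOM, in the spirit of the i915 usage noted
 * above.  "mapping" and "i" are hypothetical locals.
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...
 *	put_page(page);
 */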