/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */
static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"
#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
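/*
 * Worked example (added commentary, not in the original source): with
 * PAGE_SIZE = 4096, VM_ACCT(5000) = PAGE_ALIGN(5000) >> PAGE_SHIFT
 * = 8192 >> 12 = 2, i.e. sizes are rounded up to whole pages before
 * being charged against the overcommit accounting.
 */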
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}
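/*
 * Illustrative sketch (added commentary, not in the original source): every
 * path that allocates pages for a tmpfs inode brackets the work with the
 * pair above, e.g. shmem_alloc_and_acct_page() below does, in outline:
 *
 *	if (!shmem_inode_acct_block(inode, nr))
 *		return ERR_PTR(-ENOSPC);
 *	page = ...allocate...;
 *	if (!page)
 *		shmem_inode_unacct_blocks(inode, nr);
 *
 * so used_blocks and the overcommit charge never leak on failure.
 */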
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
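/*
 * Reserve one inode against sbinfo->free_inodes, or fail with -ENOSPC once
 * the mount's inode limit (max_inodes) is exhausted; a max_inodes of zero
 * means "no limit" and always succeeds.
 */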
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}
/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}
/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}
/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3
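/*
 * Example (added for illustration): the huge= policy is chosen per mount,
 * e.g. from userspace
 *
 *	mount -t tmpfs -o huge=always tmpfs /mnt/huge
 *	mount -o remount,huge=within_size /mnt/huge
 *
 * with the strings parsed by shmem_parse_huge() below.
 */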
/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
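/*
 * Example (added for illustration): the special values are applied
 * system-wide from userspace, e.g.
 *
 *	echo force > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	echo deny  > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */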
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
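/*
 * For illustration (added commentary): shmem_parse_huge("within_size")
 * returns SHMEM_HUGE_WITHIN_SIZE, while any unrecognized string falls
 * through to -EINVAL, which the mount option parser treats as a bad value.
 */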
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}
static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = 1UL << compound_order(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page + i);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__inc_node_page_state(page, NR_SHMEM_THPS);
		}
		mapping->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		page->mapping = NULL;
		page_ref_sub(page, nr);
		return xas_error(&xas);
	}

	return 0;
}
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}
/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	xa_lock_irq(&mapping->i_pages);
	old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
	xa_unlock_irq(&mapping->i_pages);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}
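/*
 * (Added note: the usual consumer of these statistics is believed to be
 * /proc/<pid>/smaps, which reports a shmem mapping's swap usage without
 * taking i_mutex, relying on the racy-but-safe rules described above.)
 */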
/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}
/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial thp truncate due 'start' in middle
				 * of THP: don't need to look on these pages
				 * again on !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
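/*
 * Usage sketch (added for illustration, not from the original source):
 * hole-punching a tmpfs file from userspace ends up here via the
 * fallocate() path, e.g.
 *
 *	fd = open("/dev/shm/example", O_RDWR);       // hypothetical file
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, length);
 *
 * which removes the covered pages and swap entries without changing i_size.
 */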
static int shmem_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);

	if (is_huge_enabled(sb_info))
		stat->blksize = HPAGE_PMD_SIZE;

	return 0;
}
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
				spin_lock(&sbinfo->shrinklist_lock);
				/*
				 * _careful to defend against unlocked access to
				 * ->shrink_list in shmem_unused_huge_shrink()
				 */
				if (list_empty_careful(&info->shrinklist)) {
					list_add_tail(&info->shrinklist,
							&sbinfo->shrinklist);
					sbinfo->shrinklist_len++;
				}
				spin_unlock(&sbinfo->shrinklist_lock);
			}
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}
static unsigned long find_swap_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % XA_CHECK_SCHED) != 0)
			continue;
		xas_pause(&xas);
		cond_resched_rcu();
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}
/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = find_swap_entry(&mapping->i_pages, radswap);
	if (index == -1)
		return -EAGAIN;	/* tell shmem_unuse we found nothing */

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the page cache, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from page cache whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that. However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						radswap, gfp);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock_irq(&info->lock);
			info->swapped--;
			spin_unlock_irq(&info->lock);
			swap_free(swap);
		}
	}
	return error;
}
/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	struct mem_cgroup *memcg;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_try_charge_delay(page, current->mm, GFP_KERNEL,
					    &memcg, false);
	if (error)
		goto out;
	/* No memory allocation: swap entry occupies the slot for the page */
	error = -EAGAIN;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			error = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (error != -EAGAIN)
			break;
		/* found nothing in this: move on to search the next */
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (error) {
		if (error != -ENOMEM)
			error = 0;
		mem_cgroup_cancel_charge(page, memcg, false);
	} else
		mem_cgroup_commit_charge(page, memcg, true, false);
out:
	unlock_page(page);
	put_page(page);
	return error;
}
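/*
 * (Added note: shmem_unuse() is the shmem half of swapoff - mm/swapfile.c's
 * try_to_unuse() is believed to call it for each swap entry that belongs to
 * a tmpfs file, with the swapcache page locked and a reference held; both
 * are dropped at "out:" above.)
 */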
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap. So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page(page);
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		info->swapped++;
		spin_unlock_irq(&info->lock);

		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_page(page, swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */
#ifndef CONFIG_NUMA
#define vm_policy vm_private_data
#endif
static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
		struct shmem_inode_info *info, pgoff_t index)
{
	/* Create a pseudo vma that just contains the policy */
	vma_init(vma, NULL);
	/* Bias interleave by inode number to distribute better across nodes */
	vma->vm_pgoff = index + info->vfs_inode.i_ino;
	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}

static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(vma->vm_policy);
}

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;
	struct vm_fault vmf;

	shmem_pseudo_vma_init(&pvma, info, index);
	vmf.vma = &pvma;
	vmf.address = 0;
	page = swap_cluster_readahead(swap, gfp, &vmf);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}
static struct page *shmem_alloc_hugepage(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct address_space *mapping = info->vfs_inode.i_mapping;
	pgoff_t hindex;
	struct page *page;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
		return NULL;

	hindex = round_down(index, HPAGE_PMD_NR);
	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
								XA_PRESENT))
		return NULL;

	shmem_pseudo_vma_init(&pvma, info, hindex);
	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
	shmem_pseudo_vma_destroy(&pvma);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	shmem_pseudo_vma_init(&pvma, info, index);
	page = alloc_page_vma(gfp, &pvma, 0);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}
static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
		struct inode *inode,
		pgoff_t index, bool huge)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct page *page;
	int nr;
	int err = -ENOSPC;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
		huge = false;
	nr = huge ? HPAGE_PMD_NR : 1;

	if (!shmem_inode_acct_block(inode, nr))
		goto failed;

	if (huge)
		page = shmem_alloc_hugepage(gfp, info, index);
	else
		page = shmem_alloc_page(gfp, info, index);
	if (page) {
		__SetPageLocked(page);
		__SetPageSwapBacked(page);
		return page;
	}

	err = -ENOMEM;
	shmem_inode_unacct_blocks(inode, nr);
failed:
	return ERR_PTR(err);
}
/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	swp_entry_t entry;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	entry.val = page_private(oldpage);
	swap_index = swp_offset(entry);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	get_page(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__SetPageLocked(newpage);
	__SetPageSwapBacked(newpage);
	SetPageUptodate(newpage);
	set_page_private(newpage, entry.val);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	xa_lock_irq(&swap_mapping->i_pages);
	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
	if (!error) {
		__inc_node_page_state(newpage, NR_FILE_PAGES);
		__dec_node_page_state(oldpage, NR_FILE_PAGES);
	}
	xa_unlock_irq(&swap_mapping->i_pages);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_migrate(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	put_page(oldpage);
	put_page(oldpage);
	return error;
}
/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache.
 *
 * fault_mm and fault_type are only supplied by shmem_fault:
 * otherwise they are NULL.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
	struct vm_area_struct *vma, struct vm_fault *vmf,
			vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct mm_struct *charge_mm;
	struct mem_cgroup *memcg;
	struct page *page;
	swp_entry_t swap;
	enum sgp_type sgp_huge = sgp;
	pgoff_t hindex = index;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
		return -EFBIG;
	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
		sgp = SGP_CACHE;
repeat:
	swap.val = 0;
	page = find_lock_entry(mapping, index);
	if (xa_is_value(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto unlock;
	}

	if (page && sgp == SGP_WRITE)
		mark_page_accessed(page);

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		put_page(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	sbinfo = SHMEM_SB(inode->i_sb);
	charge_mm = vma ? vma->vm_mm : current->mm;

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap, NULL, 0);
		if (!page) {
			/* Or update major stats only when swapin succeeds?? */
			if (fault_type) {
				*fault_type |= VM_FAULT_MAJOR;
				count_vm_event(PGMAJFAULT);
				count_memcg_event_mm(charge_mm, PGMAJFAULT);
			}
			/* Here we actually start the io */
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
						    false);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						swp_to_radix_entry(swap), gfp);
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest.
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error) {
				mem_cgroup_cancel_charge(page, memcg, false);
				delete_from_swap_cache(page);
			}
		}
		if (error)
			goto failed;

		mem_cgroup_commit_charge(page, memcg, true, false);

		spin_lock_irq(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);

		if (sgp == SGP_WRITE)
			mark_page_accessed(page);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (vma && userfaultfd_missing(vma)) {
			*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
			return 0;
		}

		/* shmem_symlink() */
		if (mapping->a_ops != &shmem_aops)
			goto alloc_nohuge;
		if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
			goto alloc_nohuge;
		if (shmem_huge == SHMEM_HUGE_FORCE)
			goto alloc_huge;
		switch (sbinfo->huge) {
			loff_t i_size;
			pgoff_t off;
		case SHMEM_HUGE_NEVER:
			goto alloc_nohuge;
		case SHMEM_HUGE_WITHIN_SIZE:
			off = round_up(index, HPAGE_PMD_NR);
			i_size = round_up(i_size_read(inode), PAGE_SIZE);
			if (i_size >= HPAGE_PMD_SIZE &&
			    i_size >> PAGE_SHIFT >= off)
				goto alloc_huge;
			/* fallthrough */
		case SHMEM_HUGE_ADVISE:
			if (sgp_huge == SGP_HUGE)
				goto alloc_huge;
			/* TODO: implement fadvise() hints */
			goto alloc_nohuge;
		}

alloc_huge:
		page = shmem_alloc_and_acct_page(gfp, inode, index, true);
		if (IS_ERR(page)) {
alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, inode,
					index, false);
		}
		if (IS_ERR(page)) {
			int retry = 5;
			error = PTR_ERR(page);
			page = NULL;
			if (error != -ENOSPC)
				goto failed;
			/*
			 * Try to reclaim some space by splitting a huge page
			 * beyond i_size on the filesystem.
			 */
			while (retry--) {
				int ret;
				ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
				if (ret == SHRINK_STOP)
					break;
				if (ret)
					goto alloc_nohuge;
			}
			goto failed;
		}

		if (PageTransHuge(page))
			hindex = round_down(index, HPAGE_PMD_NR);
		else
			hindex = index;

		if (sgp == SGP_WRITE)
			__SetPageReferenced(page);

		error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
						    PageTransHuge(page));
		if (error)
			goto unacct;
		error = shmem_add_to_page_cache(page, mapping, hindex,
						NULL, gfp & GFP_RECLAIM_MASK);
		if (error) {
			mem_cgroup_cancel_charge(page, memcg,
						 PageTransHuge(page));
			goto unacct;
		}
		mem_cgroup_commit_charge(page, memcg, false,
					 PageTransHuge(page));
		lru_cache_add_anon(page);

		spin_lock_irq(&info->lock);
		info->alloced += 1 << compound_order(page);
		inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
		alloced = true;

		if (PageTransHuge(page) &&
		    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
				hindex + HPAGE_PMD_NR - 1) {
			/*
			 * Part of the huge page is beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			spin_lock(&sbinfo->shrinklist_lock);
			/*
			 * _careful to defend against unlocked access to
			 * ->shrink_list in shmem_unused_huge_shrink()
			 */
			if (list_empty_careful(&info->shrinklist)) {
				list_add_tail(&info->shrinklist,
					      &sbinfo->shrinklist);
				sbinfo->shrinklist_len++;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE && !PageUptodate(page)) {
			struct page *head = compound_head(page);
			int i;

			for (i = 0; i < (1 << compound_order(head)); i++) {
				clear_highpage(head + i);
				flush_dcache_page(head + i);
			}
			SetPageUptodate(head);
		}
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		if (alloced) {
			ClearPageDirty(page);
			delete_from_page_cache(page);
			spin_lock_irq(&info->lock);
			shmem_recalc_inode(inode);
			spin_unlock_irq(&info->lock);
		}
		error = -EINVAL;
		goto unlock;
	}
	*pagep = page + index - hindex;
	return 0;

	/*
	 * Error recovery.
	 */
unacct:
	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));

	if (PageTransHuge(page)) {
		unlock_page(page);
		put_page(page);
		goto alloc_nohuge;
	}
failed:
	if (swap.val && !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (error == -ENOSPC && !once++) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}
/*
 * This is like autoremove_wake_function, but it removes the wait queue
 * entry unconditionally - even if something else had already woken the
 * target.
 */
static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);
	list_del_init(&wait->entry);
	return ret;
}
static vm_fault_t shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
	enum sgp_type sgp;
	int err;
	vm_fault_t ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
	 * faulting pages into the hole while it's being punched.  Although
	 * shmem_undo_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_mutex in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    shmem_falloc->waitq &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);

			ret = VM_FAULT_NOPAGE;
			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* It's polite to up mmap_sem if we can */
				up_read(&vma->vm_mm->mmap_sem);
				ret = VM_FAULT_RETRY;
			}

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();

			/*
			 * shmem_falloc_waitq points into the shmem_fallocate()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);
			return ret;
		}
		spin_unlock(&inode->i_lock);
	}

	sgp = SGP_CACHE;

	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		sgp = SGP_NOHUGE;
	else if (vma->vm_flags & VM_HUGEPAGE)
		sgp = SGP_HUGE;

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
				  gfp, vma, vmf, &ret);
	if (err)
		return vmf_error(err);
	return ret;
}
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long uaddr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *,
		unsigned long, unsigned long, unsigned long, unsigned long);
	unsigned long addr;
	unsigned long offset;
	unsigned long inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	addr = get_area(file, uaddr, len, pgoff, flags);

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
		return addr;
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (shmem_huge == SHMEM_HUGE_DENY)
		return addr;
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint, respect that as before.
	 */
	if (uaddr)
		return addr;

	if (shmem_huge != SHMEM_HUGE_FORCE) {
		struct super_block *sb;

		if (file) {
			VM_BUG_ON(file->f_op != &shmem_file_operations);
			sb = file_inode(file)->i_sb;
		} else {
			/*
			 * Called directly from mm/mmap.c, or drivers/char/mem.c
			 * for "/dev/zero", to create a shared anonymous object.
			 */
			if (IS_ERR(shm_mnt))
				return addr;
			sb = shm_mnt->mnt_sb;
		}
		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
			return addr;
	}

	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
		return addr;
	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
		return addr;

	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += HPAGE_PMD_SIZE;

	if (inflated_addr > TASK_SIZE - len)
		return addr;
	return inflated_addr;
}
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock_irq(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock_irq(&info->lock);
	return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}
	return 0;
}
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_generation = prandom_u32();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->seals = F_SEAL_SEAL;
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->shrinklist);
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}

		lockdep_annotate_inode_mutex_key(inode);
	} else
		shmem_free_inode(sb);
	return inode;
}
bool shmem_mapping(struct address_space *mapping)
{
	return mapping->a_ops == &shmem_aops;
}
static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
				  pmd_t *dst_pmd,
				  struct vm_area_struct *dst_vma,
				  unsigned long dst_addr,
				  unsigned long src_addr,
				  bool zeropage,
				  struct page **pagep)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	void *page_kaddr;
	struct page *page;
	pte_t _dst_pte, *dst_pte;
	int ret;
	pgoff_t offset, max_off;

	ret = -ENOMEM;
	if (!shmem_inode_acct_block(inode, 1))
		goto out;

	if (!*pagep) {
		page = shmem_alloc_page(gfp, info, pgoff);
		if (!page)
			goto out_unacct_blocks;

		if (!zeropage) {	/* mcopy_atomic */
			page_kaddr = kmap_atomic(page);
			ret = copy_from_user(page_kaddr,
					     (const void __user *)src_addr,
					     PAGE_SIZE);
			kunmap_atomic(page_kaddr);

			/* fallback to copy_from_user outside mmap_sem */
			if (unlikely(ret)) {
				*pagep = page;
				shmem_inode_unacct_blocks(inode, 1);
				/* don't free the page */
				return -ENOENT;
			}
		} else {		/* mfill_zeropage_atomic */
			clear_highpage(page);
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
	__SetPageLocked(page);
	__SetPageSwapBacked(page);
	__SetPageUptodate(page);

	ret = -EFAULT;
	offset = linear_page_index(dst_vma, dst_addr);
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off))
		goto out_release;

	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
	if (ret)
		goto out_release;

	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
						gfp & GFP_RECLAIM_MASK);
	if (ret)
		goto out_release_uncharge;

	mem_cgroup_commit_charge(page, memcg, false, false);

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
	else {
		/*
		 * We don't set the pte dirty if the vma has no
		 * VM_WRITE permission, so mark the page dirty or it
		 * could be freed from under us. We could do it
		 * unconditionally before unlock_page(), but doing it
		 * only if VM_WRITE is not set is faster.
		 */
		set_page_dirty(page);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	ret = -EFAULT;
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(offset >= max_off))
		goto out_release_uncharge_unlock;

	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	lru_cache_add_anon(page);

	spin_lock(&info->lock);
	info->alloced++;
	inode->i_blocks += BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inc_mm_counter(dst_mm, mm_counter_file(page));
	page_add_file_rmap(page, false);
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	pte_unmap_unlock(dst_pte, ptl);
	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	ClearPageDirty(page);
	delete_from_page_cache(page);
out_release_uncharge:
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	unlock_page(page);
	put_page(page);
out_unacct_blocks:
	shmem_inode_unacct_blocks(inode, 1);
	goto out;
}
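/*
 * (Added note: the two thin wrappers below are believed to serve the
 * userfaultfd ioctls - mm/userfaultfd.c picks shmem_mcopy_atomic_pte()
 * for UFFDIO_COPY and shmem_mfill_zeropage_pte() for UFFDIO_ZEROPAGE
 * when the faulting VMA is backed by shmem.)
 */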
int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm,
			   pmd_t *dst_pmd,
			   struct vm_area_struct *dst_vma,
			   unsigned long dst_addr,
			   unsigned long src_addr,
			   struct page **pagep)
{
	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
				      dst_addr, src_addr, false, pagep);
}

int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
			     pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr)
{
	struct page *page = NULL;

	return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
				      dst_addr, 0, true, &page);
}
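
/*
 * Example (editorial, not part of the original source): the two entry
 * points above back UFFDIO_COPY and UFFDIO_ZEROPAGE for shmem-backed
 * VMAs.  A minimal userspace sketch of the resolving side, assuming a
 * userfaultfd already registered over a tmpfs/memfd mapping and a fault
 * address 'addr' read from the fault message:
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	static int resolve_with_copy(int uffd, void *addr, void *src,
 *				     size_t page_size)
 *	{
 *		struct uffdio_copy copy = {
 *			.dst = (unsigned long)addr & ~(page_size - 1),
 *			.src = (unsigned long)src,
 *			.len = page_size,
 *			.mode = 0,
 *		};
 *		// For shmem VMAs this lands in shmem_mcopy_atomic_pte();
 *		// -ENOENT from the kernel means the copy must be retried
 *		// outside mmap_sem, as the fallback comment above notes.
 *		return ioctl(uffd, UFFDIO_COPY, &copy);
 *	}
 */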
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

#ifdef CONFIG_TMPFS

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t index = pos >> PAGE_SHIFT;

	/* i_mutex is held by caller */
	if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) {
		if (info->seals & F_SEAL_WRITE)
			return -EPERM;
		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
			return -EPERM;
	}

	return shmem_getpage(inode, index, pagep, SGP_WRITE);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		struct page *head = compound_head(page);
		if (PageTransCompound(page)) {
			int i;

			for (i = 0; i < HPAGE_PMD_NR; i++) {
				if (head + i == page)
					continue;
				clear_highpage(head + i);
				flush_dcache_page(head + i);
			}
		}
		if (copied < PAGE_SIZE) {
			unsigned from = pos & (PAGE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_SIZE);
		}
		SetPageUptodate(head);
	}
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}
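
/*
 * Example (editorial): the seal checks in shmem_write_begin() above are
 * what make memfd sealing effective against writes through write(2).
 * A minimal userspace sketch, assuming a kernel with memfd_create(2):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW);
 *	write(fd, "ok", 2);		// within i_size: still succeeds
 *	pwrite(fd, "x", 1, 4096);	// extends the file: fails with
 *					// EPERM from the F_SEAL_GROW test
 */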

static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error = 0;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (!iter_is_iovec(to))
		sgp = SGP_CACHE;

	index = *ppos >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page) {
			if (sgp == SGP_CACHE)
				set_page_dirty(page);
			unlock_page(page);
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_MASK;
			if (nr <= offset) {
				if (page)
					put_page(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			get_page(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, to);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		put_page(page);
		if (!iov_iter_count(to))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}
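
/*
 * Example (editorial): the SGP_READ path above returns NULL pages for
 * holes, which are copied out from ZERO_PAGE - so an ordinary read of a
 * sparse tmpfs file allocates nothing.  A minimal sketch, assuming fd is
 * open O_RDWR on a tmpfs file:
 *
 *	char buf[4096];
 *	ftruncate(fd, 1 << 20);		// sparse: no pages allocated
 *	read(fd, buf, sizeof(buf));	// returns zeroes, still no pages
 */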

/*
 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
				    pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = find_get_entries(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !xa_is_value(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	inode_lock(inode);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0 || offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_SHIFT;
		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	inode_unlock(inode);
	return offset;
}
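
/*
 * Example (editorial): lseek(2) probing of a sparse tmpfs file, served
 * by shmem_seek_hole_data() above.  A minimal sketch, assuming fd refers
 * to a tmpfs file with data in [0, 4096) and a hole after it:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	// 0
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);	// 4096
 *	// Probing at or past i_size fails with ENXIO, as coded above.
 */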

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		/* protected by i_mutex */
		if (info->seals & F_SEAL_WRITE) {
			error = -EPERM;
			goto out;
		}

		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&shmem_falloc_waitq);
		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
		spin_unlock(&inode->i_lock);
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	start = offset >> PAGE_SHIFT;
	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.waitq = NULL;
	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			if (index > start) {
				shmem_undo_range(inode,
				    (loff_t)start << PAGE_SHIFT,
				    ((loff_t)index << PAGE_SHIFT) - 1, true);
			}
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		put_page(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	inode_unlock(inode);
	return error;
}
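
/*
 * Example (editorial): both fallocate(2) modes handled above, exercised
 * from userspace.  A minimal sketch, assuming fd is a tmpfs file:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// Preallocate 1MiB: pages are instantiated now, so later writes
 *	// cannot fail with ENOSPC (EINTR during allocation is permitted,
 *	// as the comment in the loop above notes).
 *	fallocate(fd, 0, 0, 1 << 20);
 *	// Punch the first 64KiB back out; PUNCH_HOLE must be paired with
 *	// KEEP_SIZE, so i_size is unchanged while the pages are freed.
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 64 << 10);
 */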

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;

		error = 0;
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
out_iput:
	iput(inode);
	return error;
}

static int
shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     NULL,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		d_tmpfile(dentry, inode);
	}
	return error;
out_iput:
	iput(inode);
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool excl)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(d_inode(dentry));
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	bool old_is_dir = d_is_dir(old_dentry);
	bool new_is_dir = d_is_dir(new_dentry);

	if (old_dir != new_dir && old_is_dir != new_is_dir) {
		if (old_is_dir) {
			drop_nlink(old_dir);
			inc_nlink(new_dir);
		} else {
			drop_nlink(new_dir);
			inc_nlink(old_dir);
		}
	}
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	d_inode(old_dentry)->i_ctime =
	d_inode(new_dentry)->i_ctime = current_time(old_dir);

	return 0;
}

static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
{
	struct dentry *whiteout;
	int error;

	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
	if (!whiteout)
		return -ENOMEM;

	error = shmem_mknod(old_dir, whiteout,
			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
	dput(whiteout);
	if (error)
		return error;

	/*
	 * Cheat and hash the whiteout while the old dentry is still in
	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
	 *
	 * d_lookup() will consistently find one of them at this point,
	 * not sure which one, but that isn't even important.
	 */
	d_rehash(whiteout);
	return 0;
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
{
	struct inode *inode = d_inode(old_dentry);
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (flags & RENAME_WHITEOUT) {
		int error;

		error = shmem_whiteout(old_dir, old_dentry);
		if (error)
			return error;
	}

	if (d_really_is_positive(new_dentry)) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(d_inode(new_dentry));
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = current_time(old_dir);
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;

	len = strlen(symname) + 1;
	if (len > PAGE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
				VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
		if (!inode->i_link) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		inode_nohighmem(inode);
		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		memcpy(page_address(page), symname, len);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		put_page(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = current_time(dir);
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void shmem_put_link(void *arg)
{
	mark_page_accessed(arg);
	put_page(arg);
}

static const char *shmem_get_link(struct dentry *dentry,
				  struct inode *inode,
				  struct delayed_call *done)
{
	struct page *page = NULL;
	int error;
	if (!dentry) {
		page = find_get_page(inode->i_mapping, 0);
		if (!page)
			return ERR_PTR(-ECHILD);
		if (!PageUptodate(page)) {
			put_page(page);
			return ERR_PTR(-ECHILD);
		}
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_READ);
		if (error)
			return ERR_PTR(error);
		unlock_page(page);
	}
	set_delayed_call(done, shmem_put_link, page);
	return page_address(page);
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static int shmem_xattr_handler_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_xattr_handler_set(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, const void *value,
				   size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_set(&info->xattrs, name, value, size, flags);
}

static const struct xattr_handler shmem_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler shmem_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&shmem_security_xattr_handler,
	&shmem_trusted_xattr_handler,
	NULL
};

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.get_link	= simple_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.get_link	= shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

/* Find any alias of inode, but prefer a hashed alias */
static struct dentry *shmem_find_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	return alias ?: d_find_any_alias(inode);
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = shmem_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent     = shmem_get_parent,
	.encode_fh      = shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
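
/*
 * Example (editorial): the export ops above let a tmpfs file be reopened
 * by handle.  A minimal sketch using the matching syscalls; mount_fd (an
 * fd on the tmpfs mount) is an assumption, and open_by_handle_at(2)
 * requires CAP_DAC_READ_SEARCH:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
 *	int mount_id;
 *	fh->handle_bytes = MAX_HANDLE_SZ;
 *	name_to_handle_at(AT_FDCWD, "/dev/shm/foo", fh, &mount_id, 0);
 *	// shmem_encode_fh() above packed generation + 64-bit ino
 *	// into fh[0..2]; shmem_fh_to_dentry() unpacks them again.
 *	int fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
 */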

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	struct mempolicy *mpol = NULL;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			pr_err("tmpfs: No value for mount option '%s'\n",
			       this_char);
			goto error;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
		} else if (!strcmp(this_char, "huge")) {
			int huge;
			huge = shmem_parse_huge(value);
			if (huge < 0)
				goto bad_val;
			if (!has_transparent_hugepage() &&
					huge != SHMEM_HUGE_NEVER)
				goto bad_val;
			sbinfo->huge = huge;
#endif
#ifdef CONFIG_NUMA
		} else if (!strcmp(this_char,"mpol")) {
			mpol_put(mpol);
			mpol = NULL;
			if (mpol_parse_str(value, &mpol))
				goto bad_val;
#endif
		} else {
			pr_err("tmpfs: Bad mount option %s\n", this_char);
			goto error;
		}
	}
	sbinfo->mpol = mpol;
	return 0;

bad_val:
	pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
error:
	mpol_put(mpol);
	return 1;
}
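
/*
 * Example (editorial): exercising the option parser above from
 * userspace.  A minimal sketch; the mount point /mnt/tmp is an
 * assumption:
 *
 *	#include <sys/mount.h>
 *
 *	// "size" accepts k/m/g suffixes and a trailing % of RAM via
 *	// memparse(), as parsed above; "mode" is octal; "huge" is only
 *	// accepted with CONFIG_TRANSPARENT_HUGE_PAGECACHE.
 *	mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
 *	      "size=512m,nr_inodes=10240,mode=1777");
 */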

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->huge = config.huge;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (0777 | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
	if (sbinfo->huge)
		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
#endif
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}

#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = 0777 | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & SB_KERNMOUNT)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	} else {
		sb->s_flags |= SB_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= SB_NOSEC;
#else
	sb->s_flags |= SB_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif
	uuid_gen(&sb->s_uuid);

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static void shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy     = shmem_set_policy,
	.get_policy     = shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

	/* If rootfs called this, don't re-init */
	if (shmem_inode_cachep)
		return 0;

	shmem_init_inodecache();

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = 0;	/* just in case it was patched */
#endif
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
	return error;
}

#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int i, count;

	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";

		count += sprintf(buf + count, fmt,
				shmem_format_huge(values[i]));
	}
	buf[count - 1] = '\n';
	return count;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
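
/*
 * Example (editorial): the attribute above appears as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.  A minimal sketch
 * of the store path exercised from userspace:
 *
 *	#include <stdio.h>
 *
 *	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
 *			"w");
 *	if (f) {
 *		fputs("within_size", f);	// parsed by shmem_parse_huge()
 *		fclose(f);
 *	}
 */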

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	loff_t i_size;
	pgoff_t off;

	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	switch (sbinfo->huge) {
		case SHMEM_HUGE_NEVER:
			return false;
		case SHMEM_HUGE_ALWAYS:
			return true;
		case SHMEM_HUGE_WITHIN_SIZE:
			off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
			i_size = round_up(i_size_read(inode), PAGE_SIZE);
			if (i_size >= HPAGE_PMD_SIZE &&
					i_size >> PAGE_SHIFT >= off)
				return true;
			/* fall through */
		case SHMEM_HUGE_ADVISE:
			/* TODO: implement fadvise() hints */
			return (vma->vm_flags & VM_HUGEPAGE);
		default:
			VM_BUG_ON(1);
			return false;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
				flags);
	if (unlikely(!inode)) {
		shmem_unacct_size(flags, size);
		return ERR_PTR(-ENOSPC);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
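
/*
 * Example (editorial): a minimal in-kernel usage sketch of the helper
 * above, as a driver might allocate an unlinked tmpfs object; the name
 * and size are illustrative only:
 *
 *	struct file *filp = shmem_file_setup("my-buffer", SZ_1M,
 *					     VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	// ... use filp->f_mapping, e.g. with
 *	// shmem_read_mapping_page_gfp() below ...
 *	fput(filp);	// final put: the unlinked inode and pages go away
 */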

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}

	return 0;
}
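
/*
 * Example (editorial): shmem_zero_setup() is what ultimately backs a
 * shared anonymous mapping.  A minimal userspace sketch:
 *
 *	#include <sys/mman.h>
 *
 *	// MAP_SHARED | MAP_ANONYMOUS reaches shmem_zero_setup() via
 *	// do_mmap(); the region shows up in /proc/<pid>/maps as a
 *	// deleted tmpfs object named "dev/zero".
 *	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 */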

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
				  gfp, NULL, NULL, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
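
/*
 * Example (editorial): a minimal sketch of how a GPU-style driver might
 * call the helper above, mixing in non-default gfp flags as the i915
 * comment describes; 'mapping' is assumed to come from a shmem file the
 * driver created, and 'i' is a page index within it:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... pin/map the page for DMA ...
 *	put_page(page);
 */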