tmpfs: pass gfp to shmem_getpage_gfp
mm/shmem.c
1 /*
2  * Resizable virtual memory filesystem for Linux.
3  *
4  * Copyright (C) 2000 Linus Torvalds.
5  *               2000 Transmeta Corp.
6  *               2000-2001 Christoph Rohland
7  *               2000-2001 SAP AG
8  *               2002 Red Hat Inc.
9  * Copyright (C) 2002-2005 Hugh Dickins.
10  * Copyright (C) 2002-2005 VERITAS Software Corporation.
11  * Copyright (C) 2004 Andi Kleen, SuSE Labs
12  *
13  * Extended attribute support for tmpfs:
14  * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15  * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
16  *
17  * tiny-shmem:
18  * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
19  *
20  * This file is released under the GPL.
21  */
22
23 #include <linux/fs.h>
24 #include <linux/init.h>
25 #include <linux/vfs.h>
26 #include <linux/mount.h>
27 #include <linux/pagemap.h>
28 #include <linux/file.h>
29 #include <linux/mm.h>
30 #include <linux/module.h>
31 #include <linux/percpu_counter.h>
32 #include <linux/swap.h>
33
34 static struct vfsmount *shm_mnt;
35
36 #ifdef CONFIG_SHMEM
37 /*
38  * This virtual memory filesystem is heavily based on ramfs. It
39  * extends ramfs with the ability to use swap and to honor resource
40  * limits, which makes it a completely usable filesystem.
41  */
42
43 #include <linux/xattr.h>
44 #include <linux/exportfs.h>
45 #include <linux/posix_acl.h>
46 #include <linux/generic_acl.h>
47 #include <linux/mman.h>
48 #include <linux/string.h>
49 #include <linux/slab.h>
50 #include <linux/backing-dev.h>
51 #include <linux/shmem_fs.h>
52 #include <linux/writeback.h>
53 #include <linux/blkdev.h>
54 #include <linux/splice.h>
55 #include <linux/security.h>
56 #include <linux/swapops.h>
57 #include <linux/mempolicy.h>
58 #include <linux/namei.h>
59 #include <linux/ctype.h>
60 #include <linux/migrate.h>
61 #include <linux/highmem.h>
62 #include <linux/seq_file.h>
63 #include <linux/magic.h>
64
65 #include <asm/uaccess.h>
66 #include <asm/div64.h>
67 #include <asm/pgtable.h>
68
69 /*
70  * The maximum size of a shmem/tmpfs file is limited by the maximum size of
71  * its triple-indirect swap vector - see illustration at shmem_swp_entry().
72  *
73  * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
74  * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
75  * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
76  * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
77  *
78  * We use / and * instead of shifts in the definitions below, so that the swap
79  * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
80  */
81 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
82 #define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
83
84 #define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
85 #define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
86
87 #define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
88 #define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
89
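/*
 * Worked example of the limits above (illustrative, assuming 4kB pages,
 * 4-byte unsigned long on a 32-bit kernel, and SHMEM_NR_DIRECT 16):
 *	ENTRIES_PER_PAGE     = 4096/4 = 1024
 *	ENTRIES_PER_PAGEPAGE = 1024*1024
 *	SHMSWP_MAX_INDEX     = 16 + (1024*1024/2)*1025 ~= 537M pages
 *	SHMSWP_MAX_BYTES     ~= 537M * 4kB ~= 2TB ("just over 2TB")
 * On a 64-bit kernel, 8-byte longs halve ENTRIES_PER_PAGE, so the
 * product shrinks to roughly one eighth: about 256GB.
 */
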
90 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
91 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
92
93 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
94 #define SHMEM_PAGEIN     VM_READ
95 #define SHMEM_TRUNCATE   VM_WRITE
96
97 /* Definition to limit shmem_truncate's steps between cond_rescheds */
98 #define LATENCY_LIMIT    64
99
100 /* Pretend that each entry is of this size in directory's i_size */
101 #define BOGO_DIRENT_SIZE 20
102
103 struct shmem_xattr {
104         struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
105         char *name;             /* xattr name */
106         size_t size;
107         char value[0];
108 };
109
110 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
111 enum sgp_type {
112         SGP_READ,       /* don't exceed i_size, don't allocate page */
113         SGP_CACHE,      /* don't exceed i_size, may allocate page */
114         SGP_DIRTY,      /* like SGP_CACHE, but set new page dirty */
115         SGP_WRITE,      /* may exceed i_size, may allocate page */
116 };
117
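/*
 * Rough mapping of callers to sgp_type (illustrative): read(2) uses
 * SGP_READ, so a hole is served from the zero page without allocating;
 * page faults use SGP_CACHE; write(2) uses SGP_WRITE, so a page beyond
 * i_size can be prepared before i_size is extended.
 */
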
118 #ifdef CONFIG_TMPFS
119 static unsigned long shmem_default_max_blocks(void)
120 {
121         return totalram_pages / 2;
122 }
123
124 static unsigned long shmem_default_max_inodes(void)
125 {
126         return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
127 }
128 #endif
129
130 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
131         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
132
133 static inline int shmem_getpage(struct inode *inode, pgoff_t index,
134         struct page **pagep, enum sgp_type sgp, int *fault_type)
135 {
136         return shmem_getpage_gfp(inode, index, pagep, sgp,
137                         mapping_gfp_mask(inode->i_mapping), fault_type);
138 }
139
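/*
 * Hypothetical caller of shmem_getpage_gfp() (illustrative only): a
 * path which must not recurse into filesystem reclaim could narrow the
 * mapping's default mask:
 *
 *	struct page *page = NULL;
 *	gfp_t gfp = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
 *	int err = shmem_getpage_gfp(inode, index, &page,
 *					SGP_CACHE, gfp, NULL);
 *
 * Ordinary callers use shmem_getpage(), which supplies the mask itself.
 */
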
140 static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
141 {
142         /*
143          * The above definition of ENTRIES_PER_PAGE, and the use of
144          * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
145          * might be reconsidered if it ever diverges from PAGE_SIZE.
146          *
147          * Mobility flags are masked out as swap vectors cannot move
148          */
149         return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
150                                 PAGE_CACHE_SHIFT-PAGE_SHIFT);
151 }
152
153 static inline void shmem_dir_free(struct page *page)
154 {
155         __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
156 }
157
158 static struct page **shmem_dir_map(struct page *page)
159 {
160         return (struct page **)kmap_atomic(page, KM_USER0);
161 }
162
163 static inline void shmem_dir_unmap(struct page **dir)
164 {
165         kunmap_atomic(dir, KM_USER0);
166 }
167
168 static swp_entry_t *shmem_swp_map(struct page *page)
169 {
170         return (swp_entry_t *)kmap_atomic(page, KM_USER1);
171 }
172
173 static inline void shmem_swp_balance_unmap(void)
174 {
175         /*
176          * When passing a pointer to an i_direct entry, to code which
177          * also handles indirect entries and so will shmem_swp_unmap,
178          * we must arrange for the preempt count to remain in balance.
179          * What kmap_atomic of a lowmem page does depends on config
180          * and architecture, so pretend to kmap_atomic some lowmem page.
181          */
182         (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
183 }
184
185 static inline void shmem_swp_unmap(swp_entry_t *entry)
186 {
187         kunmap_atomic(entry, KM_USER1);
188 }
189
190 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
191 {
192         return sb->s_fs_info;
193 }
194
195 /*
196  * shmem_file_setup pre-accounts the whole fixed size of a VM object,
197  * for shared memory and for shared anonymous (/dev/zero) mappings
198  * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
199  * consistent with the pre-accounting of private mappings ...
200  */
201 static inline int shmem_acct_size(unsigned long flags, loff_t size)
202 {
203         return (flags & VM_NORESERVE) ?
204                 0 : security_vm_enough_memory_kern(VM_ACCT(size));
205 }
206
207 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
208 {
209         if (!(flags & VM_NORESERVE))
210                 vm_unacct_memory(VM_ACCT(size));
211 }
212
213 /*
214  * ... whereas tmpfs objects are accounted incrementally as
215  * pages are allocated, in order to allow huge sparse files.
216  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
217  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
218  */
219 static inline int shmem_acct_block(unsigned long flags)
220 {
221         return (flags & VM_NORESERVE) ?
222                 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
223 }
224
225 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
226 {
227         if (flags & VM_NORESERVE)
228                 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
229 }
230
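/*
 * Concrete illustration of the two accounting schemes (example numbers,
 * assuming 4kB pages): a 1MB shared anonymous mapping pre-accounts all
 * 256 pages in shmem_acct_size() at setup and never touches
 * shmem_acct_block(); a 1MB sparse tmpfs file is created VM_NORESERVE,
 * pre-accounts nothing, and shmem_acct_block() charges one page at a
 * time as pages are actually instantiated.
 */
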
231 static const struct super_operations shmem_ops;
232 static const struct address_space_operations shmem_aops;
233 static const struct file_operations shmem_file_operations;
234 static const struct inode_operations shmem_inode_operations;
235 static const struct inode_operations shmem_dir_inode_operations;
236 static const struct inode_operations shmem_special_inode_operations;
237 static const struct vm_operations_struct shmem_vm_ops;
238
239 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
240         .ra_pages       = 0,    /* No readahead */
241         .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
242 };
243
244 static LIST_HEAD(shmem_swaplist);
245 static DEFINE_MUTEX(shmem_swaplist_mutex);
246
247 static void shmem_free_blocks(struct inode *inode, long pages)
248 {
249         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
250         if (sbinfo->max_blocks) {
251                 percpu_counter_add(&sbinfo->used_blocks, -pages);
252                 inode->i_blocks -= pages*BLOCKS_PER_PAGE;
253         }
254 }
255
256 static int shmem_reserve_inode(struct super_block *sb)
257 {
258         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
259         if (sbinfo->max_inodes) {
260                 spin_lock(&sbinfo->stat_lock);
261                 if (!sbinfo->free_inodes) {
262                         spin_unlock(&sbinfo->stat_lock);
263                         return -ENOSPC;
264                 }
265                 sbinfo->free_inodes--;
266                 spin_unlock(&sbinfo->stat_lock);
267         }
268         return 0;
269 }
270
271 static void shmem_free_inode(struct super_block *sb)
272 {
273         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
274         if (sbinfo->max_inodes) {
275                 spin_lock(&sbinfo->stat_lock);
276                 sbinfo->free_inodes++;
277                 spin_unlock(&sbinfo->stat_lock);
278         }
279 }
280
281 /**
282  * shmem_recalc_inode - recalculate the size of an inode
283  * @inode: inode to recalc
284  *
285  * We have to calculate the free blocks since the mm can drop
286  * undirtied hole pages behind our back.
287  *
288  * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
289  * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
290  *
291  * It has to be called with the spinlock held.
292  */
293 static void shmem_recalc_inode(struct inode *inode)
294 {
295         struct shmem_inode_info *info = SHMEM_I(inode);
296         long freed;
297
298         freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
299         if (freed > 0) {
300                 info->alloced -= freed;
301                 shmem_unacct_blocks(info->flags, freed);
302                 shmem_free_blocks(inode, freed);
303         }
304 }
305
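/*
 * Example of shmem_recalc_inode()'s arithmetic (illustrative): with
 * info->alloced 100, info->swapped 20 and nrpages 70, freed is
 * 100 - 20 - 70 = 10: ten undirtied hole pages were reclaimed behind
 * our back, so ten blocks are unaccounted and returned to the
 * superblock's used_blocks count.
 */
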
306 /**
307  * shmem_swp_entry - find the swap vector position in the info structure
308  * @info:  info structure for the inode
309  * @index: index of the page to find
310  * @page:  optional page to add to the structure. Has to be preset to
311  *         all zeros
312  *
313  * If there is no space allocated yet it will return NULL when
314  * page is NULL, else it will use the page for the needed block,
315  * setting it to NULL on return to indicate that it has been used.
316  *
317  * The swap vector is organized the following way:
318  *
319  * There are SHMEM_NR_DIRECT entries directly stored in the
320  * shmem_inode_info structure. So small files do not need an additional
321  * allocation.
322  *
323  * For pages with index > SHMEM_NR_DIRECT there is the pointer
324  * i_indirect which points to a page which holds in the first half
325  * doubly indirect blocks, in the second half triple indirect blocks:
326  *
327  * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
328  * following layout (for SHMEM_NR_DIRECT == 16):
329  *
330  * i_indirect -> dir --> 16-19
331  *            |      +-> 20-23
332  *            |
333  *            +-->dir2 --> 24-27
334  *            |        +-> 28-31
335  *            |        +-> 32-35
336  *            |        +-> 36-39
337  *            |
338  *            +-->dir3 --> 40-43
339  *                     +-> 44-47
340  *                     +-> 48-51
341  *                     +-> 52-55
342  */
343 static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
344 {
345         unsigned long offset;
346         struct page **dir;
347         struct page *subdir;
348
349         if (index < SHMEM_NR_DIRECT) {
350                 shmem_swp_balance_unmap();
351                 return info->i_direct+index;
352         }
353         if (!info->i_indirect) {
354                 if (page) {
355                         info->i_indirect = *page;
356                         *page = NULL;
357                 }
358                 return NULL;                    /* need another page */
359         }
360
361         index -= SHMEM_NR_DIRECT;
362         offset = index % ENTRIES_PER_PAGE;
363         index /= ENTRIES_PER_PAGE;
364         dir = shmem_dir_map(info->i_indirect);
365
366         if (index >= ENTRIES_PER_PAGE/2) {
367                 index -= ENTRIES_PER_PAGE/2;
368                 dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
369                 index %= ENTRIES_PER_PAGE;
370                 subdir = *dir;
371                 if (!subdir) {
372                         if (page) {
373                                 *dir = *page;
374                                 *page = NULL;
375                         }
376                         shmem_dir_unmap(dir);
377                         return NULL;            /* need another page */
378                 }
379                 shmem_dir_unmap(dir);
380                 dir = shmem_dir_map(subdir);
381         }
382
383         dir += index;
384         subdir = *dir;
385         if (!subdir) {
386                 if (!page || !(subdir = *page)) {
387                         shmem_dir_unmap(dir);
388                         return NULL;            /* need a page */
389                 }
390                 *dir = subdir;
391                 *page = NULL;
392         }
393         shmem_dir_unmap(dir);
394         return shmem_swp_map(subdir) + offset;
395 }
396
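/*
 * Worked example of the shmem_swp_entry() lookup above, using the
 * artificial layout from its comment (ENTRIES_PER_PAGE 4,
 * SHMEM_NR_DIRECT 16; illustrative): for page index 30, index becomes
 * 30-16 = 14, offset = 14%4 = 2, index = 14/4 = 3.  Since 3 >= 4/2 the
 * entry is triple-indirect: index drops to 1, dir advances past the two
 * doubly indirect slots to dir2, and dir2's second block (pages 28-31)
 * is mapped, returning the entry at offset 2, i.e. page 30.
 */
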
397 static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
398 {
399         long incdec = value? 1: -1;
400
401         entry->val = value;
402         info->swapped += incdec;
403         if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
404                 struct page *page = kmap_atomic_to_page(entry);
405                 set_page_private(page, page_private(page) + incdec);
406         }
407 }
408
409 /**
410  * shmem_swp_alloc - get the position of the swap entry for the page.
411  * @info:       info structure for the inode
412  * @index:      index of the page to find
413  * @sgp:        check and recheck i_size? skip allocation?
414  * @gfp:        gfp mask to use for any page allocation
415  *
416  * If the entry does not exist, allocate it.
417  */
418 static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info,
419                         unsigned long index, enum sgp_type sgp, gfp_t gfp)
420 {
421         struct inode *inode = &info->vfs_inode;
422         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
423         struct page *page = NULL;
424         swp_entry_t *entry;
425
426         if (sgp != SGP_WRITE &&
427             ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
428                 return ERR_PTR(-EINVAL);
429
430         while (!(entry = shmem_swp_entry(info, index, &page))) {
431                 if (sgp == SGP_READ)
432                         return shmem_swp_map(ZERO_PAGE(0));
433                 /*
434                  * Test used_blocks against one less than max_blocks, since we still
435                  * have one data page (and perhaps indirect index pages) to allocate:
436                  * it is a waste to allocate an index page if we cannot allocate data.
437                  */
438                 if (sbinfo->max_blocks) {
439                         if (percpu_counter_compare(&sbinfo->used_blocks,
440                                                 sbinfo->max_blocks - 1) >= 0)
441                                 return ERR_PTR(-ENOSPC);
442                         percpu_counter_inc(&sbinfo->used_blocks);
443                         inode->i_blocks += BLOCKS_PER_PAGE;
444                 }
445
446                 spin_unlock(&info->lock);
447                 page = shmem_dir_alloc(gfp);
448                 spin_lock(&info->lock);
449
450                 if (!page) {
451                         shmem_free_blocks(inode, 1);
452                         return ERR_PTR(-ENOMEM);
453                 }
454                 if (sgp != SGP_WRITE &&
455                     ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
456                         entry = ERR_PTR(-EINVAL);
457                         break;
458                 }
459                 if (info->next_index <= index)
460                         info->next_index = index + 1;
461         }
462         if (page) {
463                 /* another task gave its page, or truncated the file */
464                 shmem_free_blocks(inode, 1);
465                 shmem_dir_free(page);
466         }
467         if (info->next_index <= index && !IS_ERR(entry))
468                 info->next_index = index + 1;
469         return entry;
470 }
471
472 /**
473  * shmem_free_swp - free some swap entries in a directory
474  * @dir:        pointer to the directory
475  * @edir:       pointer after last entry of the directory
476  * @punch_lock: pointer to spinlock when needed for the holepunch case
477  */
478 static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
479                                                 spinlock_t *punch_lock)
480 {
481         spinlock_t *punch_unlock = NULL;
482         swp_entry_t *ptr;
483         int freed = 0;
484
485         for (ptr = dir; ptr < edir; ptr++) {
486                 if (ptr->val) {
487                         if (unlikely(punch_lock)) {
488                                 punch_unlock = punch_lock;
489                                 punch_lock = NULL;
490                                 spin_lock(punch_unlock);
491                                 if (!ptr->val)
492                                         continue;
493                         }
494                         free_swap_and_cache(*ptr);
495                         *ptr = (swp_entry_t){0};
496                         freed++;
497                 }
498         }
499         if (punch_unlock)
500                 spin_unlock(punch_unlock);
501         return freed;
502 }
503
504 static int shmem_map_and_free_swp(struct page *subdir, int offset,
505                 int limit, struct page ***dir, spinlock_t *punch_lock)
506 {
507         swp_entry_t *ptr;
508         int freed = 0;
509
510         ptr = shmem_swp_map(subdir);
511         for (; offset < limit; offset += LATENCY_LIMIT) {
512                 int size = limit - offset;
513                 if (size > LATENCY_LIMIT)
514                         size = LATENCY_LIMIT;
515                 freed += shmem_free_swp(ptr+offset, ptr+offset+size,
516                                                         punch_lock);
517                 if (need_resched()) {
518                         shmem_swp_unmap(ptr);
519                         if (*dir) {
520                                 shmem_dir_unmap(*dir);
521                                 *dir = NULL;
522                         }
523                         cond_resched();
524                         ptr = shmem_swp_map(subdir);
525                 }
526         }
527         shmem_swp_unmap(ptr);
528         return freed;
529 }
530
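/*
 * Illustration of the chunking above: with LATENCY_LIMIT 64, a fully
 * populated 1024-entry directory page is freed in 16 chunks of 64
 * entries, with a cond_resched() (and kmap remap) possible between
 * chunks, so truncating a huge file does not hog the CPU.
 */
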
531 static void shmem_free_pages(struct list_head *next)
532 {
533         struct page *page;
534         int freed = 0;
535
536         do {
537                 page = container_of(next, struct page, lru);
538                 next = next->next;
539                 shmem_dir_free(page);
540                 freed++;
541                 if (freed >= LATENCY_LIMIT) {
542                         cond_resched();
543                         freed = 0;
544                 }
545         } while (next);
546 }
547
548 void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
549 {
550         struct shmem_inode_info *info = SHMEM_I(inode);
551         unsigned long idx;
552         unsigned long size;
553         unsigned long limit;
554         unsigned long stage;
555         unsigned long diroff;
556         struct page **dir;
557         struct page *topdir;
558         struct page *middir;
559         struct page *subdir;
560         swp_entry_t *ptr;
561         LIST_HEAD(pages_to_free);
562         long nr_pages_to_free = 0;
563         long nr_swaps_freed = 0;
564         int offset;
565         int freed;
566         int punch_hole;
567         spinlock_t *needs_lock;
568         spinlock_t *punch_lock;
569         unsigned long upper_limit;
570
571         truncate_inode_pages_range(inode->i_mapping, start, end);
572
573         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
574         idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
575         if (idx >= info->next_index)
576                 return;
577
578         spin_lock(&info->lock);
579         info->flags |= SHMEM_TRUNCATE;
580         if (likely(end == (loff_t) -1)) {
581                 limit = info->next_index;
582                 upper_limit = SHMEM_MAX_INDEX;
583                 info->next_index = idx;
584                 needs_lock = NULL;
585                 punch_hole = 0;
586         } else {
587                 if (end + 1 >= inode->i_size) { /* we may free a little more */
588                         limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
589                                                         PAGE_CACHE_SHIFT;
590                         upper_limit = SHMEM_MAX_INDEX;
591                 } else {
592                         limit = (end + 1) >> PAGE_CACHE_SHIFT;
593                         upper_limit = limit;
594                 }
595                 needs_lock = &info->lock;
596                 punch_hole = 1;
597         }
598
599         topdir = info->i_indirect;
600         if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
601                 info->i_indirect = NULL;
602                 nr_pages_to_free++;
603                 list_add(&topdir->lru, &pages_to_free);
604         }
605         spin_unlock(&info->lock);
606
607         if (info->swapped && idx < SHMEM_NR_DIRECT) {
608                 ptr = info->i_direct;
609                 size = limit;
610                 if (size > SHMEM_NR_DIRECT)
611                         size = SHMEM_NR_DIRECT;
612                 nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
613         }
614
615         /*
616          * If there are no indirect blocks or we are punching a hole
617          * below indirect blocks, nothing to be done.
618          */
619         if (!topdir || limit <= SHMEM_NR_DIRECT)
620                 goto done2;
621
622         /*
623          * The truncation case has already dropped info->lock, and we're safe
624          * because i_size and next_index have already been lowered, preventing
625          * access beyond.  But in the punch_hole case, we still need to take
626          * the lock when updating the swap directory, because there might be
627          * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
628          * shmem_writepage.  However, whenever we find we can remove a whole
629          * directory page (not at the misaligned start or end of the range),
630          * we first NULLify its pointer in the level above, and then have no
631          * need to take the lock when updating its contents: needs_lock and
632          * punch_lock (either pointing to info->lock or NULL) manage this.
633          */
634
635         upper_limit -= SHMEM_NR_DIRECT;
636         limit -= SHMEM_NR_DIRECT;
637         idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
638         offset = idx % ENTRIES_PER_PAGE;
639         idx -= offset;
640
641         dir = shmem_dir_map(topdir);
642         stage = ENTRIES_PER_PAGEPAGE/2;
643         if (idx < ENTRIES_PER_PAGEPAGE/2) {
644                 middir = topdir;
645                 diroff = idx/ENTRIES_PER_PAGE;
646         } else {
647                 dir += ENTRIES_PER_PAGE/2;
648                 dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
649                 while (stage <= idx)
650                         stage += ENTRIES_PER_PAGEPAGE;
651                 middir = *dir;
652                 if (*dir) {
653                         diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
654                                 ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
655                         if (!diroff && !offset && upper_limit >= stage) {
656                                 if (needs_lock) {
657                                         spin_lock(needs_lock);
658                                         *dir = NULL;
659                                         spin_unlock(needs_lock);
660                                         needs_lock = NULL;
661                                 } else
662                                         *dir = NULL;
663                                 nr_pages_to_free++;
664                                 list_add(&middir->lru, &pages_to_free);
665                         }
666                         shmem_dir_unmap(dir);
667                         dir = shmem_dir_map(middir);
668                 } else {
669                         diroff = 0;
670                         offset = 0;
671                         idx = stage;
672                 }
673         }
674
675         for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
676                 if (unlikely(idx == stage)) {
677                         shmem_dir_unmap(dir);
678                         dir = shmem_dir_map(topdir) +
679                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
680                         while (!*dir) {
681                                 dir++;
682                                 idx += ENTRIES_PER_PAGEPAGE;
683                                 if (idx >= limit)
684                                         goto done1;
685                         }
686                         stage = idx + ENTRIES_PER_PAGEPAGE;
687                         middir = *dir;
688                         if (punch_hole)
689                                 needs_lock = &info->lock;
690                         if (upper_limit >= stage) {
691                                 if (needs_lock) {
692                                         spin_lock(needs_lock);
693                                         *dir = NULL;
694                                         spin_unlock(needs_lock);
695                                         needs_lock = NULL;
696                                 } else
697                                         *dir = NULL;
698                                 nr_pages_to_free++;
699                                 list_add(&middir->lru, &pages_to_free);
700                         }
701                         shmem_dir_unmap(dir);
702                         cond_resched();
703                         dir = shmem_dir_map(middir);
704                         diroff = 0;
705                 }
706                 punch_lock = needs_lock;
707                 subdir = dir[diroff];
708                 if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
709                         if (needs_lock) {
710                                 spin_lock(needs_lock);
711                                 dir[diroff] = NULL;
712                                 spin_unlock(needs_lock);
713                                 punch_lock = NULL;
714                         } else
715                                 dir[diroff] = NULL;
716                         nr_pages_to_free++;
717                         list_add(&subdir->lru, &pages_to_free);
718                 }
719                 if (subdir && page_private(subdir) /* has swap entries */) {
720                         size = limit - idx;
721                         if (size > ENTRIES_PER_PAGE)
722                                 size = ENTRIES_PER_PAGE;
723                         freed = shmem_map_and_free_swp(subdir,
724                                         offset, size, &dir, punch_lock);
725                         if (!dir)
726                                 dir = shmem_dir_map(middir);
727                         nr_swaps_freed += freed;
728                         if (offset || punch_lock) {
729                                 spin_lock(&info->lock);
730                                 set_page_private(subdir,
731                                         page_private(subdir) - freed);
732                                 spin_unlock(&info->lock);
733                         } else
734                                 BUG_ON(page_private(subdir) != freed);
735                 }
736                 offset = 0;
737         }
738 done1:
739         shmem_dir_unmap(dir);
740 done2:
741         if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
742                 /*
743                  * Call truncate_inode_pages again: racing shmem_unuse_inode
744                  * may have swizzled a page in from swap since
745                  * truncate_pagecache or generic_delete_inode did it, before we
746                  * lowered next_index.  Also, though shmem_getpage checks
747                  * i_size before adding to cache, no recheck after: so fix the
748                  * narrow window there too.
749                  */
750                 truncate_inode_pages_range(inode->i_mapping, start, end);
751         }
752
753         spin_lock(&info->lock);
754         info->flags &= ~SHMEM_TRUNCATE;
755         info->swapped -= nr_swaps_freed;
756         if (nr_pages_to_free)
757                 shmem_free_blocks(inode, nr_pages_to_free);
758         shmem_recalc_inode(inode);
759         spin_unlock(&info->lock);
760
761         /*
762          * Empty swap vector directory pages to be freed?
763          */
764         if (!list_empty(&pages_to_free)) {
765                 pages_to_free.prev->next = NULL;
766                 shmem_free_pages(pages_to_free.next);
767         }
768 }
769 EXPORT_SYMBOL_GPL(shmem_truncate_range);
770
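/*
 * Index arithmetic example for the truncation above (illustrative,
 * assuming 4kB pages): shmem_truncate_range(inode, 10000, -1) computes
 * idx = (10000 + 4095) >> 12 = 3, so swap entries and index pages are
 * freed from page index 3 upwards, while truncate_inode_pages_range()
 * has already zeroed the tail of partial page 2 and dropped pages 3 and
 * up from the page cache.
 */
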
771 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
772 {
773         struct inode *inode = dentry->d_inode;
774         int error;
775
776         error = inode_change_ok(inode, attr);
777         if (error)
778                 return error;
779
780         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
781                 loff_t oldsize = inode->i_size;
782                 loff_t newsize = attr->ia_size;
783                 struct page *page = NULL;
784
785                 if (newsize < oldsize) {
786                         /*
787                          * If truncating down to a partial page, then
788                          * if that page is already allocated, hold it
789                          * in memory until the truncation is over, so that
790                          * truncate_partial_page cannot miss it if it were
791                          * assigned to swap.
792                          */
793                         if (newsize & (PAGE_CACHE_SIZE-1)) {
794                                 (void) shmem_getpage(inode,
795                                         newsize >> PAGE_CACHE_SHIFT,
796                                                 &page, SGP_READ, NULL);
797                                 if (page)
798                                         unlock_page(page);
799                         }
800                         /*
801                          * Reset SHMEM_PAGEIN flag so that shmem_truncate can
802                          * detect if any pages might have been added to cache
803                          * after truncate_inode_pages.  But we needn't bother
804                          * if it's being fully truncated to zero-length: the
805                          * nrpages check is efficient enough in that case.
806                          */
807                         if (newsize) {
808                                 struct shmem_inode_info *info = SHMEM_I(inode);
809                                 spin_lock(&info->lock);
810                                 info->flags &= ~SHMEM_PAGEIN;
811                                 spin_unlock(&info->lock);
812                         }
813                 }
814                 if (newsize != oldsize) {
815                         i_size_write(inode, newsize);
816                         inode->i_ctime = inode->i_mtime = CURRENT_TIME;
817                 }
818                 if (newsize < oldsize) {
819                         loff_t holebegin = round_up(newsize, PAGE_SIZE);
820                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
821                         shmem_truncate_range(inode, newsize, (loff_t)-1);
822                         /* unmap again to remove racily COWed private pages */
823                         unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
824                 }
825                 if (page)
826                         page_cache_release(page);
827         }
828
829         setattr_copy(inode, attr);
830 #ifdef CONFIG_TMPFS_POSIX_ACL
831         if (attr->ia_valid & ATTR_MODE)
832                 error = generic_acl_chmod(inode);
833 #endif
834         return error;
835 }
836
837 static void shmem_evict_inode(struct inode *inode)
838 {
839         struct shmem_inode_info *info = SHMEM_I(inode);
840         struct shmem_xattr *xattr, *nxattr;
841
842         if (inode->i_mapping->a_ops == &shmem_aops) {
843                 shmem_unacct_size(info->flags, inode->i_size);
844                 inode->i_size = 0;
845                 shmem_truncate_range(inode, 0, (loff_t)-1);
846                 if (!list_empty(&info->swaplist)) {
847                         mutex_lock(&shmem_swaplist_mutex);
848                         list_del_init(&info->swaplist);
849                         mutex_unlock(&shmem_swaplist_mutex);
850                 }
851         }
852
853         list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
854                 kfree(xattr->name);
855                 kfree(xattr);
856         }
857         BUG_ON(inode->i_blocks);
858         shmem_free_inode(inode->i_sb);
859         end_writeback(inode);
860 }
861
862 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
863 {
864         swp_entry_t *ptr;
865
866         for (ptr = dir; ptr < edir; ptr++) {
867                 if (ptr->val == entry.val)
868                         return ptr - dir;
869         }
870         return -1;
871 }
872
873 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
874 {
875         struct address_space *mapping;
876         unsigned long idx;
877         unsigned long size;
878         unsigned long limit;
879         unsigned long stage;
880         struct page **dir;
881         struct page *subdir;
882         swp_entry_t *ptr;
883         int offset;
884         int error;
885
886         idx = 0;
887         ptr = info->i_direct;
888         spin_lock(&info->lock);
889         if (!info->swapped) {
890                 list_del_init(&info->swaplist);
891                 goto lost2;
892         }
893         limit = info->next_index;
894         size = limit;
895         if (size > SHMEM_NR_DIRECT)
896                 size = SHMEM_NR_DIRECT;
897         offset = shmem_find_swp(entry, ptr, ptr+size);
898         if (offset >= 0) {
899                 shmem_swp_balance_unmap();
900                 goto found;
901         }
902         if (!info->i_indirect)
903                 goto lost2;
904
905         dir = shmem_dir_map(info->i_indirect);
906         stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
907
908         for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
909                 if (unlikely(idx == stage)) {
910                         shmem_dir_unmap(dir-1);
911                         if (cond_resched_lock(&info->lock)) {
912                                 /* check it has not been truncated */
913                                 if (limit > info->next_index) {
914                                         limit = info->next_index;
915                                         if (idx >= limit)
916                                                 goto lost2;
917                                 }
918                         }
919                         dir = shmem_dir_map(info->i_indirect) +
920                             ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
921                         while (!*dir) {
922                                 dir++;
923                                 idx += ENTRIES_PER_PAGEPAGE;
924                                 if (idx >= limit)
925                                         goto lost1;
926                         }
927                         stage = idx + ENTRIES_PER_PAGEPAGE;
928                         subdir = *dir;
929                         shmem_dir_unmap(dir);
930                         dir = shmem_dir_map(subdir);
931                 }
932                 subdir = *dir;
933                 if (subdir && page_private(subdir)) {
934                         ptr = shmem_swp_map(subdir);
935                         size = limit - idx;
936                         if (size > ENTRIES_PER_PAGE)
937                                 size = ENTRIES_PER_PAGE;
938                         offset = shmem_find_swp(entry, ptr, ptr+size);
939                         shmem_swp_unmap(ptr);
940                         if (offset >= 0) {
941                                 shmem_dir_unmap(dir);
942                                 ptr = shmem_swp_map(subdir);
943                                 goto found;
944                         }
945                 }
946         }
947 lost1:
948         shmem_dir_unmap(dir-1);
949 lost2:
950         spin_unlock(&info->lock);
951         return 0;
952 found:
953         idx += offset;
954         ptr += offset;
955
956         /*
957          * Move _head_ to start search for next from here.
958          * But be careful: shmem_evict_inode checks list_empty without taking
959          * mutex, and there's an instant in list_move_tail when info->swaplist
960          * would appear empty, if it were the only one on shmem_swaplist.  We
961          * could avoid doing it if inode NULL; or use this minor optimization.
962          */
963         if (shmem_swaplist.next != &info->swaplist)
964                 list_move_tail(&shmem_swaplist, &info->swaplist);
965
966         /*
967          * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
968          * but also to hold up shmem_evict_inode(): so inode cannot be freed
969          * beneath us (pagelock doesn't help until the page is in pagecache).
970          */
971         mapping = info->vfs_inode.i_mapping;
972         error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
973         /* which does mem_cgroup_uncharge_cache_page on error */
974
975         if (error == -EEXIST) {
976                 struct page *filepage = find_get_page(mapping, idx);
977                 error = 1;
978                 if (filepage) {
979                         /*
980                          * There might be a more uptodate page coming down
981                          * from a stacked writepage: forget our swappage if so.
982                          */
983                         if (PageUptodate(filepage))
984                                 error = 0;
985                         page_cache_release(filepage);
986                 }
987         }
988         if (!error) {
989                 delete_from_swap_cache(page);
990                 set_page_dirty(page);
991                 info->flags |= SHMEM_PAGEIN;
992                 shmem_swp_set(info, ptr, 0);
993                 swap_free(entry);
994                 error = 1;      /* not an error, but entry was found */
995         }
996         shmem_swp_unmap(ptr);
997         spin_unlock(&info->lock);
998         return error;
999 }
1000
1001 /*
1002  * shmem_unuse() searches for a possibly swapped-out shmem page.
1003  */
1004 int shmem_unuse(swp_entry_t entry, struct page *page)
1005 {
1006         struct list_head *p, *next;
1007         struct shmem_inode_info *info;
1008         int found = 0;
1009         int error;
1010
1011         /*
1012          * Charge page using GFP_KERNEL while we can wait, before taking
1013          * the shmem_swaplist_mutex which might hold up shmem_writepage().
1014          * Charged back to the user (not to caller) when swap account is used.
1015          * add_to_page_cache() will be called with GFP_NOWAIT.
1016          */
1017         error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
1018         if (error)
1019                 goto out;
1020         /*
1021          * Try to preload while we can wait, to not make a habit of
1022          * draining atomic reserves; but don't latch on to this cpu,
1023          * it's okay if sometimes we get rescheduled after this.
1024          */
1025         error = radix_tree_preload(GFP_KERNEL);
1026         if (error)
1027                 goto uncharge;
1028         radix_tree_preload_end();
1029
1030         mutex_lock(&shmem_swaplist_mutex);
1031         list_for_each_safe(p, next, &shmem_swaplist) {
1032                 info = list_entry(p, struct shmem_inode_info, swaplist);
1033                 found = shmem_unuse_inode(info, entry, page);
1034                 cond_resched();
1035                 if (found)
1036                         break;
1037         }
1038         mutex_unlock(&shmem_swaplist_mutex);
1039
1040 uncharge:
1041         if (!found)
1042                 mem_cgroup_uncharge_cache_page(page);
1043         if (found < 0)
1044                 error = found;
1045 out:
1046         unlock_page(page);
1047         page_cache_release(page);
1048         return error;
1049 }
1050
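/*
 * Context (illustrative): shmem_unuse() is called from try_to_unuse()
 * during swapoff, for swap pages which may belong to tmpfs; walking
 * shmem_swaplist lets swapoff find the owning inode without any
 * back-pointer from the swap entry to the file.
 */
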
1051 /*
1052  * Move the page from the page cache to the swap cache.
1053  */
1054 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1055 {
1056         struct shmem_inode_info *info;
1057         swp_entry_t *entry, swap;
1058         struct address_space *mapping;
1059         unsigned long index;
1060         struct inode *inode;
1061
1062         BUG_ON(!PageLocked(page));
1063         mapping = page->mapping;
1064         index = page->index;
1065         inode = mapping->host;
1066         info = SHMEM_I(inode);
1067         if (info->flags & VM_LOCKED)
1068                 goto redirty;
1069         if (!total_swap_pages)
1070                 goto redirty;
1071
1072         /*
1073          * shmem_backing_dev_info's capabilities prevent regular writeback or
1074          * sync from ever calling shmem_writepage; but a stacking filesystem
1075          * may use the ->writepage of its underlying filesystem, in which case
1076          * tmpfs should write out to swap only in response to memory pressure,
1077          * and not for the writeback threads or sync.  However, in those cases,
1078          * we do still want to check if there's a redundant swappage to be
1079          * discarded.
1080          */
1081         if (wbc->for_reclaim)
1082                 swap = get_swap_page();
1083         else
1084                 swap.val = 0;
1085
1086         /*
1087          * Add inode to shmem_unuse()'s list of swapped-out inodes,
1088          * if it's not already there.  Do it now because we cannot take
1089          * mutex while holding spinlock, and must do so before the page
1090          * is moved to swap cache, when its pagelock no longer protects
1091          * the inode from eviction.  But don't unlock the mutex until
1092          * we've taken the spinlock, because shmem_unuse_inode() will
1093          * prune a !swapped inode from the swaplist under both locks.
1094          */
1095         if (swap.val) {
1096                 mutex_lock(&shmem_swaplist_mutex);
1097                 if (list_empty(&info->swaplist))
1098                         list_add_tail(&info->swaplist, &shmem_swaplist);
1099         }
1100
1101         spin_lock(&info->lock);
1102         if (swap.val)
1103                 mutex_unlock(&shmem_swaplist_mutex);
1104
1105         if (index >= info->next_index) {
1106                 BUG_ON(!(info->flags & SHMEM_TRUNCATE));
1107                 goto unlock;
1108         }
1109         entry = shmem_swp_entry(info, index, NULL);
1110         if (entry->val) {
1111                 /*
1112                  * The more uptodate page coming down from a stacked
1113                  * writepage should replace our old swappage.
1114                  */
1115                 free_swap_and_cache(*entry);
1116                 shmem_swp_set(info, entry, 0);
1117         }
1118         shmem_recalc_inode(inode);
1119
1120         if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
1121                 delete_from_page_cache(page);
1122                 shmem_swp_set(info, entry, swap.val);
1123                 shmem_swp_unmap(entry);
1124                 swap_shmem_alloc(swap);
1125                 spin_unlock(&info->lock);
1126                 BUG_ON(page_mapped(page));
1127                 swap_writepage(page, wbc);
1128                 return 0;
1129         }
1130
1131         shmem_swp_unmap(entry);
1132 unlock:
1133         spin_unlock(&info->lock);
1134         /*
1135          * add_to_swap_cache() doesn't return -EEXIST, so we can safely
1136          * clear SWAP_HAS_CACHE flag.
1137          */
1138         swapcache_free(swap, NULL);
1139 redirty:
1140         set_page_dirty(page);
1141         if (wbc->for_reclaim)
1142                 return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
1143         unlock_page(page);
1144         return 0;
1145 }
1146
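/*
 * Typical path through shmem_writepage() (illustrative): under memory
 * pressure, vmscan calls ->writepage with wbc->for_reclaim set; we get
 * a swap slot, record it in the swap vector, move the page from page
 * cache to swap cache, and hand it to swap_writepage() for the actual
 * IO.  Regular writeback and sync never get here, thanks to
 * shmem_backing_dev_info's capabilities.
 */
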
1147 #ifdef CONFIG_NUMA
1148 #ifdef CONFIG_TMPFS
1149 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1150 {
1151         char buffer[64];
1152
1153         if (!mpol || mpol->mode == MPOL_DEFAULT)
1154                 return;         /* show nothing */
1155
1156         mpol_to_str(buffer, sizeof(buffer), mpol, 1);
1157
1158         seq_printf(seq, ",mpol=%s", buffer);
1159 }
1160
1161 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1162 {
1163         struct mempolicy *mpol = NULL;
1164         if (sbinfo->mpol) {
1165                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1166                 mpol = sbinfo->mpol;
1167                 mpol_get(mpol);
1168                 spin_unlock(&sbinfo->stat_lock);
1169         }
1170         return mpol;
1171 }
1172 #endif /* CONFIG_TMPFS */
1173
1174 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1175                         struct shmem_inode_info *info, unsigned long idx)
1176 {
1177         struct mempolicy mpol, *spol;
1178         struct vm_area_struct pvma;
1179         struct page *page;
1180
1181         spol = mpol_cond_copy(&mpol,
1182                                 mpol_shared_policy_lookup(&info->policy, idx));
1183
1184         /* Create a pseudo vma that just contains the policy */
1185         pvma.vm_start = 0;
1186         pvma.vm_pgoff = idx;
1187         pvma.vm_ops = NULL;
1188         pvma.vm_policy = spol;
1189         page = swapin_readahead(entry, gfp, &pvma, 0);
1190         return page;
1191 }
1192
1193 static struct page *shmem_alloc_page(gfp_t gfp,
1194                         struct shmem_inode_info *info, unsigned long idx)
1195 {
1196         struct vm_area_struct pvma;
1197
1198         /* Create a pseudo vma that just contains the policy */
1199         pvma.vm_start = 0;
1200         pvma.vm_pgoff = idx;
1201         pvma.vm_ops = NULL;
1202         pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
1203
1204         /*
1205          * alloc_page_vma() will drop the shared policy reference
1206          */
1207         return alloc_page_vma(gfp, &pvma, 0);
1208 }
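
/*
 * Note on the pseudo vma trick above (illustrative): there may be no
 * real vma at allocation time (e.g. swapoff or write(2)), so a
 * throwaway vm_area_struct carrying just vm_pgoff and vm_policy is
 * built on the stack, letting alloc_page_vma() and swapin_readahead()
 * apply the file's shared mempolicy, interleaving by file offset rather
 * than by virtual address.
 */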
1209 #else /* !CONFIG_NUMA */
1210 #ifdef CONFIG_TMPFS
1211 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
1212 {
1213 }
1214 #endif /* CONFIG_TMPFS */
1215
1216 static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
1217                         struct shmem_inode_info *info, unsigned long idx)
1218 {
1219         return swapin_readahead(entry, gfp, NULL, 0);
1220 }
1221
1222 static inline struct page *shmem_alloc_page(gfp_t gfp,
1223                         struct shmem_inode_info *info, unsigned long idx)
1224 {
1225         return alloc_page(gfp);
1226 }
1227 #endif /* CONFIG_NUMA */
1228
1229 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
1230 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1231 {
1232         return NULL;
1233 }
1234 #endif
1235
1236 /*
1237  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1238  *
1239  * If we allocate a new one we do not mark it dirty. That's up to the
1240  * vm. If we swap it in we mark it dirty, since we also free the swap
1241  * entry: a page cannot live in both the swap cache and the page cache.
1242  */
1243 static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
1244         struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
1245 {
1246         struct address_space *mapping = inode->i_mapping;
1247         struct shmem_inode_info *info = SHMEM_I(inode);
1248         struct shmem_sb_info *sbinfo;
1249         struct page *filepage = *pagep;
1250         struct page *swappage;
1251         struct page *prealloc_page = NULL;
1252         swp_entry_t *entry;
1253         swp_entry_t swap;
1254         int error;
1255
1256         if (idx >= SHMEM_MAX_INDEX)
1257                 return -EFBIG;
1258
1259         /*
1260          * Normally, filepage is NULL on entry, and either found
1261          * uptodate immediately, or allocated and zeroed, or read
1262          * in under swappage, which is then assigned to filepage.
1263          * But shmem_readpage (required for splice) passes in a locked
1264          * filepage, which may be found not uptodate by other callers
1265          * too, and may need to be copied from the swappage read in.
1266          */
1267 repeat:
1268         if (!filepage)
1269                 filepage = find_lock_page(mapping, idx);
1270         if (filepage && PageUptodate(filepage))
1271                 goto done;
1272         if (!filepage) {
1273                 /*
1274                  * Try to preload while we can wait, to not make a habit of
1275                  * draining atomic reserves; but don't latch on to this cpu.
1276                  */
1277                 error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
1278                 if (error)
1279                         goto failed;
1280                 radix_tree_preload_end();
1281                 if (sgp != SGP_READ && !prealloc_page) {
1282                         /* We don't care if this fails */
1283                         prealloc_page = shmem_alloc_page(gfp, info, idx);
1284                         if (prealloc_page) {
1285                                 if (mem_cgroup_cache_charge(prealloc_page,
1286                                                 current->mm, GFP_KERNEL)) {
1287                                         page_cache_release(prealloc_page);
1288                                         prealloc_page = NULL;
1289                                 }
1290                         }
1291                 }
1292         }
1293         error = 0;
1294
1295         spin_lock(&info->lock);
1296         shmem_recalc_inode(inode);
1297         entry = shmem_swp_alloc(info, idx, sgp, gfp);
1298         if (IS_ERR(entry)) {
1299                 spin_unlock(&info->lock);
1300                 error = PTR_ERR(entry);
1301                 goto failed;
1302         }
1303         swap = *entry;
1304
1305         if (swap.val) {
1306                 /* Look it up and read it in.. */
1307                 swappage = lookup_swap_cache(swap);
1308                 if (!swappage) {
1309                         shmem_swp_unmap(entry);
1310                         spin_unlock(&info->lock);
1311                         /* here we actually do the io */
1312                         if (fault_type)
1313                                 *fault_type |= VM_FAULT_MAJOR;
1314                         swappage = shmem_swapin(swap, gfp, info, idx);
1315                         if (!swappage) {
1316                                 spin_lock(&info->lock);
1317                                 entry = shmem_swp_alloc(info, idx, sgp, gfp);
1318                                 if (IS_ERR(entry))
1319                                         error = PTR_ERR(entry);
1320                                 else {
1321                                         if (entry->val == swap.val)
1322                                                 error = -ENOMEM;
1323                                         shmem_swp_unmap(entry);
1324                                 }
1325                                 spin_unlock(&info->lock);
1326                                 if (error)
1327                                         goto failed;
1328                                 goto repeat;
1329                         }
1330                         wait_on_page_locked(swappage);
1331                         page_cache_release(swappage);
1332                         goto repeat;
1333                 }
1334
1335                 /* We have to do this with page locked to prevent races */
1336                 if (!trylock_page(swappage)) {
1337                         shmem_swp_unmap(entry);
1338                         spin_unlock(&info->lock);
1339                         wait_on_page_locked(swappage);
1340                         page_cache_release(swappage);
1341                         goto repeat;
1342                 }
1343                 if (PageWriteback(swappage)) {
1344                         shmem_swp_unmap(entry);
1345                         spin_unlock(&info->lock);
1346                         wait_on_page_writeback(swappage);
1347                         unlock_page(swappage);
1348                         page_cache_release(swappage);
1349                         goto repeat;
1350                 }
1351                 if (!PageUptodate(swappage)) {
1352                         shmem_swp_unmap(entry);
1353                         spin_unlock(&info->lock);
1354                         unlock_page(swappage);
1355                         page_cache_release(swappage);
1356                         error = -EIO;
1357                         goto failed;
1358                 }
1359
1360                 if (filepage) {
1361                         shmem_swp_set(info, entry, 0);
1362                         shmem_swp_unmap(entry);
1363                         delete_from_swap_cache(swappage);
1364                         spin_unlock(&info->lock);
1365                         copy_highpage(filepage, swappage);
1366                         unlock_page(swappage);
1367                         page_cache_release(swappage);
1368                         flush_dcache_page(filepage);
1369                         SetPageUptodate(filepage);
1370                         set_page_dirty(filepage);
1371                         swap_free(swap);
1372                 } else if (!(error = add_to_page_cache_locked(swappage, mapping,
1373                                         idx, GFP_NOWAIT))) {
1374                         info->flags |= SHMEM_PAGEIN;
1375                         shmem_swp_set(info, entry, 0);
1376                         shmem_swp_unmap(entry);
1377                         delete_from_swap_cache(swappage);
1378                         spin_unlock(&info->lock);
1379                         filepage = swappage;
1380                         set_page_dirty(filepage);
1381                         swap_free(swap);
1382                 } else {
1383                         shmem_swp_unmap(entry);
1384                         spin_unlock(&info->lock);
1385                         if (error == -ENOMEM) {
1386                                 /*
1387                                  * Reclaim from the proper memory cgroup
1388                                  * and invoke its OOM handler if needed.
1389                                  */
1390                                 error = mem_cgroup_shmem_charge_fallback(
1391                                                                 swappage,
1392                                                                 current->mm,
1393                                                                 gfp);
1394                                 if (error) {
1395                                         unlock_page(swappage);
1396                                         page_cache_release(swappage);
1397                                         goto failed;
1398                                 }
1399                         }
1400                         unlock_page(swappage);
1401                         page_cache_release(swappage);
1402                         goto repeat;
1403                 }
1404         } else if (sgp == SGP_READ && !filepage) {
1405                 shmem_swp_unmap(entry);
1406                 filepage = find_get_page(mapping, idx);
1407                 if (filepage &&
1408                     (!PageUptodate(filepage) || !trylock_page(filepage))) {
1409                         spin_unlock(&info->lock);
1410                         wait_on_page_locked(filepage);
1411                         page_cache_release(filepage);
1412                         filepage = NULL;
1413                         goto repeat;
1414                 }
1415                 spin_unlock(&info->lock);
1416         } else {
1417                 shmem_swp_unmap(entry);
1418                 sbinfo = SHMEM_SB(inode->i_sb);
1419                 if (sbinfo->max_blocks) {
1420                         if (percpu_counter_compare(&sbinfo->used_blocks,
1421                                                 sbinfo->max_blocks) >= 0 ||
1422                             shmem_acct_block(info->flags))
1423                                 goto nospace;
1424                         percpu_counter_inc(&sbinfo->used_blocks);
1425                         inode->i_blocks += BLOCKS_PER_PAGE;
1426                 } else if (shmem_acct_block(info->flags))
1427                         goto nospace;
1428
1429                 if (!filepage) {
1430                         int ret;
1431
1432                         if (!prealloc_page) {
1433                                 spin_unlock(&info->lock);
1434                                 filepage = shmem_alloc_page(gfp, info, idx);
1435                                 if (!filepage) {
1436                                         spin_lock(&info->lock);
1437                                         shmem_unacct_blocks(info->flags, 1);
1438                                         shmem_free_blocks(inode, 1);
1439                                         spin_unlock(&info->lock);
1440                                         error = -ENOMEM;
1441                                         goto failed;
1442                                 }
1443                                 SetPageSwapBacked(filepage);
1444
1445                                 /*
1446                                  * Precharge the page while we can still
1447                                  * wait; compensate afterwards.
1448                                  */
1449                                 error = mem_cgroup_cache_charge(filepage,
1450                                         current->mm, GFP_KERNEL);
1451                                 if (error) {
1452                                         page_cache_release(filepage);
1453                                         spin_lock(&info->lock);
1454                                         shmem_unacct_blocks(info->flags, 1);
1455                                         shmem_free_blocks(inode, 1);
1456                                         spin_unlock(&info->lock);
1457                                         filepage = NULL;
1458                                         goto failed;
1459                                 }
1460
1461                                 spin_lock(&info->lock);
1462                         } else {
1463                                 filepage = prealloc_page;
1464                                 prealloc_page = NULL;
1465                                 SetPageSwapBacked(filepage);
1466                         }
1467
1468                         entry = shmem_swp_alloc(info, idx, sgp, gfp);
1469                         if (IS_ERR(entry))
1470                                 error = PTR_ERR(entry);
1471                         else {
1472                                 swap = *entry;
1473                                 shmem_swp_unmap(entry);
1474                         }
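                        /*
                         * If swp_alloc failed, or a swap entry raced in
                         * while info->lock was dropped, give the memcg
                         * precharge back before bailing out or retrying.
                         */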
1475                         ret = error || swap.val;
1476                         if (ret)
1477                                 mem_cgroup_uncharge_cache_page(filepage);
1478                         else
1479                                 ret = add_to_page_cache_lru(filepage, mapping,
1480                                                 idx, GFP_NOWAIT);
1481                         /*
1482                          * On add_to_page_cache_lru() failure, the uncharge
1483                          * is done automatically.
1484                          */
1485                         if (ret) {
1486                                 shmem_unacct_blocks(info->flags, 1);
1487                                 shmem_free_blocks(inode, 1);
1488                                 spin_unlock(&info->lock);
1489                                 page_cache_release(filepage);
1490                                 filepage = NULL;
1491                                 if (error)
1492                                         goto failed;
1493                                 goto repeat;
1494                         }
1495                         info->flags |= SHMEM_PAGEIN;
1496                 }
1497
1498                 info->alloced++;
1499                 spin_unlock(&info->lock);
1500                 clear_highpage(filepage);
1501                 flush_dcache_page(filepage);
1502                 SetPageUptodate(filepage);
1503                 if (sgp == SGP_DIRTY)
1504                         set_page_dirty(filepage);
1505         }
1506 done:
1507         *pagep = filepage;
1508         error = 0;
1509         goto out;
1510
1511 nospace:
1512         /*
1513          * Perhaps the page was brought in from swap between find_lock_page
1514          * and taking info->lock?  We allow for that at add_to_page_cache_lru,
1515          * but must also avoid reporting a spurious ENOSPC while working on a
1516          * full tmpfs.  (When filepage has been passed in to shmem_getpage, it
1517          * is already in page cache, which prevents this race from occurring.)
1518          */
1519         if (!filepage) {
1520                 struct page *page = find_get_page(mapping, idx);
1521                 if (page) {
1522                         spin_unlock(&info->lock);
1523                         page_cache_release(page);
1524                         goto repeat;
1525                 }
1526         }
1527         spin_unlock(&info->lock);
1528         error = -ENOSPC;
1529 failed:
1530         if (*pagep != filepage) {
1531                 unlock_page(filepage);
1532                 page_cache_release(filepage);
1533         }
1534 out:
1535         if (prealloc_page) {
1536                 mem_cgroup_uncharge_cache_page(prealloc_page);
1537                 page_cache_release(prealloc_page);
1538         }
1539         return error;
1540 }
1541
1542 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1543 {
1544         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
1545         int error;
1546         int ret = VM_FAULT_LOCKED;
1547
1548         if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1549                 return VM_FAULT_SIGBUS;
1550
1551         error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1552         if (error)
1553                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1554
1555         if (ret & VM_FAULT_MAJOR) {
1556                 count_vm_event(PGMAJFAULT);
1557                 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
1558         }
1559         return ret;
1560 }
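
/*
 * Illustrative userspace sketch (not part of this file; assumes "fd" is an
 * open tmpfs file at least one page long, and 4kB pages):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	... first touch faults in a page via shmem_fault()
 *
 * whereas a touch at an offset beyond i_size yields VM_FAULT_SIGBUS above,
 * which the fault core delivers to the task as SIGBUS.
 */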
1561
1562 #ifdef CONFIG_NUMA
1563 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1564 {
1565         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1566         return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1567 }
1568
1569 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
1570                                           unsigned long addr)
1571 {
1572         struct inode *i = vma->vm_file->f_path.dentry->d_inode;
1573         unsigned long idx;
1574
1575         idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1576         return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1577 }
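
/*
 * E.g. (illustrative numbers, 4kB pages): a lookup at vma->vm_start + 0x3000
 * in a mapping with vm_pgoff 16 consults the shared policy tree at file
 * index (0x3000 >> PAGE_SHIFT) + 16 = 19.
 */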
1578 #endif
1579
1580 int shmem_lock(struct file *file, int lock, struct user_struct *user)
1581 {
1582         struct inode *inode = file->f_path.dentry->d_inode;
1583         struct shmem_inode_info *info = SHMEM_I(inode);
1584         int retval = -ENOMEM;
1585
1586         spin_lock(&info->lock);
1587         if (lock && !(info->flags & VM_LOCKED)) {
1588                 if (!user_shm_lock(inode->i_size, user))
1589                         goto out_nomem;
1590                 info->flags |= VM_LOCKED;
1591                 mapping_set_unevictable(file->f_mapping);
1592         }
1593         if (!lock && (info->flags & VM_LOCKED) && user) {
1594                 user_shm_unlock(inode->i_size, user);
1595                 info->flags &= ~VM_LOCKED;
1596                 mapping_clear_unevictable(file->f_mapping);
1597                 scan_mapping_unevictable_pages(file->f_mapping);
1598         }
1599         retval = 0;
1600
1601 out_nomem:
1602         spin_unlock(&info->lock);
1603         return retval;
1604 }
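
/*
 * For orientation (the callers live in ipc/shm.c): shmctl(id, SHM_LOCK, NULL)
 * ends up in shmem_lock(file, 1, user) and SHM_UNLOCK in
 * shmem_lock(file, 0, user).  "Locking" here marks the mapping unevictable
 * and charges the user's RLIMIT_MEMLOCK via user_shm_lock(), rather than
 * touching any page tables.
 */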
1605
1606 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1607 {
1608         file_accessed(file);
1609         vma->vm_ops = &shmem_vm_ops;
1610         vma->vm_flags |= VM_CAN_NONLINEAR;
1611         return 0;
1612 }
1613
1614 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
1615                                      int mode, dev_t dev, unsigned long flags)
1616 {
1617         struct inode *inode;
1618         struct shmem_inode_info *info;
1619         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1620
1621         if (shmem_reserve_inode(sb))
1622                 return NULL;
1623
1624         inode = new_inode(sb);
1625         if (inode) {
1626                 inode->i_ino = get_next_ino();
1627                 inode_init_owner(inode, dir, mode);
1628                 inode->i_blocks = 0;
1629                 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1630                 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1631                 inode->i_generation = get_seconds();
1632                 info = SHMEM_I(inode);
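                /*
                 * Zero only the shmem-private fields: vfs_inode is the
                 * last member of struct shmem_inode_info, so the span
                 * from info up to inode covers everything before it.
                 */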
1633                 memset(info, 0, (char *)inode - (char *)info);
1634                 spin_lock_init(&info->lock);
1635                 info->flags = flags & VM_NORESERVE;
1636                 INIT_LIST_HEAD(&info->swaplist);
1637                 INIT_LIST_HEAD(&info->xattr_list);
1638                 cache_no_acl(inode);
1639
1640                 switch (mode & S_IFMT) {
1641                 default:
1642                         inode->i_op = &shmem_special_inode_operations;
1643                         init_special_inode(inode, mode, dev);
1644                         break;
1645                 case S_IFREG:
1646                         inode->i_mapping->a_ops = &shmem_aops;
1647                         inode->i_op = &shmem_inode_operations;
1648                         inode->i_fop = &shmem_file_operations;
1649                         mpol_shared_policy_init(&info->policy,
1650                                                  shmem_get_sbmpol(sbinfo));
1651                         break;
1652                 case S_IFDIR:
1653                         inc_nlink(inode);
1654                         /* Some things misbehave if size == 0 on a directory */
1655                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
1656                         inode->i_op = &shmem_dir_inode_operations;
1657                         inode->i_fop = &simple_dir_operations;
1658                         break;
1659                 case S_IFLNK:
1660                         /*
1661                          * Must not load anything in the rbtree:
1662                          * mpol_free_shared_policy will not be called.
1663                          */
1664                         mpol_shared_policy_init(&info->policy, NULL);
1665                         break;
1666                 }
1667         } else
1668                 shmem_free_inode(sb);
1669         return inode;
1670 }
1671
1672 #ifdef CONFIG_TMPFS
1673 static const struct inode_operations shmem_symlink_inode_operations;
1674 static const struct inode_operations shmem_symlink_inline_operations;
1675
1676 /*
1677  * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
1678  * but providing them allows a tmpfs file to be used for splice, sendfile, and
1679  * below the loop driver, in the generic fashion that many filesystems support.
1680  */
1681 static int shmem_readpage(struct file *file, struct page *page)
1682 {
1683         struct inode *inode = page->mapping->host;
1684         int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
1685         unlock_page(page);
1686         return error;
1687 }
1688
1689 static int
1690 shmem_write_begin(struct file *file, struct address_space *mapping,
1691                         loff_t pos, unsigned len, unsigned flags,
1692                         struct page **pagep, void **fsdata)
1693 {
1694         struct inode *inode = mapping->host;
1695         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1696         *pagep = NULL;
1697         return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1698 }
1699
1700 static int
1701 shmem_write_end(struct file *file, struct address_space *mapping,
1702                         loff_t pos, unsigned len, unsigned copied,
1703                         struct page *page, void *fsdata)
1704 {
1705         struct inode *inode = mapping->host;
1706
1707         if (pos + copied > inode->i_size)
1708                 i_size_write(inode, pos + copied);
1709
1710         set_page_dirty(page);
1711         unlock_page(page);
1712         page_cache_release(page);
1713
1714         return copied;
1715 }
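
/*
 * Rough shape of the caller, paraphrasing generic_perform_write() (not code
 * from this file): for each chunk the generic write path does
 *
 *	a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata);
 *	... copy user data into the returned locked page ...
 *	a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata);
 *
 * so shmem_write_begin() must hand back a locked, charged page, and
 * shmem_write_end() is the one updating i_size, dirtying and unlocking it.
 */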
1716
1717 static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1718 {
1719         struct inode *inode = filp->f_path.dentry->d_inode;
1720         struct address_space *mapping = inode->i_mapping;
1721         unsigned long index, offset;
1722         enum sgp_type sgp = SGP_READ;
1723
1724         /*
1725          * Might this read be for a stacking filesystem?  Then when reading
1726          * holes of a sparse file, we actually need to allocate those pages,
1727          * and even mark them dirty, so such a read cannot exceed the max_blocks limit.
1728          */
1729         if (segment_eq(get_fs(), KERNEL_DS))
1730                 sgp = SGP_DIRTY;
1731
1732         index = *ppos >> PAGE_CACHE_SHIFT;
1733         offset = *ppos & ~PAGE_CACHE_MASK;
1734
1735         for (;;) {
1736                 struct page *page = NULL;
1737                 unsigned long end_index, nr, ret;
1738                 loff_t i_size = i_size_read(inode);
1739
1740                 end_index = i_size >> PAGE_CACHE_SHIFT;
1741                 if (index > end_index)
1742                         break;
1743                 if (index == end_index) {
1744                         nr = i_size & ~PAGE_CACHE_MASK;
1745                         if (nr <= offset)
1746                                 break;
1747                 }
1748
1749                 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1750                 if (desc->error) {
1751                         if (desc->error == -EINVAL)
1752                                 desc->error = 0;
1753                         break;
1754                 }
1755                 if (page)
1756                         unlock_page(page);
1757
1758                 /*
1759                  * We must re-evaluate i_size afterwards, since reads (unlike
1760                  * writes) are called without i_mutex protection against truncate
1761                  */
1762                 nr = PAGE_CACHE_SIZE;
1763                 i_size = i_size_read(inode);
1764                 end_index = i_size >> PAGE_CACHE_SHIFT;
1765                 if (index == end_index) {
1766                         nr = i_size & ~PAGE_CACHE_MASK;
1767                         if (nr <= offset) {
1768                                 if (page)
1769                                         page_cache_release(page);
1770                                 break;
1771                         }
1772                 }
1773                 nr -= offset;
1774
1775                 if (page) {
1776                         /*
1777                          * If users can be writing to this page using arbitrary
1778                          * virtual addresses, take care about potential aliasing
1779                          * before reading the page on the kernel side.
1780                          */
1781                         if (mapping_writably_mapped(mapping))
1782                                 flush_dcache_page(page);
1783                         /*
1784                          * Mark the page accessed if we read the beginning.
1785                          */
1786                         if (!offset)
1787                                 mark_page_accessed(page);
1788                 } else {
1789                         page = ZERO_PAGE(0);
1790                         page_cache_get(page);
1791                 }
1792
1793                 /*
1794                  * OK, we have the page, and it's up-to-date, so
1795                  * now we can copy it to user space...
1796                  *
1797                  * The actor routine returns how many bytes were actually used.
1798                  * NOTE! This may not be the same as how much of a user buffer
1799                  * we filled up (we may be padding etc), so we can only update
1800                  * "pos" here (the actor routine has to update the user buffer
1801                  * pointers and the remaining count).
1802                  */
1803                 ret = actor(desc, page, offset, nr);
1804                 offset += ret;
1805                 index += offset >> PAGE_CACHE_SHIFT;
1806                 offset &= ~PAGE_CACHE_MASK;
1807
1808                 page_cache_release(page);
1809                 if (ret != nr || !desc->count)
1810                         break;
1811
1812                 cond_resched();
1813         }
1814
1815         *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1816         file_accessed(filp);
1817 }
1818
1819 static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1820                 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1821 {
1822         struct file *filp = iocb->ki_filp;
1823         ssize_t retval;
1824         unsigned long seg;
1825         size_t count;
1826         loff_t *ppos = &iocb->ki_pos;
1827
1828         retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1829         if (retval)
1830                 return retval;
1831
1832         for (seg = 0; seg < nr_segs; seg++) {
1833                 read_descriptor_t desc;
1834
1835                 desc.written = 0;
1836                 desc.arg.buf = iov[seg].iov_base;
1837                 desc.count = iov[seg].iov_len;
1838                 if (desc.count == 0)
1839                         continue;
1840                 desc.error = 0;
1841                 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1842                 retval += desc.written;
1843                 if (desc.error) {
1844                         retval = retval ?: desc.error;
1845                         break;
1846                 }
1847                 if (desc.count > 0)
1848                         break;
1849         }
1850         return retval;
1851 }
1852
1853 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1854                                 struct pipe_inode_info *pipe, size_t len,
1855                                 unsigned int flags)
1856 {
1857         struct address_space *mapping = in->f_mapping;
1858         struct inode *inode = mapping->host;
1859         unsigned int loff, nr_pages, req_pages;
1860         struct page *pages[PIPE_DEF_BUFFERS];
1861         struct partial_page partial[PIPE_DEF_BUFFERS];
1862         struct page *page;
1863         pgoff_t index, end_index;
1864         loff_t isize, left;
1865         int error, page_nr;
1866         struct splice_pipe_desc spd = {
1867                 .pages = pages,
1868                 .partial = partial,
1869                 .flags = flags,
1870                 .ops = &page_cache_pipe_buf_ops,
1871                 .spd_release = spd_release_page,
1872         };
1873
1874         isize = i_size_read(inode);
1875         if (unlikely(*ppos >= isize))
1876                 return 0;
1877
1878         left = isize - *ppos;
1879         if (unlikely(left < len))
1880                 len = left;
1881
1882         if (splice_grow_spd(pipe, &spd))
1883                 return -ENOMEM;
1884
1885         index = *ppos >> PAGE_CACHE_SHIFT;
1886         loff = *ppos & ~PAGE_CACHE_MASK;
1887         req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1888         nr_pages = min(req_pages, pipe->buffers);
1889
1890         spd.nr_pages = find_get_pages_contig(mapping, index,
1891                                                 nr_pages, spd.pages);
1892         index += spd.nr_pages;
1893         error = 0;
1894
1895         while (spd.nr_pages < nr_pages) {
1896                 page = NULL;
1897                 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1898                 if (error)
1899                         break;
1900                 unlock_page(page);
1901                 spd.pages[spd.nr_pages++] = page;
1902                 index++;
1903         }
1904
1905         index = *ppos >> PAGE_CACHE_SHIFT;
1906         nr_pages = spd.nr_pages;
1907         spd.nr_pages = 0;
1908
1909         for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1910                 unsigned int this_len;
1911
1912                 if (!len)
1913                         break;
1914
1915                 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1916                 page = spd.pages[page_nr];
1917
1918                 if (!PageUptodate(page) || page->mapping != mapping) {
1919                         page = NULL;
1920                         error = shmem_getpage(inode, index, &page,
1921                                                         SGP_CACHE, NULL);
1922                         if (error)
1923                                 break;
1924                         unlock_page(page);
1925                         page_cache_release(spd.pages[page_nr]);
1926                         spd.pages[page_nr] = page;
1927                 }
1928
1929                 isize = i_size_read(inode);
1930                 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1931                 if (unlikely(!isize || index > end_index))
1932                         break;
1933
1934                 if (end_index == index) {
1935                         unsigned int plen;
1936
1937                         plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1938                         if (plen <= loff)
1939                                 break;
1940
1941                         this_len = min(this_len, plen - loff);
1942                         len = this_len;
1943                 }
1944
1945                 spd.partial[page_nr].offset = loff;
1946                 spd.partial[page_nr].len = this_len;
1947                 len -= this_len;
1948                 loff = 0;
1949                 spd.nr_pages++;
1950                 index++;
1951         }
1952
1953         while (page_nr < nr_pages)
1954                 page_cache_release(spd.pages[page_nr++]);
1955
1956         if (spd.nr_pages)
1957                 error = splice_to_pipe(pipe, &spd);
1958
1959         splice_shrink_spd(pipe, &spd);
1960
1961         if (error > 0) {
1962                 *ppos += error;
1963                 file_accessed(in);
1964         }
1965         return error;
1966 }
1967
1968 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1969 {
1970         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1971
1972         buf->f_type = TMPFS_MAGIC;
1973         buf->f_bsize = PAGE_CACHE_SIZE;
1974         buf->f_namelen = NAME_MAX;
1975         if (sbinfo->max_blocks) {
1976                 buf->f_blocks = sbinfo->max_blocks;
1977                 buf->f_bavail = buf->f_bfree =
1978                                 sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
1979         }
1980         if (sbinfo->max_inodes) {
1981                 buf->f_files = sbinfo->max_inodes;
1982                 buf->f_ffree = sbinfo->free_inodes;
1983         }
1984         /* else leave those fields 0 like simple_statfs */
1985         return 0;
1986 }
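
/*
 * Worked example (assumed numbers, 4kB pages): a tmpfs mounted with
 * size=512m has max_blocks = 131072; with 1000 blocks in use, statfs
 * reports f_blocks = 131072 and f_bfree = f_bavail = 130072.  tmpfs
 * reserves nothing for root, hence f_bavail == f_bfree.
 */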
1987
1988 /*
1989  * File creation. Allocate an inode, and we're done.
1990  */
1991 static int
1992 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1993 {
1994         struct inode *inode;
1995         int error = -ENOSPC;
1996
1997         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1998         if (inode) {
1999                 error = security_inode_init_security(inode, dir,
2000                                                      &dentry->d_name, NULL,
2001                                                      NULL, NULL);
2002                 if (error) {
2003                         if (error != -EOPNOTSUPP) {
2004                                 iput(inode);
2005                                 return error;
2006                         }
2007                 }
2008 #ifdef CONFIG_TMPFS_POSIX_ACL
2009                 error = generic_acl_init(inode, dir);
2010                 if (error) {
2011                         iput(inode);
2012                         return error;
2013                 }
2014 #else
2015                 error = 0;
2016 #endif
2017                 dir->i_size += BOGO_DIRENT_SIZE;
2018                 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2019                 d_instantiate(dentry, inode);
2020                 dget(dentry); /* Extra count - pin the dentry in core */
2021         }
2022         return error;
2023 }
2024
2025 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2026 {
2027         int error;
2028
2029         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2030                 return error;
2031         inc_nlink(dir);
2032         return 0;
2033 }
2034
2035 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
2036                 struct nameidata *nd)
2037 {
2038         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2039 }
2040
2041 /*
2042  * Link a file.
2043  */
2044 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2045 {
2046         struct inode *inode = old_dentry->d_inode;
2047         int ret;
2048
2049         /*
2050          * No ordinary (disk-based) filesystem counts links as inodes;
2051          * but each new link needs a new dentry, pinning lowmem, and
2052          * tmpfs dentries cannot be pruned until they are unlinked.
2053          */
2054         ret = shmem_reserve_inode(inode->i_sb);
2055         if (ret)
2056                 goto out;
2057
2058         dir->i_size += BOGO_DIRENT_SIZE;
2059         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2060         inc_nlink(inode);
2061         ihold(inode);   /* New dentry reference */
2062         dget(dentry);           /* Extra pinning count for the created dentry */
2063         d_instantiate(dentry, inode);
2064 out:
2065         return ret;
2066 }
2067
2068 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2069 {
2070         struct inode *inode = dentry->d_inode;
2071
2072         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2073                 shmem_free_inode(inode->i_sb);
2074
2075         dir->i_size -= BOGO_DIRENT_SIZE;
2076         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2077         drop_nlink(inode);
2078         dput(dentry);   /* Undo the count from "create" - this does all the work */
2079         return 0;
2080 }
2081
2082 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2083 {
2084         if (!simple_empty(dentry))
2085                 return -ENOTEMPTY;
2086
2087         drop_nlink(dentry->d_inode);
2088         drop_nlink(dir);
2089         return shmem_unlink(dir, dentry);
2090 }
2091
2092 /*
2093  * The VFS layer already does all the dentry stuff for rename;
2094  * we just have to decrement the usage count for the target if
2095  * it exists, so that the VFS layer correctly frees it when it
2096  * gets overwritten.
2097  */
2098 static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2099 {
2100         struct inode *inode = old_dentry->d_inode;
2101         int they_are_dirs = S_ISDIR(inode->i_mode);
2102
2103         if (!simple_empty(new_dentry))
2104                 return -ENOTEMPTY;
2105
2106         if (new_dentry->d_inode) {
2107                 (void) shmem_unlink(new_dir, new_dentry);
2108                 if (they_are_dirs)
2109                         drop_nlink(old_dir);
2110         } else if (they_are_dirs) {
2111                 drop_nlink(old_dir);
2112                 inc_nlink(new_dir);
2113         }
2114
2115         old_dir->i_size -= BOGO_DIRENT_SIZE;
2116         new_dir->i_size += BOGO_DIRENT_SIZE;
2117         old_dir->i_ctime = old_dir->i_mtime =
2118         new_dir->i_ctime = new_dir->i_mtime =
2119         inode->i_ctime = CURRENT_TIME;
2120         return 0;
2121 }
2122
2123 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2124 {
2125         int error;
2126         int len;
2127         struct inode *inode;
2128         struct page *page = NULL;
2129         char *kaddr;
2130         struct shmem_inode_info *info;
2131
2132         len = strlen(symname) + 1;
2133         if (len > PAGE_CACHE_SIZE)
2134                 return -ENAMETOOLONG;
2135
2136         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
2137         if (!inode)
2138                 return -ENOSPC;
2139
2140         error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
2141                                              NULL, NULL);
2142         if (error) {
2143                 if (error != -EOPNOTSUPP) {
2144                         iput(inode);
2145                         return error;
2146                 }
2147                 error = 0;
2148         }
2149
2150         info = SHMEM_I(inode);
2151         inode->i_size = len-1;
2152         if (len <= SHMEM_SYMLINK_INLINE_LEN) {
2153                 /* do it inline */
2154                 memcpy(info->inline_symlink, symname, len);
2155                 inode->i_op = &shmem_symlink_inline_operations;
2156         } else {
2157                 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2158                 if (error) {
2159                         iput(inode);
2160                         return error;
2161                 }
2162                 inode->i_mapping->a_ops = &shmem_aops;
2163                 inode->i_op = &shmem_symlink_inode_operations;
2164                 kaddr = kmap_atomic(page, KM_USER0);
2165                 memcpy(kaddr, symname, len);
2166                 kunmap_atomic(kaddr, KM_USER0);
2167                 set_page_dirty(page);
2168                 unlock_page(page);
2169                 page_cache_release(page);
2170         }
2171         dir->i_size += BOGO_DIRENT_SIZE;
2172         dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2173         d_instantiate(dentry, inode);
2174         dget(dentry);
2175         return 0;
2176 }
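
/*
 * By example: a short target such as "a/b" (len 4 including the NUL,
 * comfortably within SHMEM_SYMLINK_INLINE_LEN) is copied straight into the
 * inode and never allocates a page; a longer target is written into the
 * file's first page via shmem_getpage(), which is why only that variant
 * sets shmem_aops on the inode.
 */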
2177
2178 static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
2179 {
2180         nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
2181         return NULL;
2182 }
2183
2184 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
2185 {
2186         struct page *page = NULL;
2187         int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
2188         nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
2189         if (page)
2190                 unlock_page(page);
2191         return page;
2192 }
2193
2194 static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
2195 {
2196         if (!IS_ERR(nd_get_link(nd))) {
2197                 struct page *page = cookie;
2198                 kunmap(page);
2199                 mark_page_accessed(page);
2200                 page_cache_release(page);
2201         }
2202 }
2203
2204 #ifdef CONFIG_TMPFS_XATTR
2205 /*
2206  * Superblocks without xattr inode operations may get some security.* xattr
2207  * support from the LSM "for free". As soon as we have any other xattrs
2208  * like ACLs, we also need to implement the security.* handlers at
2209  * filesystem level, though.
2210  */
2211
2212 static int shmem_xattr_get(struct dentry *dentry, const char *name,
2213                            void *buffer, size_t size)
2214 {
2215         struct shmem_inode_info *info;
2216         struct shmem_xattr *xattr;
2217         int ret = -ENODATA;
2218
2219         info = SHMEM_I(dentry->d_inode);
2220
2221         spin_lock(&info->lock);
2222         list_for_each_entry(xattr, &info->xattr_list, list) {
2223                 if (strcmp(name, xattr->name))
2224                         continue;
2225
2226                 ret = xattr->size;
2227                 if (buffer) {
2228                         if (size < xattr->size)
2229                                 ret = -ERANGE;
2230                         else
2231                                 memcpy(buffer, xattr->value, xattr->size);
2232                 }
2233                 break;
2234         }
2235         spin_unlock(&info->lock);
2236         return ret;
2237 }
2238
2239 static int shmem_xattr_set(struct dentry *dentry, const char *name,
2240                            const void *value, size_t size, int flags)
2241 {
2242         struct inode *inode = dentry->d_inode;
2243         struct shmem_inode_info *info = SHMEM_I(inode);
2244         struct shmem_xattr *xattr;
2245         struct shmem_xattr *new_xattr = NULL;
2246         size_t len;
2247         int err = 0;
2248
2249         /* value == NULL means remove */
2250         if (value) {
2251                 /* reject size_t wrap-around in the addition below */
2252                 len = sizeof(*new_xattr) + size;
2253                 if (len <= sizeof(*new_xattr))
2254                         return -ENOMEM;
2255
2256                 new_xattr = kmalloc(len, GFP_KERNEL);
2257                 if (!new_xattr)
2258                         return -ENOMEM;
2259
2260                 new_xattr->name = kstrdup(name, GFP_KERNEL);
2261                 if (!new_xattr->name) {
2262                         kfree(new_xattr);
2263                         return -ENOMEM;
2264                 }
2265
2266                 new_xattr->size = size;
2267                 memcpy(new_xattr->value, value, size);
2268         }
2269
2270         spin_lock(&info->lock);
2271         list_for_each_entry(xattr, &info->xattr_list, list) {
2272                 if (!strcmp(name, xattr->name)) {
2273                         if (flags & XATTR_CREATE) {
2274                                 xattr = new_xattr;
2275                                 err = -EEXIST;
2276                         } else if (new_xattr) {
2277                                 list_replace(&xattr->list, &new_xattr->list);
2278                         } else {
2279                                 list_del(&xattr->list);
2280                         }
2281                         goto out;
2282                 }
2283         }
2284         if (flags & XATTR_REPLACE) {
2285                 xattr = new_xattr;
2286                 err = -ENODATA;
2287         } else {
2288                 list_add(&new_xattr->list, &info->xattr_list);
2289                 xattr = NULL;
2290         }
2291 out:
2292         spin_unlock(&info->lock);
2293         if (xattr)
2294                 kfree(xattr->name);
2295         kfree(xattr);
2296         return err;
2297 }
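
/*
 * Flag semantics implemented above, by example (the attribute name is
 * illustrative):
 *
 *	set "trusted.foo" with XATTR_CREATE   -> -EEXIST if already present
 *	set "trusted.foo" with XATTR_REPLACE  -> -ENODATA if absent
 *	value == NULL with XATTR_REPLACE      -> removes the attribute
 *
 * Whichever of the old and new nodes loses is freed only after
 * info->lock has been dropped.
 */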
2298
2299
2300 static const struct xattr_handler *shmem_xattr_handlers[] = {
2301 #ifdef CONFIG_TMPFS_POSIX_ACL
2302         &generic_acl_access_handler,
2303         &generic_acl_default_handler,
2304 #endif
2305         NULL
2306 };
2307
2308 static int shmem_xattr_validate(const char *name)
2309 {
2310         struct { const char *prefix; size_t len; } arr[] = {
2311                 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2312                 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2313         };
2314         int i;
2315
2316         for (i = 0; i < ARRAY_SIZE(arr); i++) {
2317                 size_t preflen = arr[i].len;
2318                 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2319                         if (!name[preflen])
2320                                 return -EINVAL;
2321                         return 0;
2322                 }
2323         }
2324         return -EOPNOTSUPP;
2325 }
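
/*
 * By example: "security.selinux" and "trusted.md5sum" pass validation, a
 * bare prefix like "security." is -EINVAL, and anything else, e.g.
 * "user.foo", is -EOPNOTSUPP - tmpfs does not support the user.*
 * namespace here (system.* is routed to sb->s_xattr by the callers below).
 */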
2326
2327 static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2328                               void *buffer, size_t size)
2329 {
2330         int err;
2331
2332         /*
2333          * If this is a request for a synthetic attribute in the system.*
2334          * namespace use the generic infrastructure to resolve a handler
2335          * for it via sb->s_xattr.
2336          */
2337         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2338                 return generic_getxattr(dentry, name, buffer, size);
2339
2340         err = shmem_xattr_validate(name);
2341         if (err)
2342                 return err;
2343
2344         return shmem_xattr_get(dentry, name, buffer, size);
2345 }
2346
2347 static int shmem_setxattr(struct dentry *dentry, const char *name,
2348                           const void *value, size_t size, int flags)
2349 {
2350         int err;
2351
2352         /*
2353          * If this is a request for a synthetic attribute in the system.*
2354          * namespace use the generic infrastructure to resolve a handler
2355          * for it via sb->s_xattr.
2356          */
2357         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2358                 return generic_setxattr(dentry, name, value, size, flags);
2359
2360         err = shmem_xattr_validate(name);
2361         if (err)
2362                 return err;
2363
2364         if (size == 0)
2365                 value = "";  /* empty EA, do not remove */
2366
2367         return shmem_xattr_set(dentry, name, value, size, flags);
2368
2369 }
2370
2371 static int shmem_removexattr(struct dentry *dentry, const char *name)
2372 {
2373         int err;
2374
2375         /*
2376          * If this is a request for a synthetic attribute in the system.*
2377          * namespace use the generic infrastructure to resolve a handler
2378          * for it via sb->s_xattr.
2379          */
2380         if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2381                 return generic_removexattr(dentry, name);
2382
2383         err = shmem_xattr_validate(name);
2384         if (err)
2385                 return err;
2386
2387         return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
2388 }
2389
2390 static bool xattr_is_trusted(const char *name)
2391 {
2392         return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
2393 }
2394
2395 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2396 {
2397         bool trusted = capable(CAP_SYS_ADMIN);
2398         struct shmem_xattr *xattr;
2399         struct shmem_inode_info *info;
2400         size_t used = 0;
2401
2402         info = SHMEM_I(dentry->d_inode);
2403
2404         spin_lock(&info->lock);
2405         list_for_each_entry(xattr, &info->xattr_list, list) {
2406                 size_t len;
2407
2408                 /* skip "trusted." attributes for unprivileged callers */
2409                 if (!trusted && xattr_is_trusted(xattr->name))
2410                         continue;
2411
2412                 len = strlen(xattr->name) + 1;
2413                 used += len;
2414                 if (buffer) {
2415                         if (size < used) {
2416                                 used = -ERANGE;
2417                                 break;
2418                         }
2419                         memcpy(buffer, xattr->name, len);
2420                         buffer += len;
2421                 }
2422         }
2423         spin_unlock(&info->lock);
2424
2425         return used;
2426 }
2427 #endif /* CONFIG_TMPFS_XATTR */
2428
2429 static const struct inode_operations shmem_symlink_inline_operations = {
2430         .readlink       = generic_readlink,
2431         .follow_link    = shmem_follow_link_inline,
2432 #ifdef CONFIG_TMPFS_XATTR
2433         .setxattr       = shmem_setxattr,
2434         .getxattr       = shmem_getxattr,
2435         .listxattr      = shmem_listxattr,
2436         .removexattr    = shmem_removexattr,
2437 #endif
2438 };
2439
2440 static const struct inode_operations shmem_symlink_inode_operations = {
2441         .readlink       = generic_readlink,
2442         .follow_link    = shmem_follow_link,
2443         .put_link       = shmem_put_link,
2444 #ifdef CONFIG_TMPFS_XATTR
2445         .setxattr       = shmem_setxattr,
2446         .getxattr       = shmem_getxattr,
2447         .listxattr      = shmem_listxattr,
2448         .removexattr    = shmem_removexattr,
2449 #endif
2450 };
2451
2452 static struct dentry *shmem_get_parent(struct dentry *child)
2453 {
2454         return ERR_PTR(-ESTALE);
2455 }
2456
2457 static int shmem_match(struct inode *ino, void *vfh)
2458 {
2459         __u32 *fh = vfh;
2460         __u64 inum = fh[2];
2461         inum = (inum << 32) | fh[1];
2462         return ino->i_ino == inum && fh[0] == ino->i_generation;
2463 }
2464
2465 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2466                 struct fid *fid, int fh_len, int fh_type)
2467 {
2468         struct inode *inode;
2469         struct dentry *dentry = NULL;
2470         u64 inum = fid->raw[2];
2471         inum = (inum << 32) | fid->raw[1];
2472
2473         if (fh_len < 3)
2474                 return NULL;
2475
2476         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2477                         shmem_match, fid->raw);
2478         if (inode) {
2479                 dentry = d_find_alias(inode);
2480                 iput(inode);
2481         }
2482
2483         return dentry;
2484 }
2485
2486 static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2487                                 int connectable)
2488 {
2489         struct inode *inode = dentry->d_inode;
2490
2491         if (*len < 3) {
2492                 *len = 3;
2493                 return 255;
2494         }
2495
2496         if (inode_unhashed(inode)) {
2497                 /* Unfortunately insert_inode_hash is not idempotent,
2498                  * and since we hash inodes here rather than at creation
2499                  * time, we need a lock to ensure we only try
2500                  * to do it once.
2501                  */
2502                 static DEFINE_SPINLOCK(lock);
2503                 spin_lock(&lock);
2504                 if (inode_unhashed(inode))
2505                         __insert_inode_hash(inode,
2506                                             inode->i_ino + inode->i_generation);
2507                 spin_unlock(&lock);
2508         }
2509
2510         fh[0] = inode->i_generation;
2511         fh[1] = inode->i_ino;
2512         fh[2] = ((__u64)inode->i_ino) >> 32;
2513
2514         *len = 3;
2515         return 1;
2516 }
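
/*
 * Layout of the handle produced above, as consumed by shmem_match():
 *
 *	fh[0] = i_generation
 *	fh[1] = i_ino, low 32 bits
 *	fh[2] = i_ino, high 32 bits
 *
 * so the lookup side reconstructs inum as ((__u64)fh[2] << 32) | fh[1],
 * and ilookup5() hashes on i_ino + i_generation as inserted above.
 */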
2517
2518 static const struct export_operations shmem_export_ops = {
2519         .get_parent     = shmem_get_parent,
2520         .encode_fh      = shmem_encode_fh,
2521         .fh_to_dentry   = shmem_fh_to_dentry,
2522 };
2523
2524 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2525                                bool remount)
2526 {
2527         char *this_char, *value, *rest;
2528
2529         while (options != NULL) {
2530                 this_char = options;
2531                 for (;;) {
2532                         /*
2533                          * NUL-terminate this option: unfortunately,
2534                          * mount options form a comma-separated list,
2535                          * but mpol's nodelist may also contain commas.
2536                          */
2537                         options = strchr(options, ',');
2538                         if (options == NULL)
2539                                 break;
2540                         options++;
2541                         if (!isdigit(*options)) {
2542                                 options[-1] = '\0';
2543                                 break;
2544                         }
2545                 }
2546                 if (!*this_char)
2547                         continue;
2548                 if ((value = strchr(this_char,'=')) != NULL) {
2549                         *value++ = 0;
2550                 } else {
2551                         printk(KERN_ERR
2552                             "tmpfs: No value for mount option '%s'\n",
2553                             this_char);
2554                         return 1;
2555                 }
2556
2557                 if (!strcmp(this_char,"size")) {
2558                         unsigned long long size;
2559                         size = memparse(value,&rest);
2560                         if (*rest == '%') {
2561                                 size <<= PAGE_SHIFT;
2562                                 size *= totalram_pages;
2563                                 do_div(size, 100);
2564                                 rest++;
2565                         }
2566                         if (*rest)
2567                                 goto bad_val;
2568                         sbinfo->max_blocks =
2569                                 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
2570                 } else if (!strcmp(this_char,"nr_blocks")) {
2571                         sbinfo->max_blocks = memparse(value, &rest);
2572                         if (*rest)
2573                                 goto bad_val;
2574                 } else if (!strcmp(this_char,"nr_inodes")) {
2575                         sbinfo->max_inodes = memparse(value, &rest);
2576                         if (*rest)
2577                                 goto bad_val;
2578                 } else if (!strcmp(this_char,"mode")) {
2579                         if (remount)
2580                                 continue;
2581                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
2582                         if (*rest)
2583                                 goto bad_val;
2584                 } else if (!strcmp(this_char,"uid")) {
2585                         if (remount)
2586                                 continue;
2587                         sbinfo->uid = simple_strtoul(value, &rest, 0);
2588                         if (*rest)
2589                                 goto bad_val;
2590                 } else if (!strcmp(this_char,"gid")) {
2591                         if (remount)
2592                                 continue;
2593                         sbinfo->gid = simple_strtoul(value, &rest, 0);
2594                         if (*rest)
2595                                 goto bad_val;
2596                 } else if (!strcmp(this_char,"mpol")) {
2597                         if (mpol_parse_str(value, &sbinfo->mpol, 1))
2598                                 goto bad_val;
2599                 } else {
2600                         printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2601                                this_char);
2602                         return 1;
2603                 }
2604         }
2605         return 0;
2606
2607 bad_val:
2608         printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2609                value, this_char);
2610         return 1;
2611
2612 }
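
/*
 * Worked example (assumed machine: 4kB pages, totalram_pages = 262144,
 * i.e. 1GB of RAM): for "size=50%", memparse() returns 50 and the '%'
 * branch computes (50 << PAGE_SHIFT) * totalram_pages / 100 = 512MB,
 * giving max_blocks = DIV_ROUND_UP(512MB, 4kB) = 131072.  The isdigit()
 * test in the splitting loop is what keeps "mpol=bind:0,2" in one piece:
 * that comma is followed by a digit, so it is taken to be part of the
 * nodelist rather than an option separator.
 */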
2613
2614 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2615 {
2616         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2617         struct shmem_sb_info config = *sbinfo;
2618         unsigned long inodes;
2619         int error = -EINVAL;
2620
2621         if (shmem_parse_options(data, &config, true))
2622                 return error;
2623
2624         spin_lock(&sbinfo->stat_lock);
2625         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2626         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2627                 goto out;
2628         if (config.max_inodes < inodes)
2629                 goto out;
2630         /*
2631          * Those tests also disallow limited->unlimited while any are in
2632          * use, so i_blocks will always be zero when max_blocks is zero;
2633          * but we must separately disallow unlimited->limited, because
2634          * in that case we have no record of how much is already in use.
2635          */
2636         if (config.max_blocks && !sbinfo->max_blocks)
2637                 goto out;
2638         if (config.max_inodes && !sbinfo->max_inodes)
2639                 goto out;
2640
2641         error = 0;
2642         sbinfo->max_blocks  = config.max_blocks;
2643         sbinfo->max_inodes  = config.max_inodes;
2644         sbinfo->free_inodes = config.max_inodes - inodes;
2645
2646         mpol_put(sbinfo->mpol);
2647         sbinfo->mpol        = config.mpol;      /* transfers initial ref */
2648 out:
2649         spin_unlock(&sbinfo->stat_lock);
2650         return error;
2651 }
2652
2653 static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2654 {
2655         struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2656
2657         if (sbinfo->max_blocks != shmem_default_max_blocks())
2658                 seq_printf(seq, ",size=%luk",
2659                         sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2660         if (sbinfo->max_inodes != shmem_default_max_inodes())
2661                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2662         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2663                 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2664         if (sbinfo->uid != 0)
2665                 seq_printf(seq, ",uid=%u", sbinfo->uid);
2666         if (sbinfo->gid != 0)
2667                 seq_printf(seq, ",gid=%u", sbinfo->gid);
2668         shmem_show_mpol(seq, sbinfo->mpol);
2669         return 0;
2670 }
2671 #endif /* CONFIG_TMPFS */
2672
2673 static void shmem_put_super(struct super_block *sb)
2674 {
2675         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2676
2677         percpu_counter_destroy(&sbinfo->used_blocks);
2678         kfree(sbinfo);
2679         sb->s_fs_info = NULL;
2680 }
2681
2682 int shmem_fill_super(struct super_block *sb, void *data, int silent)
2683 {
2684         struct inode *inode;
2685         struct dentry *root;
2686         struct shmem_sb_info *sbinfo;
2687         int err = -ENOMEM;
2688
2689         /* Round up to L1_CACHE_BYTES to resist false sharing */
2690         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2691                                 L1_CACHE_BYTES), GFP_KERNEL);
2692         if (!sbinfo)
2693                 return -ENOMEM;
2694
2695         sbinfo->mode = S_IRWXUGO | S_ISVTX;
2696         sbinfo->uid = current_fsuid();
2697         sbinfo->gid = current_fsgid();
2698         sb->s_fs_info = sbinfo;
2699
2700 #ifdef CONFIG_TMPFS
2701         /*
2702          * By default we only allow half of the physical RAM per
2703          * tmpfs instance, limiting inodes to one per page of lowmem;
2704          * but the internal instance is left unlimited.
2705          */
2706         if (!(sb->s_flags & MS_NOUSER)) {
2707                 sbinfo->max_blocks = shmem_default_max_blocks();
2708                 sbinfo->max_inodes = shmem_default_max_inodes();
2709                 if (shmem_parse_options(data, sbinfo, false)) {
2710                         err = -EINVAL;
2711                         goto failed;
2712                 }
2713         }
2714         sb->s_export_op = &shmem_export_ops;
2715 #else
2716         sb->s_flags |= MS_NOUSER;
2717 #endif
2718
2719         spin_lock_init(&sbinfo->stat_lock);
2720         if (percpu_counter_init(&sbinfo->used_blocks, 0))
2721                 goto failed;
2722         sbinfo->free_inodes = sbinfo->max_inodes;
2723
2724         sb->s_maxbytes = SHMEM_MAX_BYTES;
2725         sb->s_blocksize = PAGE_CACHE_SIZE;
2726         sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2727         sb->s_magic = TMPFS_MAGIC;
2728         sb->s_op = &shmem_ops;
2729         sb->s_time_gran = 1;
2730 #ifdef CONFIG_TMPFS_XATTR
2731         sb->s_xattr = shmem_xattr_handlers;
2732 #endif
2733 #ifdef CONFIG_TMPFS_POSIX_ACL
2734         sb->s_flags |= MS_POSIXACL;
2735 #endif
2736
2737         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2738         if (!inode)
2739                 goto failed;
2740         inode->i_uid = sbinfo->uid;
2741         inode->i_gid = sbinfo->gid;
2742         root = d_alloc_root(inode);
2743         if (!root)
2744                 goto failed_iput;
2745         sb->s_root = root;
2746         return 0;
2747
2748 failed_iput:
2749         iput(inode);
2750 failed:
2751         shmem_put_super(sb);
2752         return err;
2753 }
2754
2755 static struct kmem_cache *shmem_inode_cachep;
2756
2757 static struct inode *shmem_alloc_inode(struct super_block *sb)
2758 {
2759         struct shmem_inode_info *p;
2760         p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2761         if (!p)
2762                 return NULL;
2763         return &p->vfs_inode;
2764 }
2765
2766 static void shmem_i_callback(struct rcu_head *head)
2767 {
2768         struct inode *inode = container_of(head, struct inode, i_rcu);
2769         INIT_LIST_HEAD(&inode->i_dentry);
2770         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2771 }
2772
2773 static void shmem_destroy_inode(struct inode *inode)
2774 {
2775         if ((inode->i_mode & S_IFMT) == S_IFREG) {
2776                 /* only struct inode is valid if it's an inline symlink */
2777                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2778         }
2779         call_rcu(&inode->i_rcu, shmem_i_callback);
2780 }
2781
2782 static void init_once(void *foo)
2783 {
2784         struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2785
2786         inode_init_once(&p->vfs_inode);
2787 }
2788
2789 static int init_inodecache(void)
2790 {
2791         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2792                                 sizeof(struct shmem_inode_info),
2793                                 0, SLAB_PANIC, init_once);
2794         return 0;
2795 }
2796
2797 static void destroy_inodecache(void)
2798 {
2799         kmem_cache_destroy(shmem_inode_cachep);
2800 }
2801
2802 static const struct address_space_operations shmem_aops = {
2803         .writepage      = shmem_writepage,
2804         .set_page_dirty = __set_page_dirty_no_writeback,
2805 #ifdef CONFIG_TMPFS
2806         .readpage       = shmem_readpage,
2807         .write_begin    = shmem_write_begin,
2808         .write_end      = shmem_write_end,
2809 #endif
2810         .migratepage    = migrate_page,
2811         .error_remove_page = generic_error_remove_page,
2812 };
2813
2814 static const struct file_operations shmem_file_operations = {
2815         .mmap           = shmem_mmap,
2816 #ifdef CONFIG_TMPFS
2817         .llseek         = generic_file_llseek,
2818         .read           = do_sync_read,
2819         .write          = do_sync_write,
2820         .aio_read       = shmem_file_aio_read,
2821         .aio_write      = generic_file_aio_write,
2822         .fsync          = noop_fsync,
2823         .splice_read    = shmem_file_splice_read,
2824         .splice_write   = generic_file_splice_write,
2825 #endif
2826 };
2827
2828 static const struct inode_operations shmem_inode_operations = {
2829         .setattr        = shmem_setattr,
2830         .truncate_range = shmem_truncate_range,
2831 #ifdef CONFIG_TMPFS_XATTR
2832         .setxattr       = shmem_setxattr,
2833         .getxattr       = shmem_getxattr,
2834         .listxattr      = shmem_listxattr,
2835         .removexattr    = shmem_removexattr,
2836 #endif
2837 #ifdef CONFIG_TMPFS_POSIX_ACL
2838         .check_acl      = generic_check_acl,
2839 #endif
2840
2841 };
2842
2843 static const struct inode_operations shmem_dir_inode_operations = {
2844 #ifdef CONFIG_TMPFS
2845         .create         = shmem_create,
2846         .lookup         = simple_lookup,
2847         .link           = shmem_link,
2848         .unlink         = shmem_unlink,
2849         .symlink        = shmem_symlink,
2850         .mkdir          = shmem_mkdir,
2851         .rmdir          = shmem_rmdir,
2852         .mknod          = shmem_mknod,
2853         .rename         = shmem_rename,
2854 #endif
2855 #ifdef CONFIG_TMPFS_XATTR
2856         .setxattr       = shmem_setxattr,
2857         .getxattr       = shmem_getxattr,
2858         .listxattr      = shmem_listxattr,
2859         .removexattr    = shmem_removexattr,
2860 #endif
2861 #ifdef CONFIG_TMPFS_POSIX_ACL
2862         .setattr        = shmem_setattr,
2863         .check_acl      = generic_check_acl,
2864 #endif
2865 };
2866
2867 static const struct inode_operations shmem_special_inode_operations = {
2868 #ifdef CONFIG_TMPFS_XATTR
2869         .setxattr       = shmem_setxattr,
2870         .getxattr       = shmem_getxattr,
2871         .listxattr      = shmem_listxattr,
2872         .removexattr    = shmem_removexattr,
2873 #endif
2874 #ifdef CONFIG_TMPFS_POSIX_ACL
2875         .setattr        = shmem_setattr,
2876         .check_acl      = generic_check_acl,
2877 #endif
2878 };
2879
2880 static const struct super_operations shmem_ops = {
2881         .alloc_inode    = shmem_alloc_inode,
2882         .destroy_inode  = shmem_destroy_inode,
2883 #ifdef CONFIG_TMPFS
2884         .statfs         = shmem_statfs,
2885         .remount_fs     = shmem_remount_fs,
2886         .show_options   = shmem_show_options,
2887 #endif
2888         .evict_inode    = shmem_evict_inode,
2889         .drop_inode     = generic_delete_inode,
2890         .put_super      = shmem_put_super,
2891 };
2892
2893 static const struct vm_operations_struct shmem_vm_ops = {
2894         .fault          = shmem_fault,
2895 #ifdef CONFIG_NUMA
2896         .set_policy     = shmem_set_policy,
2897         .get_policy     = shmem_get_policy,
2898 #endif
2899 };
2900
2901
2902 static struct dentry *shmem_mount(struct file_system_type *fs_type,
2903         int flags, const char *dev_name, void *data)
2904 {
2905         return mount_nodev(fs_type, flags, data, shmem_fill_super);
2906 }
2907
2908 static struct file_system_type tmpfs_fs_type = {
2909         .owner          = THIS_MODULE,
2910         .name           = "tmpfs",
2911         .mount          = shmem_mount,
2912         .kill_sb        = kill_litter_super,
2913 };
2914
2915 int __init init_tmpfs(void)
2916 {
2917         int error;
2918
2919         error = bdi_init(&shmem_backing_dev_info);
2920         if (error)
2921                 goto out4;
2922
2923         error = init_inodecache();
2924         if (error)
2925                 goto out3;
2926
2927         error = register_filesystem(&tmpfs_fs_type);
2928         if (error) {
2929                 printk(KERN_ERR "Could not register tmpfs\n");
2930                 goto out2;
2931         }
2932
2933         shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2934                                 tmpfs_fs_type.name, NULL);
2935         if (IS_ERR(shm_mnt)) {
2936                 error = PTR_ERR(shm_mnt);
2937                 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2938                 goto out1;
2939         }
2940         return 0;
2941
2942 out1:
2943         unregister_filesystem(&tmpfs_fs_type);
2944 out2:
2945         destroy_inodecache();
2946 out3:
2947         bdi_destroy(&shmem_backing_dev_info);
2948 out4:
2949         shm_mnt = ERR_PTR(error);
2950         return error;
2951 }
2952
2953 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
2954 /**
2955  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2956  * @inode: the inode to be searched
2957  * @pgoff: the offset to be searched
2958  * @pagep: where to store the found page, if any
2959  * @ent: where to store the found swap entry, if any
2960  *
2961  * If a page is found, its refcount is incremented; the caller must
2962  * drop that reference when finished with the page.
2963  */
2964 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2965                                         struct page **pagep, swp_entry_t *ent)
2966 {
2967         swp_entry_t entry = { .val = 0 }, *ptr;
2968         struct page *page = NULL;
2969         struct shmem_inode_info *info = SHMEM_I(inode);
2970
2971         if (((loff_t)pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2972                 goto out;
2973
2974         spin_lock(&info->lock);
2975         ptr = shmem_swp_entry(info, pgoff, NULL);
2976 #ifdef CONFIG_SWAP
2977         if (ptr && ptr->val) {
2978                 entry.val = ptr->val;
2979                 page = find_get_page(&swapper_space, entry.val);
2980         } else
2981 #endif
2982                 page = find_get_page(inode->i_mapping, pgoff);
2983         if (ptr)
2984                 shmem_swp_unmap(ptr);
2985         spin_unlock(&info->lock);
2986 out:
2987         *pagep = page;
2988         *ent = entry;
2989 }
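
/*
 * Illustrative caller sketch: this helper is an assumption invented for
 * the example, not a function of this file.  It shows the reference-
 * handling contract documented above.
 */
static inline void example_probe_shmem(struct inode *inode, pgoff_t pgoff)
{
	struct page *page;
	swp_entry_t ent;

	mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
	if (page) {
		/* in page (or swap) cache: drop the reference taken for us */
		page_cache_release(page);
	} else if (ent.val) {
		/* resident on swap only: ent identifies the swap slot */
	}
}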
2990 #endif
2991
2992 #else /* !CONFIG_SHMEM */
2993
2994 /*
2995  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2996  *
2997  * This is intended for small systems where the benefits of the full
2998  * shmem code (swap-backed and resource-limited) are outweighed by
2999  * its complexity. On systems without swap this code should be
3000  * effectively equivalent, but much lighter weight.
3001  */
3002
3003 #include <linux/ramfs.h>
3004
3005 static struct file_system_type tmpfs_fs_type = {
3006         .name           = "tmpfs",
3007         .mount          = ramfs_mount,
3008         .kill_sb        = kill_litter_super,
3009 };
3010
3011 int __init init_tmpfs(void)
3012 {
3013         BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
3014
3015         shm_mnt = kern_mount(&tmpfs_fs_type);
3016         BUG_ON(IS_ERR(shm_mnt));
3017
3018         return 0;
3019 }
3020
3021 int shmem_unuse(swp_entry_t entry, struct page *page)
3022 {
3023         return 0;
3024 }
3025
3026 int shmem_lock(struct file *file, int lock, struct user_struct *user)
3027 {
3028         return 0;
3029 }
3030
3031 void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
3032 {
3033         truncate_inode_pages_range(inode->i_mapping, start, end);
3034 }
3035 EXPORT_SYMBOL_GPL(shmem_truncate_range);
3036
3037 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
3038 /**
3039  * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
3040  * @inode: the inode to be searched
3041  * @pgoff: the offset to be searched
3042  * @pagep: where to store the found page, if any
3043  * @ent: where to store the found swap entry, if any
3044  *
3045  * If a page is found, its refcount is incremented; the caller must
3046  * drop that reference when finished with the page.
3047  */
3048 void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
3049                                         struct page **pagep, swp_entry_t *ent)
3050 {
3051         struct page *page = NULL;
3052
3053         if (((loff_t)pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
3054                 goto out;
3055         page = find_get_page(inode->i_mapping, pgoff);
3056 out:
3057         *pagep = page;
3058         *ent = (swp_entry_t){ .val = 0 };
3059 }
3060 #endif
3061
3062 #define shmem_vm_ops                            generic_file_vm_ops
3063 #define shmem_file_operations                   ramfs_file_operations
3064 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
3065 #define shmem_acct_size(flags, size)            0
3066 #define shmem_unacct_size(flags, size)          do {} while (0)
3067 #define SHMEM_MAX_BYTES                         MAX_LFS_FILESIZE
3068
3069 #endif /* CONFIG_SHMEM */
3070
3071 /* common code */
3072
3073 /**
3074  * shmem_file_setup - get an unlinked file living in tmpfs
3075  * @name: name for dentry (to be seen in /proc/<pid>/maps)
3076  * @size: size to be set for the file
3077  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
3078  */
3079 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
3080 {
3081         int error;
3082         struct file *file;
3083         struct inode *inode;
3084         struct path path;
3085         struct dentry *root;
3086         struct qstr this;
3087
3088         if (IS_ERR(shm_mnt))
3089                 return (void *)shm_mnt;
3090
3091         if (size < 0 || size > SHMEM_MAX_BYTES)
3092                 return ERR_PTR(-EINVAL);
3093
3094         if (shmem_acct_size(flags, size))
3095                 return ERR_PTR(-ENOMEM);
3096
3097         error = -ENOMEM;
3098         this.name = name;
3099         this.len = strlen(name);
3100         this.hash = 0; /* will go */
3101         root = shm_mnt->mnt_root;
3102         path.dentry = d_alloc(root, &this);
3103         if (!path.dentry)
3104                 goto put_memory;
3105         path.mnt = mntget(shm_mnt);
3106
3107         error = -ENOSPC;
3108         inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
3109         if (!inode)
3110                 goto put_dentry;
3111
3112         d_instantiate(path.dentry, inode);
3113         inode->i_size = size;
3114         inode->i_nlink = 0;     /* It is unlinked */
3115 #ifndef CONFIG_MMU
3116         error = ramfs_nommu_expand_for_mapping(inode, size);
3117         if (error)
3118                 goto put_dentry;
3119 #endif
3120
3121         error = -ENFILE;
3122         file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
3123                   &shmem_file_operations);
3124         if (!file)
3125                 goto put_dentry;
3126
3127         return file;
3128
3129 put_dentry:
3130         path_put(&path);
3131 put_memory:
3132         shmem_unacct_size(flags, size);
3133         return ERR_PTR(error);
3134 }
3135 EXPORT_SYMBOL_GPL(shmem_file_setup);
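
/*
 * Usage sketch (an assumption, not code from this file): create a 1MB
 * unlinked tmpfs object and drop it again.  Only shmem_file_setup(),
 * IS_ERR()/PTR_ERR() and fput() are real interfaces here.
 */
static inline int example_shmem_object(void)
{
	struct file *file;

	file = shmem_file_setup("example", 1024 * 1024, VM_NORESERVE);
	if (IS_ERR(file))
		return PTR_ERR(file);
	/* ... read and write pages through file->f_mapping ... */
	fput(file);	/* last reference gone: the unlinked object is freed */
	return 0;
}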
3136
3137 /**
3138  * shmem_zero_setup - setup a shared anonymous mapping
3139  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
3140  */
3141 int shmem_zero_setup(struct vm_area_struct *vma)
3142 {
3143         struct file *file;
3144         loff_t size = vma->vm_end - vma->vm_start;
3145
3146         file = shmem_file_setup("dev/zero", size, vma->vm_flags);
3147         if (IS_ERR(file))
3148                 return PTR_ERR(file);
3149
3150         if (vma->vm_file)
3151                 fput(vma->vm_file);
3152         vma->vm_file = file;
3153         vma->vm_ops = &shmem_vm_ops;
3154         vma->vm_flags |= VM_CAN_NONLINEAR;
3155         return 0;
3156 }
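
/*
 * Caller sketch (an assumption mirroring the mmap path, not code from
 * this file): shared anonymous mappings get a tmpfs backing file.
 */
static inline int example_back_shared_vma(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;	/* private anonymous memory needs no backing file */
}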
3157
3158 /**
3159  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3160  * @mapping:    the page's address_space
3161  * @index:      the page index
3162  * @gfp:        the page allocator flags to use if allocating
3163  *
3164  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3165  * with any new page allocations done using the specified allocation flags.
3166  * But read_cache_page_gfp() uses the ->readpage() method, which does not
3167  * suit tmpfs: it may have pages in swapcache, and needs to find those for
3168  * itself; hence this wrapper, which drivers/gpu/drm i915 and ttm rely upon.
3169  *
3170  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
3171  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
3172  */
3173 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3174                                          pgoff_t index, gfp_t gfp)
3175 {
3176 #ifdef CONFIG_SHMEM
3177         struct inode *inode = mapping->host;
3178         struct page *page = NULL;
3179         int error;
3180
3181         BUG_ON(mapping->a_ops != &shmem_aops);
3182         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
3183         if (error)
3184                 page = ERR_PTR(error);
3185         else
3186                 unlock_page(page);
3187         return page;
3188 #else
3189         /*
3190          * The tiny !SHMEM case uses ramfs without swap
3191          */
3192         return read_cache_page_gfp(mapping, index, gfp);
3193 #endif
3194 }
3195 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
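
/*
 * Usage sketch of the i915-style pattern described above (the function
 * name is invented for illustration): read a tmpfs page while telling
 * the allocator neither to retry hard nor to warn on failure.
 */
static inline struct page *example_read_shmem_page(struct address_space *mapping,
						   pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}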