/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

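	/*
	 * The advice changes vm_flags, so first try to merge the updated
	 * range with an adjacent vma; if that fails, split off the head
	 * and/or tail so that only [start, end) gets the new flags.
	 */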
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

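	/*
	 * Take the pte lock only long enough to copy each pte, then drop
	 * it before read_swap_cache_async(), which may sleep while
	 * allocating and reading in the swapped-out page.
	 */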
	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte) || pte_file(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
								vma, index);
		if (page)
			page_cache_release(page);
	}

	return 0;
}

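/*
 * Walk the page tables of [start, end) and start asynchronous swap
 * readahead for every swapped-out pte found, so that a later fault
 * finds the page already in the swap cache.
 */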
static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

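		/*
		 * shmem keeps swapped-out pages as exceptional entries in
		 * the mapping's radix tree.  A normal entry (a resident
		 * page, or nothing at all) needs no readahead.
		 */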
		page = find_get_page(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				page_cache_release(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
								NULL, 0);
		if (page)
			page_cache_release(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file || mapping_cap_swap_backed(file->f_mapping)) {
		*prev = vma;
		if (!file)
			force_swapin_readahead(vma, start, end);
		else
			force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#endif

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
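	/*
	 * Convert start/end from virtual addresses into page cache offsets
	 * within the file before asking for readahead.
	 */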
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

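	/*
	 * Nonlinear vmas carry per-pte file offsets, so pass zap_details
	 * identifying the vma; the zap code can then preserve those
	 * offsets as file ptes instead of simply clearing the entries.
	 */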
	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = do_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
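	/*
	 * Advance by the size of the (possibly compound) page that was
	 * just handled, so tail pages of a huge page are not poisoned
	 * individually.
	 */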
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
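/*
 * Illustrative userspace usage (a minimal sketch, not part of the kernel
 * source; buf and len here are hypothetical names):
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	...work on buf...
 *	madvise(buf, len, MADV_DONTNEED);  (anonymous contents now read back as zero)
 *	munmap(buf, len);
 */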
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

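	/*
	 * Plug the block layer so that any readahead I/O queued while the
	 * advice is applied (e.g. MADV_WILLNEED) can be submitted as a
	 * batch when blk_finish_plug() runs below.
	 */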
	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}