mm: introduce MADV_COLD
mm/madvise.c
// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_COLD:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out_convert_errno;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out_convert_errno;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error)
			goto out_convert_errno;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error)
			goto out_convert_errno;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out_convert_errno:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
out:
	return error;
}
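
/*
 * Hedged usage sketch (not part of the kernel build; the buffer name
 * and size are hypothetical): the flag-setting advice handled above is
 * driven from userspace like this, e.g. keeping a pinned DMA buffer
 * out of a child's address space so fork()/COW cannot move the pages:
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, 1 << 20, MADV_DONTFORK);	// sets VM_DONTCOPY
 *
 * Because vm_flags change, madvise_need_mmap_write() makes the syscall
 * take mmap_sem for writing, and madvise_behavior() may split the vma
 * when the range covers only part of it.
 */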

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!xa_is_value(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */
	get_file(file);
	up_read(&current->mm->mmap_sem);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	down_read(&current->mm->mmap_sem);
	return 0;
}
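
/*
 * Userspace sketch, for orientation only (the file name and length are
 * hypothetical): MADV_WILLNEED starts the read-in shown above (swap
 * readahead for anonymous memory, vfs_fadvise() for file mappings) and
 * returns without waiting for the I/O:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;
 *	int fd = open("data.bin", O_RDONLY);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, len, MADV_WILLNEED);	// schedule readahead
 *	// ... do other work while the pages arrive
 */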

static int madvise_cold_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);
		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			if (page_mapcount(page) != 1)
				goto huge_unlock;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		test_and_clear_page_young(page);
		deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
regular_page:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * Creating a THP page is expensive, so split an existing
		 * one only if we are sure it's worth it: split only when
		 * we are the sole owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating the page to accelerate its reclaim.
		 * The VM cannot reclaim it unless we clear PG_young. As a
		 * side effect, this confuses idle-page tracking, which
		 * will miss the recent reference history.
		 */
		test_and_clear_page_young(page);
		deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	tlb_start_vma(tlb, vma);
	/*
	 * madvise_cold_pte_range() reads the mmu_gather back out of
	 * walk->private, so the tlb must be passed here, not NULL.
	 */
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, tlb);
	tlb_end_vma(tlb, vma);
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);

	return 0;
}
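
/*
 * Minimal userspace sketch of the new advice (the identifiers below
 * are hypothetical): MADV_COLD deactivates the pages as implemented
 * above, so they are reclaimed ahead of the rest of the working set
 * under memory pressure, while their contents stay intact and a later
 * access simply brings them back:
 *
 *	#include <sys/mman.h>
 *
 *	// e.g. an allocator hinting that a freed arena is unlikely
 *	// to be touched soon:
 *	madvise(arena, arena_len, MADV_COLD);
 */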

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * a page allocation plus zeroing.
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is a THP and
		 * is owned by only this process, split it and
		 * deactivate all of its pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot
			 * clear its PG_dirty bit.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the
			 * TLB with set_pte_at() and tlb_remove_tlb_entry(),
			 * so for portability, re-install the pte as old
			 * and clean after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE only works on anonymous vmas at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start, it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_lru_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation must not
			 * leave madvise() with an undefined result.
			 * There may be an adjacent next vma that we'll
			 * walk next. userfaultfd_remove() will generate
			 * an UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}
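
/*
 * Illustrative contrast of the two behaviors dispatched above (buffer
 * name and length are hypothetical). MADV_DONTNEED zaps the range
 * immediately: the next read observes zero-fill (or a fresh fault-in
 * for file mappings). MADV_FREE only marks anonymous pages lazy-free:
 * they are discarded under memory pressure, and a write beforehand
 * cancels the advice.
 *
 *	madvise(buf, len, MADV_DONTNEED); // reads now return zeroes
 *	madvise(buf, len, MADV_FREE);	  // data may survive until
 *					  // memory pressure hits
 */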

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
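
/*
 * Userspace view of the hole punch above, as a hedged sketch (file
 * name, offsets and lengths are hypothetical). MADV_REMOVE needs a
 * writable shared file mapping and affects the backing store itself,
 * much like fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20, off = 4096, chunk = 8192;
 *	int fd = open("cache.dat", O_RDWR);
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0);
 *	madvise(p + off, chunk, MADV_REMOVE);	// punches a hole in
 *						// cache.dat as well
 */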

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned int order;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE << order) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page, and order will be 0.
		 */
		order = compound_order(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);

		/*
		 * Drop the page reference taken by get_user_pages_fast(). In
		 * the absence of MF_COUNT_INCREASED the memory_failure()
		 * routine is responsible for pinning the page to prevent it
		 * from being released back to the page allocator.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
	case MADV_COLD:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		so deactivate the pages in this range so that they can
 *		be reclaimed easily if memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
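
/*
 * End-to-end usage sketch (error handling mostly elided, values
 * hypothetical), matching the validation above: start must be
 * page-aligned, len is rounded up to a page multiple, and unmapped
 * holes inside the range are skipped but reported as -ENOMEM once the
 * walk finishes.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	long page = sysconf(_SC_PAGESIZE);
 *	void *p = mmap(NULL, 16 * page, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(p, 16 * page, MADV_COLD) != 0)
 *		perror("madvise");	// e.g. EINVAL on kernels
 *					// without MADV_COLD
 */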