1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  *
38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39  */
40
41 #include <linux/kernel_stat.h>
42 #include <linux/mm.h>
43 #include <linux/sched/mm.h>
44 #include <linux/sched/coredump.h>
45 #include <linux/sched/numa_balancing.h>
46 #include <linux/sched/task.h>
47 #include <linux/hugetlb.h>
48 #include <linux/mman.h>
49 #include <linux/swap.h>
50 #include <linux/highmem.h>
51 #include <linux/pagemap.h>
52 #include <linux/memremap.h>
53 #include <linux/ksm.h>
54 #include <linux/rmap.h>
55 #include <linux/export.h>
56 #include <linux/delayacct.h>
57 #include <linux/init.h>
58 #include <linux/pfn_t.h>
59 #include <linux/writeback.h>
60 #include <linux/memcontrol.h>
61 #include <linux/mmu_notifier.h>
62 #include <linux/kallsyms.h>
63 #include <linux/swapops.h>
64 #include <linux/elf.h>
65 #include <linux/gfp.h>
66 #include <linux/migrate.h>
67 #include <linux/string.h>
68 #include <linux/dma-debug.h>
69 #include <linux/debugfs.h>
70 #include <linux/userfaultfd_k.h>
71 #include <linux/dax.h>
72 #include <linux/oom.h>
73
74 #include <asm/io.h>
75 #include <asm/mmu_context.h>
76 #include <asm/pgalloc.h>
77 #include <linux/uaccess.h>
78 #include <asm/tlb.h>
79 #include <asm/tlbflush.h>
80 #include <asm/pgtable.h>
81
82 #include "internal.h"
83
84 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
85 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
86 #endif
87
88 #ifndef CONFIG_NEED_MULTIPLE_NODES
89 /* use the per-pgdat data instead for discontigmem - mbligh */
90 unsigned long max_mapnr;
91 EXPORT_SYMBOL(max_mapnr);
92
93 struct page *mem_map;
94 EXPORT_SYMBOL(mem_map);
95 #endif
96
97 /*
98  * A number of key systems in x86 including ioremap() rely on the assumption
99  * that high_memory defines the upper bound on direct map memory, then end
100  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
101  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
102  * and ZONE_HIGHMEM.
103  */
104 void *high_memory;
105 EXPORT_SYMBOL(high_memory);
106
107 /*
108  * Randomize the address space (stacks, mmaps, brk, etc.).
109  *
110  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
111  *   as ancient (libc5 based) binaries can segfault. )
112  */
113 int randomize_va_space __read_mostly =
114 #ifdef CONFIG_COMPAT_BRK
115                                         1;
116 #else
117                                         2;
118 #endif
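
/*
 * A short gloss (not from the original file), based on
 * Documentation/sysctl/kernel.txt: 0 disables VA-space randomization (the
 * "norandmaps" boot parameter below has the same effect), 1 randomizes the
 * stack, mmap base and VDSO but keeps brk at its traditional place for the
 * sake of ancient libc5-era binaries, and 2 additionally randomizes brk.
 */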
119
120 static int __init disable_randmaps(char *s)
121 {
122         randomize_va_space = 0;
123         return 1;
124 }
125 __setup("norandmaps", disable_randmaps);
126
127 unsigned long zero_pfn __read_mostly;
128 EXPORT_SYMBOL(zero_pfn);
129
130 unsigned long highest_memmap_pfn __read_mostly;
131
132 /*
133  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
134  */
135 static int __init init_zero_pfn(void)
136 {
137         zero_pfn = page_to_pfn(ZERO_PAGE(0));
138         return 0;
139 }
140 core_initcall(init_zero_pfn);
141
142
143 #if defined(SPLIT_RSS_COUNTING)
144
145 void sync_mm_rss(struct mm_struct *mm)
146 {
147         int i;
148
149         for (i = 0; i < NR_MM_COUNTERS; i++) {
150                 if (current->rss_stat.count[i]) {
151                         add_mm_counter(mm, i, current->rss_stat.count[i]);
152                         current->rss_stat.count[i] = 0;
153                 }
154         }
155         current->rss_stat.events = 0;
156 }
157
158 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
159 {
160         struct task_struct *task = current;
161
162         if (likely(task->mm == mm))
163                 task->rss_stat.count[member] += val;
164         else
165                 add_mm_counter(mm, member, val);
166 }
167 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
168 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
169
170 /* sync counter once per 64 page faults */
171 #define TASK_RSS_EVENTS_THRESH  (64)
172 static void check_sync_rss_stat(struct task_struct *task)
173 {
174         if (unlikely(task != current))
175                 return;
176         if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
177                 sync_mm_rss(task->mm);
178 }
179 #else /* SPLIT_RSS_COUNTING */
180
181 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
182 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
183
184 static void check_sync_rss_stat(struct task_struct *task)
185 {
186 }
187
188 #endif /* SPLIT_RSS_COUNTING */
189
190 #ifdef HAVE_GENERIC_MMU_GATHER
191
192 static bool tlb_next_batch(struct mmu_gather *tlb)
193 {
194         struct mmu_gather_batch *batch;
195
196         batch = tlb->active;
197         if (batch->next) {
198                 tlb->active = batch->next;
199                 return true;
200         }
201
202         if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
203                 return false;
204
205         batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
206         if (!batch)
207                 return false;
208
209         tlb->batch_count++;
210         batch->next = NULL;
211         batch->nr   = 0;
212         batch->max  = MAX_GATHER_BATCH;
213
214         tlb->active->next = batch;
215         tlb->active = batch;
216
217         return true;
218 }
219
220 void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
221                                 unsigned long start, unsigned long end)
222 {
223         tlb->mm = mm;
224
225         /* Is it from 0 to ~0? */
226         tlb->fullmm     = !(start | (end+1));
227         tlb->need_flush_all = 0;
228         tlb->local.next = NULL;
229         tlb->local.nr   = 0;
230         tlb->local.max  = ARRAY_SIZE(tlb->__pages);
231         tlb->active     = &tlb->local;
232         tlb->batch_count = 0;
233
234 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
235         tlb->batch = NULL;
236 #endif
237         tlb->page_size = 0;
238
239         __tlb_reset_range(tlb);
240 }
241
242 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
243 {
244         if (!tlb->end)
245                 return;
246
247         tlb_flush(tlb);
248         mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
249         __tlb_reset_range(tlb);
250 }
251
252 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
253 {
254         struct mmu_gather_batch *batch;
255
256 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
257         tlb_table_flush(tlb);
258 #endif
259         for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
260                 free_pages_and_swap_cache(batch->pages, batch->nr);
261                 batch->nr = 0;
262         }
263         tlb->active = &tlb->local;
264 }
265
266 void tlb_flush_mmu(struct mmu_gather *tlb)
267 {
268         tlb_flush_mmu_tlbonly(tlb);
269         tlb_flush_mmu_free(tlb);
270 }
271
272 /* tlb_finish_mmu
273  *      Called at the end of the shootdown operation to free up any resources
274  *      that were required.
275  */
276 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
277                 unsigned long start, unsigned long end, bool force)
278 {
279         struct mmu_gather_batch *batch, *next;
280
281         if (force)
282                 __tlb_adjust_range(tlb, start, end - start);
283
284         tlb_flush_mmu(tlb);
285
286         /* keep the page table cache within bounds */
287         check_pgt_cache();
288
289         for (batch = tlb->local.next; batch; batch = next) {
290                 next = batch->next;
291                 free_pages((unsigned long)batch, 0);
292         }
293         tlb->local.next = NULL;
294 }
295
296 /* __tlb_remove_page
297  *      Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
298  *      handling the additional races in SMP caused by other CPUs caching valid
299  *      mappings in their TLBs. When the page batch is full we must call
300  *      tlb_flush_mmu().
301  *      Returns true if the caller should flush.
302  */
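/*
 * A rough sketch of the expected calling pattern (illustrative, not part of
 * this file); the tlb_remove_page*() helpers in <asm-generic/tlb.h> use it
 * roughly like:
 *
 *	if (__tlb_remove_page_size(tlb, page, PAGE_SIZE))
 *		tlb_flush_mmu(tlb);
 */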
303 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
304 {
305         struct mmu_gather_batch *batch;
306
307         VM_BUG_ON(!tlb->end);
308         VM_WARN_ON(tlb->page_size != page_size);
309
310         batch = tlb->active;
311         /*
312          * Add the page and check if we are full. If so
313          * force a flush.
314          */
315         batch->pages[batch->nr++] = page;
316         if (batch->nr == batch->max) {
317                 if (!tlb_next_batch(tlb))
318                         return true;
319                 batch = tlb->active;
320         }
321         VM_BUG_ON_PAGE(batch->nr > batch->max, page);
322
323         return false;
324 }
325
326 #endif /* HAVE_GENERIC_MMU_GATHER */
327
328 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
329
330 /*
331  * See the comment near struct mmu_table_batch.
332  */
333
334 /*
335  * If we want tlb_remove_table() to imply TLB invalidates.
336  */
337 static inline void tlb_table_invalidate(struct mmu_gather *tlb)
338 {
339 #ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
340         /*
341          * Invalidate page-table caches used by hardware walkers. Then we still
342          * need to RCU-sched wait while freeing the pages because software
343          * walkers can still be in-flight.
344          */
345         tlb_flush_mmu_tlbonly(tlb);
346 #endif
347 }
348
349 static void tlb_remove_table_smp_sync(void *arg)
350 {
351         /* Simply deliver the interrupt */
352 }
353
354 static void tlb_remove_table_one(void *table)
355 {
356         /*
357          * This isn't an RCU grace period and hence the page-tables cannot be
358          * assumed to be actually RCU-freed.
359          *
360          * It is however sufficient for software page-table walkers that rely on
361          * IRQ disabling. See the comment near struct mmu_table_batch.
362          */
363         smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
364         __tlb_remove_table(table);
365 }
366
367 static void tlb_remove_table_rcu(struct rcu_head *head)
368 {
369         struct mmu_table_batch *batch;
370         int i;
371
372         batch = container_of(head, struct mmu_table_batch, rcu);
373
374         for (i = 0; i < batch->nr; i++)
375                 __tlb_remove_table(batch->tables[i]);
376
377         free_page((unsigned long)batch);
378 }
379
380 void tlb_table_flush(struct mmu_gather *tlb)
381 {
382         struct mmu_table_batch **batch = &tlb->batch;
383
384         if (*batch) {
385                 tlb_table_invalidate(tlb);
386                 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
387                 *batch = NULL;
388         }
389 }
390
391 void tlb_remove_table(struct mmu_gather *tlb, void *table)
392 {
393         struct mmu_table_batch **batch = &tlb->batch;
394
395         if (*batch == NULL) {
396                 *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
397                 if (*batch == NULL) {
398                         tlb_table_invalidate(tlb);
399                         tlb_remove_table_one(table);
400                         return;
401                 }
402                 (*batch)->nr = 0;
403         }
404
405         (*batch)->tables[(*batch)->nr++] = table;
406         if ((*batch)->nr == MAX_TABLE_BATCH)
407                 tlb_table_flush(tlb);
408 }
409
410 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
411
412 /* tlb_gather_mmu
413  *      Called to initialize an (on-stack) mmu_gather structure for page-table
414  *      tear-down from @mm. The @fullmm argument is used when @mm is without
415  *      users and we're going to destroy the full address space (exit/execve).
416  */
417 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
418                         unsigned long start, unsigned long end)
419 {
420         arch_tlb_gather_mmu(tlb, mm, start, end);
421         inc_tlb_flush_pending(tlb->mm);
422 }
423
424 void tlb_finish_mmu(struct mmu_gather *tlb,
425                 unsigned long start, unsigned long end)
426 {
427         /*
428          * If there are parallel threads doing PTE changes on the same range
429          * under a non-exclusive lock (e.g., mmap_sem held for read) but deferring
430          * the TLB flush by batching, a thread with a stale TLB entry can fail to
431          * flush it (by observing pte_none or !pte_dirty, for example), so flush
432          * the TLB forcefully if we detect parallel PTE batching threads.
433          */
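        /*
         * An illustrative scenario (not from the original comment): two
         * threads run MADV_DONTNEED on overlapping ranges with mmap_sem held
         * for read; thread A clears PTEs but defers its TLB flush, thread B
         * then observes pte_none and skips flushing, and A's stale TLB entry
         * could still reach a page about to be freed. mm_tlb_flush_nested()
         * detects such parallel batching so we can force the flush.
         */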
434         bool force = mm_tlb_flush_nested(tlb->mm);
435
436         arch_tlb_finish_mmu(tlb, start, end, force);
437         dec_tlb_flush_pending(tlb->mm);
438 }
439
440 /*
441  * Note: this doesn't free the actual pages themselves. That
442  * has been handled earlier when unmapping all the memory regions.
443  */
444 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
445                            unsigned long addr)
446 {
447         pgtable_t token = pmd_pgtable(*pmd);
448         pmd_clear(pmd);
449         pte_free_tlb(tlb, token, addr);
450         atomic_long_dec(&tlb->mm->nr_ptes);
451 }
452
453 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
454                                 unsigned long addr, unsigned long end,
455                                 unsigned long floor, unsigned long ceiling)
456 {
457         pmd_t *pmd;
458         unsigned long next;
459         unsigned long start;
460
461         start = addr;
462         pmd = pmd_offset(pud, addr);
463         do {
464                 next = pmd_addr_end(addr, end);
465                 if (pmd_none_or_clear_bad(pmd))
466                         continue;
467                 free_pte_range(tlb, pmd, addr);
468         } while (pmd++, addr = next, addr != end);
469
470         start &= PUD_MASK;
471         if (start < floor)
472                 return;
473         if (ceiling) {
474                 ceiling &= PUD_MASK;
475                 if (!ceiling)
476                         return;
477         }
478         if (end - 1 > ceiling - 1)
479                 return;
480
481         pmd = pmd_offset(pud, start);
482         pud_clear(pud);
483         pmd_free_tlb(tlb, pmd, start);
484         mm_dec_nr_pmds(tlb->mm);
485 }
486
487 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
488                                 unsigned long addr, unsigned long end,
489                                 unsigned long floor, unsigned long ceiling)
490 {
491         pud_t *pud;
492         unsigned long next;
493         unsigned long start;
494
495         start = addr;
496         pud = pud_offset(p4d, addr);
497         do {
498                 next = pud_addr_end(addr, end);
499                 if (pud_none_or_clear_bad(pud))
500                         continue;
501                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
502         } while (pud++, addr = next, addr != end);
503
504         start &= P4D_MASK;
505         if (start < floor)
506                 return;
507         if (ceiling) {
508                 ceiling &= P4D_MASK;
509                 if (!ceiling)
510                         return;
511         }
512         if (end - 1 > ceiling - 1)
513                 return;
514
515         pud = pud_offset(p4d, start);
516         p4d_clear(p4d);
517         pud_free_tlb(tlb, pud, start);
518 }
519
520 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
521                                 unsigned long addr, unsigned long end,
522                                 unsigned long floor, unsigned long ceiling)
523 {
524         p4d_t *p4d;
525         unsigned long next;
526         unsigned long start;
527
528         start = addr;
529         p4d = p4d_offset(pgd, addr);
530         do {
531                 next = p4d_addr_end(addr, end);
532                 if (p4d_none_or_clear_bad(p4d))
533                         continue;
534                 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
535         } while (p4d++, addr = next, addr != end);
536
537         start &= PGDIR_MASK;
538         if (start < floor)
539                 return;
540         if (ceiling) {
541                 ceiling &= PGDIR_MASK;
542                 if (!ceiling)
543                         return;
544         }
545         if (end - 1 > ceiling - 1)
546                 return;
547
548         p4d = p4d_offset(pgd, start);
549         pgd_clear(pgd);
550         p4d_free_tlb(tlb, p4d, start);
551 }
552
553 /*
554  * This function frees user-level page tables of a process.
555  */
556 void free_pgd_range(struct mmu_gather *tlb,
557                         unsigned long addr, unsigned long end,
558                         unsigned long floor, unsigned long ceiling)
559 {
560         pgd_t *pgd;
561         unsigned long next;
562
563         /*
564          * The next few lines have given us lots of grief...
565          *
566          * Why are we testing PMD* at this top level?  Because often
567          * there will be no work to do at all, and we'd prefer not to
568          * go all the way down to the bottom just to discover that.
569          *
570          * Why all these "- 1"s?  Because 0 represents both the bottom
571          * of the address space and the top of it (using -1 for the
572          * top wouldn't help much: the masks would do the wrong thing).
573          * The rule is that addr 0 and floor 0 refer to the bottom of
574  * the address space, but end 0 and ceiling 0 refer to the top.
575          * Comparisons need to use "end - 1" and "ceiling - 1" (though
576          * that end 0 case should be mythical).
577          *
578          * Wherever addr is brought up or ceiling brought down, we must
579          * be careful to reject "the opposite 0" before it confuses the
580          * subsequent tests.  But what about where end is brought down
581  * by PMD_SIZE below? No, end can't go down to 0 there.
582          *
583          * Whereas we round start (addr) and ceiling down, by different
584          * masks at different levels, in order to test whether a table
585          * now has no other vmas using it, so can be freed, we don't
586          * bother to round floor or end up - the tests don't need that.
587          */
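        /*
         * An illustration of the "- 1" trick above (not from the original
         * comment): these are unsigned longs, so with ceiling == 0 the value
         * "ceiling - 1" wraps to ULONG_MAX and the "end - 1 > ceiling - 1"
         * comparisons here and in the free_*_range() helpers never exclude
         * anything, which is exactly how a ceiling of 0 comes to mean "top of
         * the address space"; any non-zero ceiling compares normally.
         */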
588
589         addr &= PMD_MASK;
590         if (addr < floor) {
591                 addr += PMD_SIZE;
592                 if (!addr)
593                         return;
594         }
595         if (ceiling) {
596                 ceiling &= PMD_MASK;
597                 if (!ceiling)
598                         return;
599         }
600         if (end - 1 > ceiling - 1)
601                 end -= PMD_SIZE;
602         if (addr > end - 1)
603                 return;
604         /*
605          * We add page table cache pages with PAGE_SIZE
606          * (see pte_free_tlb()), so flush the TLB if we need to.
607          */
608         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
609         pgd = pgd_offset(tlb->mm, addr);
610         do {
611                 next = pgd_addr_end(addr, end);
612                 if (pgd_none_or_clear_bad(pgd))
613                         continue;
614                 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
615         } while (pgd++, addr = next, addr != end);
616 }
617
618 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
619                 unsigned long floor, unsigned long ceiling)
620 {
621         while (vma) {
622                 struct vm_area_struct *next = vma->vm_next;
623                 unsigned long addr = vma->vm_start;
624
625                 /*
626                  * Hide vma from rmap and truncate_pagecache before freeing
627                  * pgtables
628                  */
629                 unlink_anon_vmas(vma);
630                 unlink_file_vma(vma);
631
632                 if (is_vm_hugetlb_page(vma)) {
633                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
634                                 floor, next ? next->vm_start : ceiling);
635                 } else {
636                         /*
637                          * Optimization: gather nearby vmas into one call down
638                          */
639                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
640                                && !is_vm_hugetlb_page(next)) {
641                                 vma = next;
642                                 next = vma->vm_next;
643                                 unlink_anon_vmas(vma);
644                                 unlink_file_vma(vma);
645                         }
646                         free_pgd_range(tlb, addr, vma->vm_end,
647                                 floor, next ? next->vm_start : ceiling);
648                 }
649                 vma = next;
650         }
651 }
652
653 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
654 {
655         spinlock_t *ptl;
656         pgtable_t new = pte_alloc_one(mm, address);
657         if (!new)
658                 return -ENOMEM;
659
660         /*
661          * Ensure all pte setup (eg. pte page lock and page clearing) are
662          * visible before the pte is made visible to other CPUs by being
663          * put into page tables.
664          *
665          * The other side of the story is the pointer chasing in the page
666          * table walking code (when walking the page table without locking;
667          * ie. most of the time). Fortunately, these data accesses consist
668          * of a chain of data-dependent loads, meaning most CPUs (alpha
669          * being the notable exception) will already guarantee loads are
670          * seen in-order. See the alpha page table accessors for the
671          * smp_read_barrier_depends() barriers in page table walking code.
672          */
673         smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
674
675         ptl = pmd_lock(mm, pmd);
676         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
677                 atomic_long_inc(&mm->nr_ptes);
678                 pmd_populate(mm, pmd, new);
679                 new = NULL;
680         }
681         spin_unlock(ptl);
682         if (new)
683                 pte_free(mm, new);
684         return 0;
685 }
686
687 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
688 {
689         pte_t *new = pte_alloc_one_kernel(&init_mm, address);
690         if (!new)
691                 return -ENOMEM;
692
693         smp_wmb(); /* See comment in __pte_alloc */
694
695         spin_lock(&init_mm.page_table_lock);
696         if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
697                 pmd_populate_kernel(&init_mm, pmd, new);
698                 new = NULL;
699         }
700         spin_unlock(&init_mm.page_table_lock);
701         if (new)
702                 pte_free_kernel(&init_mm, new);
703         return 0;
704 }
705
706 static inline void init_rss_vec(int *rss)
707 {
708         memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
709 }
710
711 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
712 {
713         int i;
714
715         if (current->mm == mm)
716                 sync_mm_rss(mm);
717         for (i = 0; i < NR_MM_COUNTERS; i++)
718                 if (rss[i])
719                         add_mm_counter(mm, i, rss[i]);
720 }
721
722 /*
723  * This function is called to print an error when a bad pte
724  * is found. For example, we might have a PFN-mapped pte in
725  * a region that doesn't allow it.
726  *
727  * The calling function must still handle the error.
728  */
729 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
730                           pte_t pte, struct page *page)
731 {
732         pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
733         p4d_t *p4d = p4d_offset(pgd, addr);
734         pud_t *pud = pud_offset(p4d, addr);
735         pmd_t *pmd = pmd_offset(pud, addr);
736         struct address_space *mapping;
737         pgoff_t index;
738         static unsigned long resume;
739         static unsigned long nr_shown;
740         static unsigned long nr_unshown;
741
742         /*
743          * Allow a burst of 60 reports, then keep quiet for that minute;
744          * or allow a steady drip of one report per second.
745          */
746         if (nr_shown == 60) {
747                 if (time_before(jiffies, resume)) {
748                         nr_unshown++;
749                         return;
750                 }
751                 if (nr_unshown) {
752                         pr_alert("BUG: Bad page map: %lu messages suppressed\n",
753                                  nr_unshown);
754                         nr_unshown = 0;
755                 }
756                 nr_shown = 0;
757         }
758         if (nr_shown++ == 0)
759                 resume = jiffies + 60 * HZ;
760
761         mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
762         index = linear_page_index(vma, addr);
763
764         pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
765                  current->comm,
766                  (long long)pte_val(pte), (long long)pmd_val(*pmd));
767         if (page)
768                 dump_page(page, "bad pte");
769         pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
770                  (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
771         /*
772          * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
773          */
774         pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
775                  vma->vm_file,
776                  vma->vm_ops ? vma->vm_ops->fault : NULL,
777                  vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
778                  mapping ? mapping->a_ops->readpage : NULL);
779         dump_stack();
780         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
781 }
782
783 /*
784  * vm_normal_page -- This function gets the "struct page" associated with a pte.
785  *
786  * "Special" mappings do not wish to be associated with a "struct page" (either
787  * it doesn't exist, or it exists but they don't want to touch it). In this
788  * case, NULL is returned here. "Normal" mappings do have a struct page.
789  *
790  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
791  * pte bit, in which case this function is trivial. Secondly, an architecture
792  * may not have a spare pte bit, which requires a more complicated scheme,
793  * described below.
794  *
795  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
796  * special mapping (even if there are underlying and valid "struct pages").
797  * COWed pages of a VM_PFNMAP are always normal.
798  *
799  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
800  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
801  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
802  * mapping will always honor the rule
803  *
804  *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
805  *
806  * And for normal mappings this is false.
807  *
808  * This restricts such mappings to be a linear translation from virtual address
809  * to pfn. To get around this restriction, we allow arbitrary mappings so long
810  * as the vma is not a COW mapping; in that case, we know that all ptes are
811  * special (because none can have been COWed).
812  *
813  *
814  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
815  *
816  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
817  * page" backing; however, the difference is that _all_ pages with a struct
818  * page (that is, those where pfn_valid is true) are refcounted and considered
819  * normal pages by the VM. The disadvantage is that pages are refcounted
820  * (which can be slower and simply not an option for some PFNMAP users). The
821  * advantage is that we don't have to follow the strict linearity rule of
822  * PFNMAP mappings in order to support COWable mappings.
823  *
824  */
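/*
 * A worked example of the rule above (hypothetical numbers, assuming 4KB
 * pages): for a remap_pfn_range() mapping with vm_start == 0x40000000 and
 * vm_pgoff == 0x100, the pte at address 0x40002000 is "special" when its pfn
 * is 0x100 + ((0x40002000 - 0x40000000) >> PAGE_SHIFT) == 0x102; a COWed page
 * at the same address is a freshly allocated one with some other pfn, so the
 * identity fails and it is treated as normal.
 */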
825 #ifdef __HAVE_ARCH_PTE_SPECIAL
826 # define HAVE_PTE_SPECIAL 1
827 #else
828 # define HAVE_PTE_SPECIAL 0
829 #endif
830 struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
831                              pte_t pte, bool with_public_device)
832 {
833         unsigned long pfn = pte_pfn(pte);
834
835         if (HAVE_PTE_SPECIAL) {
836                 if (likely(!pte_special(pte)))
837                         goto check_pfn;
838                 if (vma->vm_ops && vma->vm_ops->find_special_page)
839                         return vma->vm_ops->find_special_page(vma, addr);
840                 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
841                         return NULL;
842                 if (is_zero_pfn(pfn))
843                         return NULL;
844
845                 /*
846                  * Device public pages are special pages (they are ZONE_DEVICE
847                  * pages but different from persistent memory). They behave
848                  * almost like normal pages. The difference is that they are
849                  * not on the LRU and thus should never be involved with any-
850                  * thing that involves LRU manipulation (mlock, NUMA balancing,
851                  * ...).
852                  *
853                  * This is why we still want to return NULL for such pages from
854                  * vm_normal_page() so that we do not have to special-case all
855                  * call sites of vm_normal_page().
856                  */
857                 if (likely(pfn <= highest_memmap_pfn)) {
858                         struct page *page = pfn_to_page(pfn);
859
860                         if (is_device_public_page(page)) {
861                                 if (with_public_device)
862                                         return page;
863                                 return NULL;
864                         }
865                 }
866                 print_bad_pte(vma, addr, pte, NULL);
867                 return NULL;
868         }
869
870         /* !HAVE_PTE_SPECIAL case follows: */
871
872         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
873                 if (vma->vm_flags & VM_MIXEDMAP) {
874                         if (!pfn_valid(pfn))
875                                 return NULL;
876                         goto out;
877                 } else {
878                         unsigned long off;
879                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
880                         if (pfn == vma->vm_pgoff + off)
881                                 return NULL;
882                         if (!is_cow_mapping(vma->vm_flags))
883                                 return NULL;
884                 }
885         }
886
887         if (is_zero_pfn(pfn))
888                 return NULL;
889 check_pfn:
890         if (unlikely(pfn > highest_memmap_pfn)) {
891                 print_bad_pte(vma, addr, pte, NULL);
892                 return NULL;
893         }
894
895         /*
896          * NOTE! We still have PageReserved() pages in the page tables.
897          * eg. VDSO mappings can cause them to exist.
898          */
899 out:
900         return pfn_to_page(pfn);
901 }
902
903 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
904 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
905                                 pmd_t pmd)
906 {
907         unsigned long pfn = pmd_pfn(pmd);
908
909         /*
910          * There is no pmd_special() but there may be special pmds, e.g.
911          * in a direct-access (dax) mapping, so let's just replicate the
912          * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
913          */
914         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
915                 if (vma->vm_flags & VM_MIXEDMAP) {
916                         if (!pfn_valid(pfn))
917                                 return NULL;
918                         goto out;
919                 } else {
920                         unsigned long off;
921                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
922                         if (pfn == vma->vm_pgoff + off)
923                                 return NULL;
924                         if (!is_cow_mapping(vma->vm_flags))
925                                 return NULL;
926                 }
927         }
928
929         if (is_zero_pfn(pfn))
930                 return NULL;
931         if (unlikely(pfn > highest_memmap_pfn))
932                 return NULL;
933
934         /*
935          * NOTE! We still have PageReserved() pages in the page tables.
936          * eg. VDSO mappings can cause them to exist.
937          */
938 out:
939         return pfn_to_page(pfn);
940 }
941 #endif
942
943 /*
944  * Copy one vm_area from one task to the other. Assumes that the page tables
945  * already present in the new task have been cleared in the whole range
946  * covered by this vma.
947  */
948
949 static inline unsigned long
950 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
951                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
952                 unsigned long addr, int *rss)
953 {
954         unsigned long vm_flags = vma->vm_flags;
955         pte_t pte = *src_pte;
956         struct page *page;
957
958         /* pte contains position in swap or file, so copy. */
959         if (unlikely(!pte_present(pte))) {
960                 swp_entry_t entry = pte_to_swp_entry(pte);
961
962                 if (likely(!non_swap_entry(entry))) {
963                         if (swap_duplicate(entry) < 0)
964                                 return entry.val;
965
966                         /* make sure dst_mm is on swapoff's mmlist. */
967                         if (unlikely(list_empty(&dst_mm->mmlist))) {
968                                 spin_lock(&mmlist_lock);
969                                 if (list_empty(&dst_mm->mmlist))
970                                         list_add(&dst_mm->mmlist,
971                                                         &src_mm->mmlist);
972                                 spin_unlock(&mmlist_lock);
973                         }
974                         rss[MM_SWAPENTS]++;
975                 } else if (is_migration_entry(entry)) {
976                         page = migration_entry_to_page(entry);
977
978                         rss[mm_counter(page)]++;
979
980                         if (is_write_migration_entry(entry) &&
981                                         is_cow_mapping(vm_flags)) {
982                                 /*
983                                  * COW mappings require pages in both
984                                  * parent and child to be set to read.
985                                  */
986                                 make_migration_entry_read(&entry);
987                                 pte = swp_entry_to_pte(entry);
988                                 if (pte_swp_soft_dirty(*src_pte))
989                                         pte = pte_swp_mksoft_dirty(pte);
990                                 set_pte_at(src_mm, addr, src_pte, pte);
991                         }
992                 } else if (is_device_private_entry(entry)) {
993                         page = device_private_entry_to_page(entry);
994
995                         /*
996                          * Update rss count even for unaddressable pages, as
997                          * they should be treated just like normal pages in this
998                          * respect.
999                          *
1000                          * We will likely want to have some new rss counters
1001                          * for unaddressable pages, at some point. But for now
1002                          * keep things as they are.
1003                          */
1004                         get_page(page);
1005                         rss[mm_counter(page)]++;
1006                         page_dup_rmap(page, false);
1007
1008                         /*
1009                          * We do not preserve soft-dirty information, because so
1010                          * far, checkpoint/restore is the only feature that
1011                          * requires that. And checkpoint/restore does not work
1012                          * when a device driver is involved (you cannot easily
1013                          * save and restore device driver state).
1014                          */
1015                         if (is_write_device_private_entry(entry) &&
1016                             is_cow_mapping(vm_flags)) {
1017                                 make_device_private_entry_read(&entry);
1018                                 pte = swp_entry_to_pte(entry);
1019                                 set_pte_at(src_mm, addr, src_pte, pte);
1020                         }
1021                 }
1022                 goto out_set_pte;
1023         }
1024
1025         /*
1026          * If it's a COW mapping, write protect it both
1027          * in the parent and the child
1028          */
1029         if (is_cow_mapping(vm_flags)) {
1030                 ptep_set_wrprotect(src_mm, addr, src_pte);
1031                 pte = pte_wrprotect(pte);
1032         }
1033
1034         /*
1035          * If it's a shared mapping, mark it clean in
1036          * the child
1037          */
1038         if (vm_flags & VM_SHARED)
1039                 pte = pte_mkclean(pte);
1040         pte = pte_mkold(pte);
1041
1042         page = vm_normal_page(vma, addr, pte);
1043         if (page) {
1044                 get_page(page);
1045                 page_dup_rmap(page, false);
1046                 rss[mm_counter(page)]++;
1047         } else if (pte_devmap(pte)) {
1048                 page = pte_page(pte);
1049
1050                 /*
1051                  * Cache coherent device memory behaves like regular pages and
1052                  * not like persistent memory pages. For more information see
1053                  * MEMORY_DEVICE_CACHE_COHERENT in memory_hotplug.h
1054                  */
1055                 if (is_device_public_page(page)) {
1056                         get_page(page);
1057                         page_dup_rmap(page, false);
1058                         rss[mm_counter(page)]++;
1059                 }
1060         }
1061
1062 out_set_pte:
1063         set_pte_at(dst_mm, addr, dst_pte, pte);
1064         return 0;
1065 }
1066
1067 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1068                    pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
1069                    unsigned long addr, unsigned long end)
1070 {
1071         pte_t *orig_src_pte, *orig_dst_pte;
1072         pte_t *src_pte, *dst_pte;
1073         spinlock_t *src_ptl, *dst_ptl;
1074         int progress = 0;
1075         int rss[NR_MM_COUNTERS];
1076         swp_entry_t entry = (swp_entry_t){0};
1077
1078 again:
1079         init_rss_vec(rss);
1080
1081         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
1082         if (!dst_pte)
1083                 return -ENOMEM;
1084         src_pte = pte_offset_map(src_pmd, addr);
1085         src_ptl = pte_lockptr(src_mm, src_pmd);
1086         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1087         orig_src_pte = src_pte;
1088         orig_dst_pte = dst_pte;
1089         arch_enter_lazy_mmu_mode();
1090
1091         do {
1092                 /*
1093                  * We are holding two locks at this point - either of them
1094                  * could generate latencies in another task on another CPU.
1095                  */
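                /*
                 * An added note (not in the original): "progress" is a rough
                 * cost estimate rather than a pte count; empty entries add 1
                 * below while copied entries add 8, so this lock-break and
                 * resched check fires sooner when real copying work is being
                 * done.
                 */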
1096                 if (progress >= 32) {
1097                         progress = 0;
1098                         if (need_resched() ||
1099                             spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1100                                 break;
1101                 }
1102                 if (pte_none(*src_pte)) {
1103                         progress++;
1104                         continue;
1105                 }
1106                 entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
1107                                                         vma, addr, rss);
1108                 if (entry.val)
1109                         break;
1110                 progress += 8;
1111         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1112
1113         arch_leave_lazy_mmu_mode();
1114         spin_unlock(src_ptl);
1115         pte_unmap(orig_src_pte);
1116         add_mm_rss_vec(dst_mm, rss);
1117         pte_unmap_unlock(orig_dst_pte, dst_ptl);
1118         cond_resched();
1119
1120         if (entry.val) {
1121                 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
1122                         return -ENOMEM;
1123                 progress = 0;
1124         }
1125         if (addr != end)
1126                 goto again;
1127         return 0;
1128 }
1129
1130 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1131                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
1132                 unsigned long addr, unsigned long end)
1133 {
1134         pmd_t *src_pmd, *dst_pmd;
1135         unsigned long next;
1136
1137         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1138         if (!dst_pmd)
1139                 return -ENOMEM;
1140         src_pmd = pmd_offset(src_pud, addr);
1141         do {
1142                 next = pmd_addr_end(addr, end);
1143                 if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1144                         || pmd_devmap(*src_pmd)) {
1145                         int err;
1146                         VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
1147                         err = copy_huge_pmd(dst_mm, src_mm,
1148                                             dst_pmd, src_pmd, addr, vma);
1149                         if (err == -ENOMEM)
1150                                 return -ENOMEM;
1151                         if (!err)
1152                                 continue;
1153                         /* fall through */
1154                 }
1155                 if (pmd_none_or_clear_bad(src_pmd))
1156                         continue;
1157                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
1158                                                 vma, addr, next))
1159                         return -ENOMEM;
1160         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
1161         return 0;
1162 }
1163
1164 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1165                 p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
1166                 unsigned long addr, unsigned long end)
1167 {
1168         pud_t *src_pud, *dst_pud;
1169         unsigned long next;
1170
1171         dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1172         if (!dst_pud)
1173                 return -ENOMEM;
1174         src_pud = pud_offset(src_p4d, addr);
1175         do {
1176                 next = pud_addr_end(addr, end);
1177                 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1178                         int err;
1179
1180                         VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
1181                         err = copy_huge_pud(dst_mm, src_mm,
1182                                             dst_pud, src_pud, addr, vma);
1183                         if (err == -ENOMEM)
1184                                 return -ENOMEM;
1185                         if (!err)
1186                                 continue;
1187                         /* fall through */
1188                 }
1189                 if (pud_none_or_clear_bad(src_pud))
1190                         continue;
1191                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
1192                                                 vma, addr, next))
1193                         return -ENOMEM;
1194         } while (dst_pud++, src_pud++, addr = next, addr != end);
1195         return 0;
1196 }
1197
1198 static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1199                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
1200                 unsigned long addr, unsigned long end)
1201 {
1202         p4d_t *src_p4d, *dst_p4d;
1203         unsigned long next;
1204
1205         dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1206         if (!dst_p4d)
1207                 return -ENOMEM;
1208         src_p4d = p4d_offset(src_pgd, addr);
1209         do {
1210                 next = p4d_addr_end(addr, end);
1211                 if (p4d_none_or_clear_bad(src_p4d))
1212                         continue;
1213                 if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
1214                                                 vma, addr, next))
1215                         return -ENOMEM;
1216         } while (dst_p4d++, src_p4d++, addr = next, addr != end);
1217         return 0;
1218 }
1219
1220 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1221                 struct vm_area_struct *vma)
1222 {
1223         pgd_t *src_pgd, *dst_pgd;
1224         unsigned long next;
1225         unsigned long addr = vma->vm_start;
1226         unsigned long end = vma->vm_end;
1227         unsigned long mmun_start;       /* For mmu_notifiers */
1228         unsigned long mmun_end;         /* For mmu_notifiers */
1229         bool is_cow;
1230         int ret;
1231
1232         /*
1233          * Don't copy ptes where a page fault will fill them correctly.
1234          * Fork becomes much lighter when there are big shared or private
1235          * readonly mappings. The tradeoff is that copy_page_range is more
1236          * efficient than faulting.
1237          */
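        /*
         * An example of when this triggers (illustrative, not from the
         * original comment): a large MAP_PRIVATE read-only file mapping such
         * as shared library text that has never been written has no anon_vma,
         * so nothing is copied at fork time and the child simply refaults the
         * pages from the page cache on demand.
         */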
1238         if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1239                         !vma->anon_vma)
1240                 return 0;
1241
1242         if (is_vm_hugetlb_page(vma))
1243                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
1244
1245         if (unlikely(vma->vm_flags & VM_PFNMAP)) {
1246                 /*
1247                  * We do not free on error cases below as remove_vma
1248                  * gets called on error from a higher level routine
1249                  */
1250                 ret = track_pfn_copy(vma);
1251                 if (ret)
1252                         return ret;
1253         }
1254
1255         /*
1256          * We need to invalidate the secondary MMU mappings only when
1257          * there could be a permission downgrade on the ptes of the
1258          * parent mm. And a permission downgrade will only happen if
1259          * is_cow_mapping() returns true.
1260          */
1261         is_cow = is_cow_mapping(vma->vm_flags);
1262         mmun_start = addr;
1263         mmun_end   = end;
1264         if (is_cow)
1265                 mmu_notifier_invalidate_range_start(src_mm, mmun_start,
1266                                                     mmun_end);
1267
1268         ret = 0;
1269         dst_pgd = pgd_offset(dst_mm, addr);
1270         src_pgd = pgd_offset(src_mm, addr);
1271         do {
1272                 next = pgd_addr_end(addr, end);
1273                 if (pgd_none_or_clear_bad(src_pgd))
1274                         continue;
1275                 if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
1276                                             vma, addr, next))) {
1277                         ret = -ENOMEM;
1278                         break;
1279                 }
1280         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
1281
1282         if (is_cow)
1283                 mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end);
1284         return ret;
1285 }
1286
1287 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1288                                 struct vm_area_struct *vma, pmd_t *pmd,
1289                                 unsigned long addr, unsigned long end,
1290                                 struct zap_details *details)
1291 {
1292         struct mm_struct *mm = tlb->mm;
1293         int force_flush = 0;
1294         int rss[NR_MM_COUNTERS];
1295         spinlock_t *ptl;
1296         pte_t *start_pte;
1297         pte_t *pte;
1298         swp_entry_t entry;
1299
1300         tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
1301 again:
1302         init_rss_vec(rss);
1303         start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1304         pte = start_pte;
1305         flush_tlb_batched_pending(mm);
1306         arch_enter_lazy_mmu_mode();
1307         do {
1308                 pte_t ptent = *pte;
1309                 if (pte_none(ptent))
1310                         continue;
1311
1312                 if (pte_present(ptent)) {
1313                         struct page *page;
1314
1315                         page = _vm_normal_page(vma, addr, ptent, true);
1316                         if (unlikely(details) && page) {
1317                                 /*
1318                                  * unmap_shared_mapping_pages() wants to
1319                                  * invalidate cache without truncating:
1320                                  * unmap shared but keep private pages.
1321                                  */
1322                                 if (details->check_mapping &&
1323                                     details->check_mapping != page_rmapping(page))
1324                                         continue;
1325                         }
1326                         ptent = ptep_get_and_clear_full(mm, addr, pte,
1327                                                         tlb->fullmm);
1328                         tlb_remove_tlb_entry(tlb, pte, addr);
1329                         if (unlikely(!page))
1330                                 continue;
1331
1332                         if (!PageAnon(page)) {
1333                                 if (pte_dirty(ptent)) {
1334                                         force_flush = 1;
1335                                         set_page_dirty(page);
1336                                 }
1337                                 if (pte_young(ptent) &&
1338                                     likely(!(vma->vm_flags & VM_SEQ_READ)))
1339                                         mark_page_accessed(page);
1340                         }
1341                         rss[mm_counter(page)]--;
1342                         page_remove_rmap(page, false);
1343                         if (unlikely(page_mapcount(page) < 0))
1344                                 print_bad_pte(vma, addr, ptent, page);
1345                         if (unlikely(__tlb_remove_page(tlb, page))) {
1346                                 force_flush = 1;
1347                                 addr += PAGE_SIZE;
1348                                 break;
1349                         }
1350                         continue;
1351                 }
1352
1353                 entry = pte_to_swp_entry(ptent);
1354                 if (non_swap_entry(entry) && is_device_private_entry(entry)) {
1355                         struct page *page = device_private_entry_to_page(entry);
1356
1357                         if (unlikely(details && details->check_mapping)) {
1358                                 /*
1359                                  * unmap_shared_mapping_pages() wants to
1360                                  * invalidate cache without truncating:
1361                                  * unmap shared but keep private pages.
1362                                  */
1363                                 if (details->check_mapping !=
1364                                     page_rmapping(page))
1365                                         continue;
1366                         }
1367
1368                         pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1369                         rss[mm_counter(page)]--;
1370                         page_remove_rmap(page, false);
1371                         put_page(page);
1372                         continue;
1373                 }
1374
1375                 /* If details->check_mapping, we leave swap entries. */
1376                 if (unlikely(details))
1377                         continue;
1378
1379                 entry = pte_to_swp_entry(ptent);
1380                 if (!non_swap_entry(entry))
1381                         rss[MM_SWAPENTS]--;
1382                 else if (is_migration_entry(entry)) {
1383                         struct page *page;
1384
1385                         page = migration_entry_to_page(entry);
1386                         rss[mm_counter(page)]--;
1387                 }
1388                 if (unlikely(!free_swap_and_cache(entry)))
1389                         print_bad_pte(vma, addr, ptent, NULL);
1390                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1391         } while (pte++, addr += PAGE_SIZE, addr != end);
1392
1393         add_mm_rss_vec(mm, rss);
1394         arch_leave_lazy_mmu_mode();
1395
1396         /* Do the actual TLB flush before dropping ptl */
1397         if (force_flush)
1398                 tlb_flush_mmu_tlbonly(tlb);
1399         pte_unmap_unlock(start_pte, ptl);
1400
1401         /*
1402          * If we forced a TLB flush (either due to running out of
1403          * batch buffers or because we needed to flush dirty TLB
1404          * entries before releasing the ptl), free the batched
1405          * memory too. Restart if we didn't do everything.
1406          */
1407         if (force_flush) {
1408                 force_flush = 0;
1409                 tlb_flush_mmu_free(tlb);
1410                 if (addr != end)
1411                         goto again;
1412         }
1413
1414         return addr;
1415 }
1416
1417 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1418                                 struct vm_area_struct *vma, pud_t *pud,
1419                                 unsigned long addr, unsigned long end,
1420                                 struct zap_details *details)
1421 {
1422         pmd_t *pmd;
1423         unsigned long next;
1424
1425         pmd = pmd_offset(pud, addr);
1426         do {
1427                 next = pmd_addr_end(addr, end);
1428                 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1429                         if (next - addr != HPAGE_PMD_SIZE)
1430                                 __split_huge_pmd(vma, pmd, addr, false, NULL);
1431                         else if (zap_huge_pmd(tlb, vma, pmd, addr))
1432                                 goto next;
1433                         /* fall through */
1434                 }
1435                 /*
1436                  * Here there can be other concurrent MADV_DONTNEED or
1437                  * trans huge page faults running, and if the pmd is
1438                  * none or trans huge it can change under us. This is
1439                  * because MADV_DONTNEED holds the mmap_sem in read
1440                  * mode.
1441                  */
1442                 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1443                         goto next;
1444                 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1445 next:
1446                 cond_resched();
1447         } while (pmd++, addr = next, addr != end);
1448
1449         return addr;
1450 }
1451
1452 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1453                                 struct vm_area_struct *vma, p4d_t *p4d,
1454                                 unsigned long addr, unsigned long end,
1455                                 struct zap_details *details)
1456 {
1457         pud_t *pud;
1458         unsigned long next;
1459
1460         pud = pud_offset(p4d, addr);
1461         do {
1462                 next = pud_addr_end(addr, end);
1463                 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1464                         if (next - addr != HPAGE_PUD_SIZE) {
1465                                 VM_BUG_ON_VMA(!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
1466                                 split_huge_pud(vma, pud, addr);
1467                         } else if (zap_huge_pud(tlb, vma, pud, addr))
1468                                 goto next;
1469                         /* fall through */
1470                 }
1471                 if (pud_none_or_clear_bad(pud))
1472                         continue;
1473                 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1474 next:
1475                 cond_resched();
1476         } while (pud++, addr = next, addr != end);
1477
1478         return addr;
1479 }
1480
1481 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1482                                 struct vm_area_struct *vma, pgd_t *pgd,
1483                                 unsigned long addr, unsigned long end,
1484                                 struct zap_details *details)
1485 {
1486         p4d_t *p4d;
1487         unsigned long next;
1488
1489         p4d = p4d_offset(pgd, addr);
1490         do {
1491                 next = p4d_addr_end(addr, end);
1492                 if (p4d_none_or_clear_bad(p4d))
1493                         continue;
1494                 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1495         } while (p4d++, addr = next, addr != end);
1496
1497         return addr;
1498 }
1499
1500 void unmap_page_range(struct mmu_gather *tlb,
1501                              struct vm_area_struct *vma,
1502                              unsigned long addr, unsigned long end,
1503                              struct zap_details *details)
1504 {
1505         pgd_t *pgd;
1506         unsigned long next;
1507
1508         BUG_ON(addr >= end);
1509         tlb_start_vma(tlb, vma);
1510         pgd = pgd_offset(vma->vm_mm, addr);
1511         do {
1512                 next = pgd_addr_end(addr, end);
1513                 if (pgd_none_or_clear_bad(pgd))
1514                         continue;
1515                 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1516         } while (pgd++, addr = next, addr != end);
1517         tlb_end_vma(tlb, vma);
1518 }
1519
1520
1521 static void unmap_single_vma(struct mmu_gather *tlb,
1522                 struct vm_area_struct *vma, unsigned long start_addr,
1523                 unsigned long end_addr,
1524                 struct zap_details *details)
1525 {
1526         unsigned long start = max(vma->vm_start, start_addr);
1527         unsigned long end;
1528
1529         if (start >= vma->vm_end)
1530                 return;
1531         end = min(vma->vm_end, end_addr);
1532         if (end <= vma->vm_start)
1533                 return;
1534
1535         if (vma->vm_file)
1536                 uprobe_munmap(vma, start, end);
1537
1538         if (unlikely(vma->vm_flags & VM_PFNMAP))
1539                 untrack_pfn(vma, 0, 0);
1540
1541         if (start != end) {
1542                 if (unlikely(is_vm_hugetlb_page(vma))) {
1543                         /*
1544                          * It is undesirable to test vma->vm_file as it
1545                          * should be non-null for valid hugetlb area.
1546                          * However, vm_file will be NULL in the error
1547                          * cleanup path of mmap_region. When
1548                          * hugetlbfs ->mmap method fails,
1549                          * mmap_region() nullifies vma->vm_file
1550                          * before calling this function to clean up.
1551                          * Since no pte has actually been setup, it is
1552                          * safe to do nothing in this case.
1553                          */
1554                         if (vma->vm_file) {
1555                                 i_mmap_lock_write(vma->vm_file->f_mapping);
1556                                 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1557                                 i_mmap_unlock_write(vma->vm_file->f_mapping);
1558                         }
1559                 } else
1560                         unmap_page_range(tlb, vma, start, end, details);
1561         }
1562 }
1563
1564 /**
1565  * unmap_vmas - unmap a range of memory covered by a list of vma's
1566  * @tlb: address of the caller's struct mmu_gather
1567  * @vma: the starting vma
1568  * @start_addr: virtual address at which to start unmapping
1569  * @end_addr: virtual address at which to end unmapping
1570  *
1571  * Unmap all pages in the vma list.
1572  *
1573  * Only addresses between @start_addr and @end_addr will be unmapped.
1574  *
1575  * The VMA list must be sorted in ascending virtual address order.
1576  *
1577  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1578  * range after unmap_vmas() returns.  So the only responsibility here is to
1579  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1580  * drops the lock and schedules.
1581  */
1582 void unmap_vmas(struct mmu_gather *tlb,
1583                 struct vm_area_struct *vma, unsigned long start_addr,
1584                 unsigned long end_addr)
1585 {
1586         struct mm_struct *mm = vma->vm_mm;
1587
1588         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
1589         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1590                 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1591         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
1592 }
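
/*
 * Illustrative sketch (assumed caller pattern, not part of this file):
 * callers such as exit_mmap()/unmap_region() own the mmu_gather and perform
 * the final TLB and page-table teardown after unmap_vmas() returns, roughly:
 *
 *	struct mmu_gather tlb;
 *
 *	lru_add_drain();
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	update_hiwater_rss(mm);
 *	unmap_vmas(&tlb, vma, start, end);
 *	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 *	tlb_finish_mmu(&tlb, start, end);
 */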
1593
1594 /**
1595  * zap_page_range - remove user pages in a given range
1596  * @vma: vm_area_struct holding the applicable pages
1597  * @start: starting address of pages to zap
1598  * @size: number of bytes to zap
1599  *
1600  * Caller must protect the VMA list
1601  */
1602 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1603                 unsigned long size)
1604 {
1605         struct mm_struct *mm = vma->vm_mm;
1606         struct mmu_gather tlb;
1607         unsigned long end = start + size;
1608
1609         lru_add_drain();
1610         tlb_gather_mmu(&tlb, mm, start, end);
1611         update_hiwater_rss(mm);
1612         mmu_notifier_invalidate_range_start(mm, start, end);
1613         for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
1614                 unmap_single_vma(&tlb, vma, start, end, NULL);
1615
1616                 /*
1617                  * zap_page_range does not specify whether mmap_sem should be
1618                  * held for read or write. That allows parallel zap_page_range
1619                  * operations to unmap a PTE and defer a flush meaning that
1620                  * this call observes pte_none and fails to flush the TLB.
1621                  * Rather than adding a complex API, ensure that no stale
1622                  * TLB entries exist when this call returns.
1623                  */
1624                 flush_tlb_range(vma, start, end);
1625         }
1626
1627         mmu_notifier_invalidate_range_end(mm, start, end);
1628         tlb_finish_mmu(&tlb, start, end);
1629 }
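
/*
 * Illustrative sketch (assumed caller, not part of this file): the pattern
 * used by e.g. madvise(MADV_DONTNEED) to drop every user page in a range
 * while holding mmap_sem for read:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, start);
 *	if (vma)
 *		zap_page_range(vma, start, end - start);
 *	up_read(&mm->mmap_sem);
 */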
1630
1631 /**
1632  * zap_page_range_single - remove user pages in a given range
1633  * @vma: vm_area_struct holding the applicable pages
1634  * @address: starting address of pages to zap
1635  * @size: number of bytes to zap
1636  * @details: details of shared cache invalidation
1637  *
1638  * The range must fit into one VMA.
1639  */
1640 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1641                 unsigned long size, struct zap_details *details)
1642 {
1643         struct mm_struct *mm = vma->vm_mm;
1644         struct mmu_gather tlb;
1645         unsigned long end = address + size;
1646
1647         lru_add_drain();
1648         tlb_gather_mmu(&tlb, mm, address, end);
1649         update_hiwater_rss(mm);
1650         mmu_notifier_invalidate_range_start(mm, address, end);
1651         unmap_single_vma(&tlb, vma, address, end, details);
1652         mmu_notifier_invalidate_range_end(mm, address, end);
1653         tlb_finish_mmu(&tlb, address, end);
1654 }
1655
1656 /**
1657  * zap_vma_ptes - remove ptes mapping the vma
1658  * @vma: vm_area_struct holding ptes to be zapped
1659  * @address: starting address of pages to zap
1660  * @size: number of bytes to zap
1661  *
1662  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1663  *
1664  * The entire address range must be fully contained within the vma.
1665  *
1666  * Returns 0 if successful.
1667  */
1668 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1669                 unsigned long size)
1670 {
1671         if (address < vma->vm_start || address + size > vma->vm_end ||
1672                         !(vma->vm_flags & VM_PFNMAP))
1673                 return -1;
1674         zap_page_range_single(vma, address, size, NULL);
1675         return 0;
1676 }
1677 EXPORT_SYMBOL_GPL(zap_vma_ptes);
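
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * revoking a VM_PFNMAP mapping it established earlier, e.g. before the
 * backing device memory disappears; here the whole vma was mapped by the
 * driver, so the entire range is zapped:
 *
 *	static void mydrv_revoke_mapping(struct vm_area_struct *vma)
 *	{
 *		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *	}
 */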
1678
1679 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1680                         spinlock_t **ptl)
1681 {
1682         pgd_t *pgd;
1683         p4d_t *p4d;
1684         pud_t *pud;
1685         pmd_t *pmd;
1686
1687         pgd = pgd_offset(mm, addr);
1688         p4d = p4d_alloc(mm, pgd, addr);
1689         if (!p4d)
1690                 return NULL;
1691         pud = pud_alloc(mm, p4d, addr);
1692         if (!pud)
1693                 return NULL;
1694         pmd = pmd_alloc(mm, pud, addr);
1695         if (!pmd)
1696                 return NULL;
1697
1698         VM_BUG_ON(pmd_trans_huge(*pmd));
1699         return pte_alloc_map_lock(mm, pmd, addr, ptl);
1700 }
1701
1702 /*
1703  * This is the old fallback for page remapping.
1704  *
1705  * For historical reasons, it only allows reserved pages. Only
1706  * old drivers should use this, and they needed to mark their
1707  * pages reserved for the old functions anyway.
1708  */
1709 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1710                         struct page *page, pgprot_t prot)
1711 {
1712         struct mm_struct *mm = vma->vm_mm;
1713         int retval;
1714         pte_t *pte;
1715         spinlock_t *ptl;
1716
1717         retval = -EINVAL;
1718         if (PageAnon(page))
1719                 goto out;
1720         retval = -ENOMEM;
1721         flush_dcache_page(page);
1722         pte = get_locked_pte(mm, addr, &ptl);
1723         if (!pte)
1724                 goto out;
1725         retval = -EBUSY;
1726         if (!pte_none(*pte))
1727                 goto out_unlock;
1728
1729         /* Ok, finally just insert the thing.. */
1730         get_page(page);
1731         inc_mm_counter_fast(mm, mm_counter_file(page));
1732         page_add_file_rmap(page, false);
1733         set_pte_at(mm, addr, pte, mk_pte(page, prot));
1734
1735         retval = 0;
1736         pte_unmap_unlock(pte, ptl);
1737         return retval;
1738 out_unlock:
1739         pte_unmap_unlock(pte, ptl);
1740 out:
1741         return retval;
1742 }
1743
1744 /**
1745  * vm_insert_page - insert single page into user vma
1746  * @vma: user vma to map to
1747  * @addr: target user address of this page
1748  * @page: source kernel page
1749  *
1750  * This allows drivers to insert individual pages they've allocated
1751  * into a user vma.
1752  *
1753  * The page has to be a nice clean _individual_ kernel allocation.
1754  * If you allocate a compound page, you need to have marked it as
1755  * such (__GFP_COMP), or manually just split the page up yourself
1756  * (see split_page()).
1757  *
1758  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1759  * took an arbitrary page protection parameter. This doesn't allow
1760  * that. Your vma protection will have to be set up correctly, which
1761  * means that if you want a shared writable mapping, you'd better
1762  * ask for a shared writable mapping!
1763  *
1764  * The page does not need to be reserved.
1765  *
1766  * Usually this function is called from f_op->mmap() handler
1767  * under mm->mmap_sem write-lock, so it can change vma->vm_flags.
1768  * Caller must set VM_MIXEDMAP on vma if it wants to call this
1769  * function from other places, for example from page-fault handler.
1770  */
1771 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1772                         struct page *page)
1773 {
1774         if (addr < vma->vm_start || addr >= vma->vm_end)
1775                 return -EFAULT;
1776         if (!page_count(page))
1777                 return -EINVAL;
1778         if (!(vma->vm_flags & VM_MIXEDMAP)) {
1779                 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
1780                 BUG_ON(vma->vm_flags & VM_PFNMAP);
1781                 vma->vm_flags |= VM_MIXEDMAP;
1782         }
1783         return insert_page(vma, addr, page, vma->vm_page_prot);
1784 }
1785 EXPORT_SYMBOL(vm_insert_page);
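
/*
 * Illustrative sketch (hypothetical driver, not part of this file): an
 * f_op->mmap handler exposing one driver-allocated page to userspace.
 * Because it runs under mmap_sem held for write, vm_insert_page() may set
 * VM_MIXEDMAP on the vma itself:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 *			return -EINVAL;
 *		return vm_insert_page(vma, vma->vm_start, drv->page);
 *	}
 */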
1786
1787 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1788                         pfn_t pfn, pgprot_t prot, bool mkwrite)
1789 {
1790         struct mm_struct *mm = vma->vm_mm;
1791         int retval;
1792         pte_t *pte, entry;
1793         spinlock_t *ptl;
1794
1795         retval = -ENOMEM;
1796         pte = get_locked_pte(mm, addr, &ptl);
1797         if (!pte)
1798                 goto out;
1799         retval = -EBUSY;
1800         if (!pte_none(*pte)) {
1801                 if (mkwrite) {
1802                         /*
1803                          * For read faults on private mappings the PFN passed
1804                          * in may not match the PFN we have mapped if the
1805                          * mapped PFN is a writeable COW page.  In the mkwrite
1806                          * case we are creating a writable PTE for a shared
1807                          * mapping and we expect the PFNs to match.
1808                          */
1809                         if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
1810                                 goto out_unlock;
1811                         entry = *pte;
1812                         goto out_mkwrite;
1813                 } else
1814                         goto out_unlock;
1815         }
1816
1817         /* Ok, finally just insert the thing.. */
1818         if (pfn_t_devmap(pfn))
1819                 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
1820         else
1821                 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1822
1823 out_mkwrite:
1824         if (mkwrite) {
1825                 entry = pte_mkyoung(entry);
1826                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1827         }
1828
1829         set_pte_at(mm, addr, pte, entry);
1830         update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
1831
1832         retval = 0;
1833 out_unlock:
1834         pte_unmap_unlock(pte, ptl);
1835 out:
1836         return retval;
1837 }
1838
1839 /**
1840  * vm_insert_pfn - insert single pfn into user vma
1841  * @vma: user vma to map to
1842  * @addr: target user address of this page
1843  * @pfn: source kernel pfn
1844  *
1845  * Similar to vm_insert_page, this allows drivers to insert individual pages
1846  * they've allocated into a user vma. Same comments apply.
1847  *
1848  * This function should only be called from a vm_ops->fault handler, and
1849  * in that case the handler should return NULL.
1850  *
1851  * vma cannot be a COW mapping.
1852  *
1853  * As this is called only for pages that do not currently exist, we
1854  * do not need to flush old virtual caches or the TLB.
1855  */
1856 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1857                         unsigned long pfn)
1858 {
1859         return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
1860 }
1861 EXPORT_SYMBOL(vm_insert_pfn);
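
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * vm_ops->fault handler for a VM_PFNMAP vma backed by device memory,
 * computing the pfn from the fault offset. -EBUSY means another thread
 * already installed the pte and is treated as success:
 *
 *	static int mydrv_fault(struct vm_fault *vmf)
 *	{
 *		struct mydrv *drv = vmf->vma->vm_private_data;
 *		unsigned long pfn = drv->base_pfn + vmf->pgoff;
 *		int err = vm_insert_pfn(vmf->vma, vmf->address, pfn);
 *
 *		if (err && err != -EBUSY)
 *			return VM_FAULT_SIGBUS;
 *		return VM_FAULT_NOPAGE;
 *	}
 */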
1862
1863 /**
1864  * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
1865  * @vma: user vma to map to
1866  * @addr: target user address of this page
1867  * @pfn: source kernel pfn
1868  * @pgprot: pgprot flags for the inserted page
1869  *
1870  * This is exactly like vm_insert_pfn, except that it allows drivers
1871  * to override pgprot on a per-page basis.
1872  *
1873  * This only makes sense for IO mappings, and it makes no sense for
1874  * cow mappings.  In general, using multiple vmas is preferable;
1875  * vm_insert_pfn_prot should only be used if using multiple VMAs is
1876  * impractical.
1877  */
1878 int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
1879                         unsigned long pfn, pgprot_t pgprot)
1880 {
1881         int ret;
1882         /*
1883          * Technically, architectures with pte_special can avoid all these
1884          * restrictions (same for remap_pfn_range).  However we would like
1885          * consistency in testing and feature parity among all, so we should
1886          * try to keep these invariants in place for everybody.
1887          */
1888         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1889         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1890                                                 (VM_PFNMAP|VM_MIXEDMAP));
1891         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1892         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1893
1894         if (addr < vma->vm_start || addr >= vma->vm_end)
1895                 return -EFAULT;
1896
1897         if (!pfn_modify_allowed(pfn, pgprot))
1898                 return -EACCES;
1899
1900         track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
1901
1902         ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
1903                         false);
1904
1905         return ret;
1906 }
1907 EXPORT_SYMBOL(vm_insert_pfn_prot);
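
/*
 * Illustrative sketch (hypothetical use, not part of this file): the same
 * kind of fault handler as above, but overriding the protection for this
 * one page, e.g. mapping part of a device BAR write-combined while the rest
 * of the vma stays uncached:
 *
 *	pgprot_t prot = pgprot_writecombine(vmf->vma->vm_page_prot);
 *
 *	err = vm_insert_pfn_prot(vmf->vma, vmf->address, pfn, prot);
 */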
1908
1909 static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1910                         pfn_t pfn, bool mkwrite)
1911 {
1912         pgprot_t pgprot = vma->vm_page_prot;
1913
1914         BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1915
1916         if (addr < vma->vm_start || addr >= vma->vm_end)
1917                 return -EFAULT;
1918
1919         track_pfn_insert(vma, &pgprot, pfn);
1920
1921         if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
1922                 return -EACCES;
1923
1924         /*
1925          * If we don't have pte special, then we have to use the pfn_valid()
1926          * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
1927          * refcount the page if pfn_valid is true (hence insert_page rather
1928          * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
1929  * without pte special, it would then be refcounted as a normal page.
1930          */
1931         if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
1932                 struct page *page;
1933
1934                 /*
1935                  * At this point we are committed to insert_page()
1936                  * regardless of whether the caller specified flags that
1937                  * result in pfn_t_has_page() == false.
1938                  */
1939                 page = pfn_to_page(pfn_t_to_pfn(pfn));
1940                 return insert_page(vma, addr, page, pgprot);
1941         }
1942         return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
1943 }
1944
1945 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1946                         pfn_t pfn)
1947 {
1948         return __vm_insert_mixed(vma, addr, pfn, false);
1949
1950 }
1951 EXPORT_SYMBOL(vm_insert_mixed);
1952
1953 int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
1954                         pfn_t pfn)
1955 {
1956         return __vm_insert_mixed(vma, addr, pfn, true);
1957 }
1958 EXPORT_SYMBOL(vm_insert_mixed_mkwrite);
1959
1960 /*
1961  * Maps a range of physical memory into the requested pages. The old
1962  * mappings are removed. Any references to nonexistent pages result
1963  * in null mappings (currently treated as "copy-on-access").
1964  */
1965 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1966                         unsigned long addr, unsigned long end,
1967                         unsigned long pfn, pgprot_t prot)
1968 {
1969         pte_t *pte;
1970         spinlock_t *ptl;
1971         int err = 0;
1972
1973         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1974         if (!pte)
1975                 return -ENOMEM;
1976         arch_enter_lazy_mmu_mode();
1977         do {
1978                 BUG_ON(!pte_none(*pte));
1979                 if (!pfn_modify_allowed(pfn, prot)) {
1980                         err = -EACCES;
1981                         break;
1982                 }
1983                 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1984                 pfn++;
1985         } while (pte++, addr += PAGE_SIZE, addr != end);
1986         arch_leave_lazy_mmu_mode();
1987         pte_unmap_unlock(pte - 1, ptl);
1988         return err;
1989 }
1990
1991 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1992                         unsigned long addr, unsigned long end,
1993                         unsigned long pfn, pgprot_t prot)
1994 {
1995         pmd_t *pmd;
1996         unsigned long next;
1997         int err;
1998
1999         pfn -= addr >> PAGE_SHIFT;
2000         pmd = pmd_alloc(mm, pud, addr);
2001         if (!pmd)
2002                 return -ENOMEM;
2003         VM_BUG_ON(pmd_trans_huge(*pmd));
2004         do {
2005                 next = pmd_addr_end(addr, end);
2006                 err = remap_pte_range(mm, pmd, addr, next,
2007                                 pfn + (addr >> PAGE_SHIFT), prot);
2008                 if (err)
2009                         return err;
2010         } while (pmd++, addr = next, addr != end);
2011         return 0;
2012 }
2013
2014 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2015                         unsigned long addr, unsigned long end,
2016                         unsigned long pfn, pgprot_t prot)
2017 {
2018         pud_t *pud;
2019         unsigned long next;
2020         int err;
2021
2022         pfn -= addr >> PAGE_SHIFT;
2023         pud = pud_alloc(mm, p4d, addr);
2024         if (!pud)
2025                 return -ENOMEM;
2026         do {
2027                 next = pud_addr_end(addr, end);
2028                 err = remap_pmd_range(mm, pud, addr, next,
2029                                 pfn + (addr >> PAGE_SHIFT), prot);
2030                 if (err)
2031                         return err;
2032         } while (pud++, addr = next, addr != end);
2033         return 0;
2034 }
2035
2036 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2037                         unsigned long addr, unsigned long end,
2038                         unsigned long pfn, pgprot_t prot)
2039 {
2040         p4d_t *p4d;
2041         unsigned long next;
2042         int err;
2043
2044         pfn -= addr >> PAGE_SHIFT;
2045         p4d = p4d_alloc(mm, pgd, addr);
2046         if (!p4d)
2047                 return -ENOMEM;
2048         do {
2049                 next = p4d_addr_end(addr, end);
2050                 err = remap_pud_range(mm, p4d, addr, next,
2051                                 pfn + (addr >> PAGE_SHIFT), prot);
2052                 if (err)
2053                         return err;
2054         } while (p4d++, addr = next, addr != end);
2055         return 0;
2056 }
2057
2058 /**
2059  * remap_pfn_range - remap kernel memory to userspace
2060  * @vma: user vma to map to
2061  * @addr: target user address to start at
2062  * @pfn: physical address of kernel memory
2063  * @size: size of map area
2064  * @prot: page protection flags for this mapping
2065  *
2066  *  Note: this is only safe if the mm semaphore is held when called.
2067  */
2068 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2069                     unsigned long pfn, unsigned long size, pgprot_t prot)
2070 {
2071         pgd_t *pgd;
2072         unsigned long next;
2073         unsigned long end = addr + PAGE_ALIGN(size);
2074         struct mm_struct *mm = vma->vm_mm;
2075         unsigned long remap_pfn = pfn;
2076         int err;
2077
2078         /*
2079          * Physically remapped pages are special. Tell the
2080          * rest of the world about it:
2081          *   VM_IO tells people not to look at these pages
2082          *      (accesses can have side effects).
2083          *   VM_PFNMAP tells the core MM that the base pages are just
2084          *      raw PFN mappings, and do not have a "struct page" associated
2085          *      with them.
2086          *   VM_DONTEXPAND
2087          *      Disable vma merging and expanding with mremap().
2088          *   VM_DONTDUMP
2089          *      Omit vma from core dump, even when VM_IO turned off.
2090          *
2091          * There's a horrible special case to handle copy-on-write
2092          * behaviour that some programs depend on. We mark the "original"
2093          * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2094          * See vm_normal_page() for details.
2095          */
2096         if (is_cow_mapping(vma->vm_flags)) {
2097                 if (addr != vma->vm_start || end != vma->vm_end)
2098                         return -EINVAL;
2099                 vma->vm_pgoff = pfn;
2100         }
2101
2102         err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
2103         if (err)
2104                 return -EINVAL;
2105
2106         vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2107
2108         BUG_ON(addr >= end);
2109         pfn -= addr >> PAGE_SHIFT;
2110         pgd = pgd_offset(mm, addr);
2111         flush_cache_range(vma, addr, end);
2112         do {
2113                 next = pgd_addr_end(addr, end);
2114                 err = remap_p4d_range(mm, pgd, addr, next,
2115                                 pfn + (addr >> PAGE_SHIFT), prot);
2116                 if (err)
2117                         break;
2118         } while (pgd++, addr = next, addr != end);
2119
2120         if (err)
2121                 untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2122
2123         return err;
2124 }
2125 EXPORT_SYMBOL(remap_pfn_range);
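
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * classic f_op->mmap handler that maps a physically contiguous device
 * buffer up front instead of faulting it in page by page; buf_phys and
 * buf_size are assumed driver fields:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (size > drv->buf_size)
 *			return -EINVAL;
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       drv->buf_phys >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 */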
2126
2127 /**
2128  * vm_iomap_memory - remap memory to userspace
2129  * @vma: user vma to map to
2130  * @start: start of area
2131  * @len: size of area
2132  *
2133  * This is a simplified io_remap_pfn_range() for common driver use. The
2134  * driver just needs to give us the physical memory range to be mapped,
2135  * we'll figure out the rest from the vma information.
2136  *
2137  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2138  * write-combining or similar caching behaviour.
2139  */
2140 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2141 {
2142         unsigned long vm_len, pfn, pages;
2143
2144         /* Check that the physical memory area passed in looks valid */
2145         if (start + len < start)
2146                 return -EINVAL;
2147         /*
2148          * You *really* shouldn't map things that aren't page-aligned,
2149          * but we've historically allowed it because IO memory might
2150          * just have smaller alignment.
2151          */
2152         len += start & ~PAGE_MASK;
2153         pfn = start >> PAGE_SHIFT;
2154         pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2155         if (pfn + pages < pfn)
2156                 return -EINVAL;
2157
2158         /* We start the mapping 'vm_pgoff' pages into the area */
2159         if (vma->vm_pgoff > pages)
2160                 return -EINVAL;
2161         pfn += vma->vm_pgoff;
2162         pages -= vma->vm_pgoff;
2163
2164         /* Can we fit all of the mapping? */
2165         vm_len = vma->vm_end - vma->vm_start;
2166         if (vm_len >> PAGE_SHIFT > pages)
2167                 return -EINVAL;
2168
2169         /* Ok, let it rip */
2170         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2171 }
2172 EXPORT_SYMBOL(vm_iomap_memory);
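
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the same
 * kind of mmap handler reduced to its essentials, letting vm_iomap_memory()
 * derive the pfn, length checks and vm_pgoff handling from the vma:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return vm_iomap_memory(vma, drv->bar_start, drv->bar_len);
 *	}
 */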
2173
2174 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2175                                      unsigned long addr, unsigned long end,
2176                                      pte_fn_t fn, void *data)
2177 {
2178         pte_t *pte;
2179         int err;
2180         pgtable_t token;
2181         spinlock_t *uninitialized_var(ptl);
2182
2183         pte = (mm == &init_mm) ?
2184                 pte_alloc_kernel(pmd, addr) :
2185                 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2186         if (!pte)
2187                 return -ENOMEM;
2188
2189         BUG_ON(pmd_huge(*pmd));
2190
2191         arch_enter_lazy_mmu_mode();
2192
2193         token = pmd_pgtable(*pmd);
2194
2195         do {
2196                 err = fn(pte++, token, addr, data);
2197                 if (err)
2198                         break;
2199         } while (addr += PAGE_SIZE, addr != end);
2200
2201         arch_leave_lazy_mmu_mode();
2202
2203         if (mm != &init_mm)
2204                 pte_unmap_unlock(pte-1, ptl);
2205         return err;
2206 }
2207
2208 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2209                                      unsigned long addr, unsigned long end,
2210                                      pte_fn_t fn, void *data)
2211 {
2212         pmd_t *pmd;
2213         unsigned long next;
2214         int err;
2215
2216         BUG_ON(pud_huge(*pud));
2217
2218         pmd = pmd_alloc(mm, pud, addr);
2219         if (!pmd)
2220                 return -ENOMEM;
2221         do {
2222                 next = pmd_addr_end(addr, end);
2223                 err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
2224                 if (err)
2225                         break;
2226         } while (pmd++, addr = next, addr != end);
2227         return err;
2228 }
2229
2230 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2231                                      unsigned long addr, unsigned long end,
2232                                      pte_fn_t fn, void *data)
2233 {
2234         pud_t *pud;
2235         unsigned long next;
2236         int err;
2237
2238         pud = pud_alloc(mm, p4d, addr);
2239         if (!pud)
2240                 return -ENOMEM;
2241         do {
2242                 next = pud_addr_end(addr, end);
2243                 err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
2244                 if (err)
2245                         break;
2246         } while (pud++, addr = next, addr != end);
2247         return err;
2248 }
2249
2250 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2251                                      unsigned long addr, unsigned long end,
2252                                      pte_fn_t fn, void *data)
2253 {
2254         p4d_t *p4d;
2255         unsigned long next;
2256         int err;
2257
2258         p4d = p4d_alloc(mm, pgd, addr);
2259         if (!p4d)
2260                 return -ENOMEM;
2261         do {
2262                 next = p4d_addr_end(addr, end);
2263                 err = apply_to_pud_range(mm, p4d, addr, next, fn, data);
2264                 if (err)
2265                         break;
2266         } while (p4d++, addr = next, addr != end);
2267         return err;
2268 }
2269
2270 /*
2271  * Scan a region of virtual memory, filling in page tables as necessary
2272  * and calling a provided function on each leaf page table.
2273  */
2274 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2275                         unsigned long size, pte_fn_t fn, void *data)
2276 {
2277         pgd_t *pgd;
2278         unsigned long next;
2279         unsigned long end = addr + size;
2280         int err;
2281
2282         if (WARN_ON(addr >= end))
2283                 return -EINVAL;
2284
2285         pgd = pgd_offset(mm, addr);
2286         do {
2287                 next = pgd_addr_end(addr, end);
2288                 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data);
2289                 if (err)
2290                         break;
2291         } while (pgd++, addr = next, addr != end);
2292
2293         return err;
2294 }
2295 EXPORT_SYMBOL_GPL(apply_to_page_range);
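
/*
 * Illustrative sketch (hypothetical callback, not part of this file): a
 * pte_fn_t callback matching the signature used above, writing a caller
 * supplied pte value into every leaf entry of a kernel range:
 *
 *	static int set_pte_cb(pte_t *ptep, pgtable_t token,
 *			      unsigned long addr, void *data)
 *	{
 *		set_pte_at(&init_mm, addr, ptep, *(pte_t *)data);
 *		return 0;
 *	}
 *
 * and invoked as apply_to_page_range(&init_mm, addr, size, set_pte_cb, &pte).
 */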
2296
2297 /*
2298  * handle_pte_fault chooses page fault handler according to an entry which was
2299  * read non-atomically.  Before making any commitment, on those architectures
2300  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2301  * parts, do_swap_page must check under lock before unmapping the pte and
2302  * proceeding (but do_wp_page is only called after already making such a check;
2303  * and do_anonymous_page can safely check later on).
2304  */
2305 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
2306                                 pte_t *page_table, pte_t orig_pte)
2307 {
2308         int same = 1;
2309 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2310         if (sizeof(pte_t) > sizeof(unsigned long)) {
2311                 spinlock_t *ptl = pte_lockptr(mm, pmd);
2312                 spin_lock(ptl);
2313                 same = pte_same(*page_table, orig_pte);
2314                 spin_unlock(ptl);
2315         }
2316 #endif
2317         pte_unmap(page_table);
2318         return same;
2319 }
2320
2321 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
2322 {
2323         debug_dma_assert_idle(src);
2324
2325         /*
2326          * If the source page was a PFN mapping, we don't have
2327          * a "struct page" for it. We do a best-effort copy by
2328          * just copying from the original user address. If that
2329          * fails, we just zero-fill it. Live with it.
2330          */
2331         if (unlikely(!src)) {
2332                 void *kaddr = kmap_atomic(dst);
2333                 void __user *uaddr = (void __user *)(va & PAGE_MASK);
2334
2335                 /*
2336                  * This really shouldn't fail, because the page is there
2337                  * in the page tables. But it might just be unreadable,
2338                  * in which case we just give up and fill the result with
2339                  * zeroes.
2340                  */
2341                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
2342                         clear_page(kaddr);
2343                 kunmap_atomic(kaddr);
2344                 flush_dcache_page(dst);
2345         } else
2346                 copy_user_highpage(dst, src, va, vma);
2347 }
2348
2349 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2350 {
2351         struct file *vm_file = vma->vm_file;
2352
2353         if (vm_file)
2354                 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2355
2356         /*
2357          * Special mappings (e.g. VDSO) do not have any file so fake
2358          * a default GFP_KERNEL for them.
2359          */
2360         return GFP_KERNEL;
2361 }
2362
2363 /*
2364  * Notify the address space that the page is about to become writable so that
2365  * it can prohibit this or wait for the page to get into an appropriate state.
2366  *
2367  * We do this without the lock held, so that it can sleep if it needs to.
2368  */
2369 static int do_page_mkwrite(struct vm_fault *vmf)
2370 {
2371         int ret;
2372         struct page *page = vmf->page;
2373         unsigned int old_flags = vmf->flags;
2374
2375         vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2376
2377         ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2378         /* Restore original flags so that caller is not surprised */
2379         vmf->flags = old_flags;
2380         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2381                 return ret;
2382         if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2383                 lock_page(page);
2384                 if (!page->mapping) {
2385                         unlock_page(page);
2386                         return 0; /* retry */
2387                 }
2388                 ret |= VM_FAULT_LOCKED;
2389         } else
2390                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2391         return ret;
2392 }
2393
2394 /*
2395  * Handle dirtying of a page in shared file mapping on a write fault.
2396  *
2397  * The function expects the page to be locked and unlocks it.
2398  */
2399 static void fault_dirty_shared_page(struct vm_area_struct *vma,
2400                                     struct page *page)
2401 {
2402         struct address_space *mapping;
2403         bool dirtied;
2404         bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2405
2406         dirtied = set_page_dirty(page);
2407         VM_BUG_ON_PAGE(PageAnon(page), page);
2408         /*
2409          * Take a local copy of the address_space - page.mapping may be zeroed
2410          * by truncate after unlock_page().   The address_space itself remains
2411          * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
2412          * release semantics to prevent the compiler from undoing this copying.
2413          */
2414         mapping = page_rmapping(page);
2415         unlock_page(page);
2416
2417         if ((dirtied || page_mkwrite) && mapping) {
2418                 /*
2419                  * Some device drivers do not set page.mapping
2420                  * but still dirty their pages
2421                  */
2422                 balance_dirty_pages_ratelimited(mapping);
2423         }
2424
2425         if (!page_mkwrite)
2426                 file_update_time(vma->vm_file);
2427 }
2428
2429 /*
2430  * Handle write page faults for pages that can be reused in the current vma
2431  *
2432  * This can happen either due to the mapping being with the VM_SHARED flag,
2433  * or due to us being the last reference standing to the page. In either
2434  * case, all we need to do here is to mark the page as writable and update
2435  * any related book-keeping.
2436  */
2437 static inline void wp_page_reuse(struct vm_fault *vmf)
2438         __releases(vmf->ptl)
2439 {
2440         struct vm_area_struct *vma = vmf->vma;
2441         struct page *page = vmf->page;
2442         pte_t entry;
2443         /*
2444          * Clear the pages cpupid information as the existing
2445          * information potentially belongs to a now completely
2446          * unrelated process.
2447          */
2448         if (page)
2449                 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
2450
2451         flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2452         entry = pte_mkyoung(vmf->orig_pte);
2453         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2454         if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2455                 update_mmu_cache(vma, vmf->address, vmf->pte);
2456         pte_unmap_unlock(vmf->pte, vmf->ptl);
2457 }
2458
2459 /*
2460  * Handle the case of a page which we actually need to copy to a new page.
2461  *
2462  * Called with mmap_sem locked and the old page referenced, but
2463  * without the ptl held.
2464  *
2465  * High level logic flow:
2466  *
2467  * - Allocate a page, copy the content of the old page to the new one.
2468  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
2469  * - Take the PTL. If the pte changed, bail out and release the allocated page
2470  * - If the pte is still the way we remember it, update the page table and all
2471  *   relevant references. This includes dropping the reference the page-table
2472  *   held to the old page, as well as updating the rmap.
2473  * - In any case, unlock the PTL and drop the reference we took to the old page.
2474  */
2475 static int wp_page_copy(struct vm_fault *vmf)
2476 {
2477         struct vm_area_struct *vma = vmf->vma;
2478         struct mm_struct *mm = vma->vm_mm;
2479         struct page *old_page = vmf->page;
2480         struct page *new_page = NULL;
2481         pte_t entry;
2482         int page_copied = 0;
2483         const unsigned long mmun_start = vmf->address & PAGE_MASK;
2484         const unsigned long mmun_end = mmun_start + PAGE_SIZE;
2485         struct mem_cgroup *memcg;
2486
2487         if (unlikely(anon_vma_prepare(vma)))
2488                 goto oom;
2489
2490         if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2491                 new_page = alloc_zeroed_user_highpage_movable(vma,
2492                                                               vmf->address);
2493                 if (!new_page)
2494                         goto oom;
2495         } else {
2496                 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
2497                                 vmf->address);
2498                 if (!new_page)
2499                         goto oom;
2500                 cow_user_page(new_page, old_page, vmf->address, vma);
2501         }
2502
2503         if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
2504                 goto oom_free_new;
2505
2506         __SetPageUptodate(new_page);
2507
2508         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2509
2510         /*
2511          * Re-check the pte - we dropped the lock
2512          */
2513         vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
2514         if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2515                 if (old_page) {
2516                         if (!PageAnon(old_page)) {
2517                                 dec_mm_counter_fast(mm,
2518                                                 mm_counter_file(old_page));
2519                                 inc_mm_counter_fast(mm, MM_ANONPAGES);
2520                         }
2521                 } else {
2522                         inc_mm_counter_fast(mm, MM_ANONPAGES);
2523                 }
2524                 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2525                 entry = mk_pte(new_page, vma->vm_page_prot);
2526                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2527                 /*
2528                  * Clear the pte entry and flush it first, before updating the
2529                  * pte with the new entry. This will avoid a race condition
2530                  * seen in the presence of one thread doing SMC and another
2531                  * thread doing COW.
2532                  */
2533                 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2534                 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
2535                 mem_cgroup_commit_charge(new_page, memcg, false, false);
2536                 lru_cache_add_active_or_unevictable(new_page, vma);
2537                 /*
2538                  * We call the notify macro here because, when using secondary
2539                  * mmu page tables (such as kvm shadow page tables), we want the
2540                  * new page to be mapped directly into the secondary page table.
2541                  */
2542                 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2543                 update_mmu_cache(vma, vmf->address, vmf->pte);
2544                 if (old_page) {
2545                         /*
2546                          * Only after switching the pte to the new page may
2547                          * we remove the mapcount here. Otherwise another
2548                          * process may come and find the rmap count decremented
2549                          * before the pte is switched to the new page, and
2550                          * "reuse" the old page writing into it while our pte
2551                          * here still points into it and can be read by other
2552                          * threads.
2553                          *
2554                          * The critical issue is to order this
2555                          * page_remove_rmap with the ptep_clear_flush above.
2556                          * Those stores are ordered by (if nothing else,)
2557                          * the barrier present in the atomic_add_negative
2558                          * in page_remove_rmap.
2559                          *
2560                          * Then the TLB flush in ptep_clear_flush ensures that
2561                          * no process can access the old page before the
2562                          * decremented mapcount is visible. And the old page
2563                          * cannot be reused until after the decremented
2564                          * mapcount is visible. So transitively, TLBs to
2565                          * old page will be flushed before it can be reused.
2566                          */
2567                         page_remove_rmap(old_page, false);
2568                 }
2569
2570                 /* Free the old page.. */
2571                 new_page = old_page;
2572                 page_copied = 1;
2573         } else {
2574                 mem_cgroup_cancel_charge(new_page, memcg, false);
2575         }
2576
2577         if (new_page)
2578                 put_page(new_page);
2579
2580         pte_unmap_unlock(vmf->pte, vmf->ptl);
2581         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2582         if (old_page) {
2583                 /*
2584                  * Don't let another task, with possibly unlocked vma,
2585                  * keep the mlocked page.
2586                  */
2587                 if (page_copied && (vma->vm_flags & VM_LOCKED)) {
2588                         lock_page(old_page);    /* LRU manipulation */
2589                         if (PageMlocked(old_page))
2590                                 munlock_vma_page(old_page);
2591                         unlock_page(old_page);
2592                 }
2593                 put_page(old_page);
2594         }
2595         return page_copied ? VM_FAULT_WRITE : 0;
2596 oom_free_new:
2597         put_page(new_page);
2598 oom:
2599         if (old_page)
2600                 put_page(old_page);
2601         return VM_FAULT_OOM;
2602 }
2603
2604 /**
2605  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
2606  *                        writeable once the page is prepared
2607  *
2608  * @vmf: structure describing the fault
2609  *
2610  * This function handles all that is needed to finish a write page fault in a
2611  * shared mapping due to PTE being read-only once the mapped page is prepared.
2612  * It handles locking of PTE and modifying it. The function returns
2613  * VM_FAULT_WRITE on success, 0 when PTE got changed before we acquired PTE
2614  * lock.
2615  *
2616  * The function expects the page to be locked or other protection against
2617  * concurrent faults / writeback (such as DAX radix tree locks).
2618  */
2619 int finish_mkwrite_fault(struct vm_fault *vmf)
2620 {
2621         WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
2622         vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
2623                                        &vmf->ptl);
2624         /*
2625          * We might have raced with another page fault while we released the
2626          * pte_offset_map_lock.
2627          */
2628         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2629                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2630                 return VM_FAULT_NOPAGE;
2631         }
2632         wp_page_reuse(vmf);
2633         return 0;
2634 }
2635
2636 /*
2637  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
2638  * mapping
2639  */
2640 static int wp_pfn_shared(struct vm_fault *vmf)
2641 {
2642         struct vm_area_struct *vma = vmf->vma;
2643
2644         if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
2645                 int ret;
2646
2647                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2648                 vmf->flags |= FAULT_FLAG_MKWRITE;
2649                 ret = vma->vm_ops->pfn_mkwrite(vmf);
2650                 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
2651                         return ret;
2652                 return finish_mkwrite_fault(vmf);
2653         }
2654         wp_page_reuse(vmf);
2655         return VM_FAULT_WRITE;
2656 }
2657
2658 static int wp_page_shared(struct vm_fault *vmf)
2659         __releases(vmf->ptl)
2660 {
2661         struct vm_area_struct *vma = vmf->vma;
2662
2663         get_page(vmf->page);
2664
2665         if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2666                 int tmp;
2667
2668                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2669                 tmp = do_page_mkwrite(vmf);
2670                 if (unlikely(!tmp || (tmp &
2671                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
2672                         put_page(vmf->page);
2673                         return tmp;
2674                 }
2675                 tmp = finish_mkwrite_fault(vmf);
2676                 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
2677                         unlock_page(vmf->page);
2678                         put_page(vmf->page);
2679                         return tmp;
2680                 }
2681         } else {
2682                 wp_page_reuse(vmf);
2683                 lock_page(vmf->page);
2684         }
2685         fault_dirty_shared_page(vma, vmf->page);
2686         put_page(vmf->page);
2687
2688         return VM_FAULT_WRITE;
2689 }
2690
2691 /*
2692  * This routine handles present pages, when users try to write
2693  * to a shared page. It is done by copying the page to a new address
2694  * and decrementing the shared-page counter for the old page.
2695  *
2696  * Note that this routine assumes that the protection checks have been
2697  * done by the caller (the low-level page fault routine in most cases).
2698  * Thus we can safely just mark it writable once we've done any necessary
2699  * COW.
2700  *
2701  * We also mark the page dirty at this point even though the page will
2702  * change only once the write actually happens. This avoids a few races,
2703  * and potentially makes it more efficient.
2704  *
2705  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2706  * but allow concurrent faults), with pte both mapped and locked.
2707  * We return with mmap_sem still held, but pte unmapped and unlocked.
2708  */
2709 static int do_wp_page(struct vm_fault *vmf)
2710         __releases(vmf->ptl)
2711 {
2712         struct vm_area_struct *vma = vmf->vma;
2713
2714         vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
2715         if (!vmf->page) {
2716                 /*
2717                  * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
2718                  * VM_PFNMAP VMA.
2719                  *
2720                  * We should not cow pages in a shared writeable mapping.
2721                  * Just mark the pages writable and/or call ops->pfn_mkwrite.
2722                  */
2723                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2724                                      (VM_WRITE|VM_SHARED))
2725                         return wp_pfn_shared(vmf);
2726
2727                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2728                 return wp_page_copy(vmf);
2729         }
2730
2731         /*
2732          * Take out anonymous pages first, anonymous shared vmas are
2733          * not dirty accountable.
2734          */
2735         if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
2736                 int total_map_swapcount;
2737                 if (!trylock_page(vmf->page)) {
2738                         get_page(vmf->page);
2739                         pte_unmap_unlock(vmf->pte, vmf->ptl);
2740                         lock_page(vmf->page);
2741                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
2742                                         vmf->address, &vmf->ptl);
2743                         if (!pte_same(*vmf->pte, vmf->orig_pte)) {
2744                                 unlock_page(vmf->page);
2745                                 pte_unmap_unlock(vmf->pte, vmf->ptl);
2746                                 put_page(vmf->page);
2747                                 return 0;
2748                         }
2749                         put_page(vmf->page);
2750                 }
2751                 if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
2752                         if (total_map_swapcount == 1) {
2753                                 /*
2754                                  * The page is all ours. Move it to
2755                                  * our anon_vma so the rmap code will
2756                                  * not search our parent or siblings.
2757                                  * Protected against the rmap code by
2758                                  * the page lock.
2759                                  */
2760                                 page_move_anon_rmap(vmf->page, vma);
2761                         }
2762                         unlock_page(vmf->page);
2763                         wp_page_reuse(vmf);
2764                         return VM_FAULT_WRITE;
2765                 }
2766                 unlock_page(vmf->page);
2767         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
2768                                         (VM_WRITE|VM_SHARED))) {
2769                 return wp_page_shared(vmf);
2770         }
2771
2772         /*
2773          * Ok, we need to copy. Oh, well..
2774          */
2775         get_page(vmf->page);
2776
2777         pte_unmap_unlock(vmf->pte, vmf->ptl);
2778         return wp_page_copy(vmf);
2779 }
2780
2781 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
2782                 unsigned long start_addr, unsigned long end_addr,
2783                 struct zap_details *details)
2784 {
2785         zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
2786 }
2787
2788 static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
2789                                             struct zap_details *details)
2790 {
2791         struct vm_area_struct *vma;
2792         pgoff_t vba, vea, zba, zea;
2793
2794         vma_interval_tree_foreach(vma, root,
2795                         details->first_index, details->last_index) {
2796
2797                 vba = vma->vm_pgoff;
2798                 vea = vba + vma_pages(vma) - 1;
2799                 zba = details->first_index;
2800                 if (zba < vba)
2801                         zba = vba;
2802                 zea = details->last_index;
2803                 if (zea > vea)
2804                         zea = vea;
2805
2806                 unmap_mapping_range_vma(vma,
2807                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2808                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2809                                 details);
2810         }
2811 }
2812
2813 /**
2814  * unmap_mapping_range - unmap the portion of all mmaps in the specified
2815  * address_space corresponding to the specified page range in the underlying
2816  * file.
2817  *
2818  * @mapping: the address space containing mmaps to be unmapped.
2819  * @holebegin: byte in first page to unmap, relative to the start of
2820  * the underlying file.  This will be rounded down to a PAGE_SIZE
2821  * boundary.  Note that this is different from truncate_pagecache(), which
2822  * must keep the partial page.  In contrast, we must get rid of
2823  * partial pages.
2824  * @holelen: size of prospective hole in bytes.  This will be rounded
2825  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
2826  * end of the file.
2827  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
2828  * but 0 when invalidating pagecache, don't throw away private data.
2829  */
2830 void unmap_mapping_range(struct address_space *mapping,
2831                 loff_t const holebegin, loff_t const holelen, int even_cows)
2832 {
2833         struct zap_details details = { };
2834         pgoff_t hba = holebegin >> PAGE_SHIFT;
2835         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2836
2837         /* Check for overflow. */
2838         if (sizeof(holelen) > sizeof(hlen)) {
2839                 long long holeend =
2840                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2841                 if (holeend & ~(long long)ULONG_MAX)
2842                         hlen = ULONG_MAX - hba + 1;
2843         }
2844
2845         details.check_mapping = even_cows ? NULL : mapping;
2846         details.first_index = hba;
2847         details.last_index = hba + hlen - 1;
2848         if (details.last_index < details.first_index)
2849                 details.last_index = ULONG_MAX;
2850
2851         i_mmap_lock_write(mapping);
2852         if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
2853                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2854         i_mmap_unlock_write(mapping);
2855 }
2856 EXPORT_SYMBOL(unmap_mapping_range);
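     /*
      * A minimal usage sketch (hypothetical caller, not code from this
      * file): to drop every mapping of a file range from 'holebegin'
      * through EOF, including private COWed copies, a filesystem would
      * call
      *
      *     unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
      *
      * while pagecache invalidation would pass even_cows == 0 so that
      * private data is preserved.
      */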
2857
2858 /*
2859  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2860  * but allow concurrent faults), and pte mapped but not yet locked.
2861  * We return with pte unmapped and unlocked.
2862  *
2863  * We return with the mmap_sem locked or unlocked in the same cases
2864  * as does filemap_fault().
2865  */
2866 int do_swap_page(struct vm_fault *vmf)
2867 {
2868         struct vm_area_struct *vma = vmf->vma;
2869         struct page *page = NULL, *swapcache;
2870         struct mem_cgroup *memcg;
2871         struct vma_swap_readahead swap_ra;
2872         swp_entry_t entry;
2873         pte_t pte;
2874         int locked;
2875         int exclusive = 0;
2876         int ret = 0;
2877         bool vma_readahead = swap_use_vma_readahead();
2878
2879         if (vma_readahead)
2880                 page = swap_readahead_detect(vmf, &swap_ra);
2881         if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
2882                 if (page)
2883                         put_page(page);
2884                 goto out;
2885         }
2886
2887         entry = pte_to_swp_entry(vmf->orig_pte);
2888         if (unlikely(non_swap_entry(entry))) {
2889                 if (is_migration_entry(entry)) {
2890                         migration_entry_wait(vma->vm_mm, vmf->pmd,
2891                                              vmf->address);
2892                 } else if (is_device_private_entry(entry)) {
2893                         /*
2894                          * For un-addressable device memory we call the pgmap
2895                          * fault handler callback. The callback must migrate
2896                          * the page back to some CPU accessible page.
2897                          */
2898                         ret = device_private_entry_fault(vma, vmf->address, entry,
2899                                                  vmf->flags, vmf->pmd);
2900                 } else if (is_hwpoison_entry(entry)) {
2901                         ret = VM_FAULT_HWPOISON;
2902                 } else {
2903                         print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
2904                         ret = VM_FAULT_SIGBUS;
2905                 }
2906                 goto out;
2907         }
2908         delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2909         if (!page)
2910                 page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
2911                                          vmf->address);
2912         if (!page) {
2913                 if (vma_readahead)
2914                         page = do_swap_page_readahead(entry,
2915                                 GFP_HIGHUSER_MOVABLE, vmf, &swap_ra);
2916                 else
2917                         page = swapin_readahead(entry,
2918                                 GFP_HIGHUSER_MOVABLE, vma, vmf->address);
2919                 if (!page) {
2920                         /*
2921                          * Back out if somebody else faulted in this pte
2922                          * while we released the pte lock.
2923                          */
2924                         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
2925                                         vmf->address, &vmf->ptl);
2926                         if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
2927                                 ret = VM_FAULT_OOM;
2928                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2929                         goto unlock;
2930                 }
2931
2932                 /* Had to read the page from swap area: Major fault */
2933                 ret = VM_FAULT_MAJOR;
2934                 count_vm_event(PGMAJFAULT);
2935                 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
2936         } else if (PageHWPoison(page)) {
2937                 /*
2938                  * hwpoisoned dirty swapcache pages are kept for killing
2939                  * owner processes (which may be unknown at hwpoison time)
2940                  */
2941                 ret = VM_FAULT_HWPOISON;
2942                 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2943                 swapcache = page;
2944                 goto out_release;
2945         }
2946
2947         swapcache = page;
2948         locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
2949
2950         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2951         if (!locked) {
2952                 ret |= VM_FAULT_RETRY;
2953                 goto out_release;
2954         }
2955
2956         /*
2957          * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
2958          * release the swapcache from under us.  The page pin, and pte_same
2959          * test below, are not enough to exclude that.  Even if it is still
2960          * swapcache, we need to check that the page's swap has not changed.
2961          */
2962         if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
2963                 goto out_page;
2964
2965         page = ksm_might_need_to_copy(page, vma, vmf->address);
2966         if (unlikely(!page)) {
2967                 ret = VM_FAULT_OOM;
2968                 page = swapcache;
2969                 goto out_page;
2970         }
2971
2972         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL,
2973                                 &memcg, false)) {
2974                 ret = VM_FAULT_OOM;
2975                 goto out_page;
2976         }
2977
2978         /*
2979          * Back out if somebody else already faulted in this pte.
2980          */
2981         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
2982                         &vmf->ptl);
2983         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
2984                 goto out_nomap;
2985
2986         if (unlikely(!PageUptodate(page))) {
2987                 ret = VM_FAULT_SIGBUS;
2988                 goto out_nomap;
2989         }
2990
2991         /*
2992          * The page isn't present yet, go ahead with the fault.
2993          *
2994          * Be careful about the sequence of operations here.
2995          * To get its accounting right, reuse_swap_page() must be called
2996          * while the page is counted on swap but not yet in mapcount i.e.
2997          * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
2998          * must be called after the swap_free(), or it will never succeed.
2999          */
3000
3001         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3002         dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3003         pte = mk_pte(page, vma->vm_page_prot);
3004         if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3005                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3006                 vmf->flags &= ~FAULT_FLAG_WRITE;
3007                 ret |= VM_FAULT_WRITE;
3008                 exclusive = RMAP_EXCLUSIVE;
3009         }
3010         flush_icache_page(vma, page);
3011         if (pte_swp_soft_dirty(vmf->orig_pte))
3012                 pte = pte_mksoft_dirty(pte);
3013         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3014         vmf->orig_pte = pte;
3015         if (page == swapcache) {
3016                 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3017                 mem_cgroup_commit_charge(page, memcg, true, false);
3018                 activate_page(page);
3019         } else { /* ksm created a completely new copy */
3020                 page_add_new_anon_rmap(page, vma, vmf->address, false);
3021                 mem_cgroup_commit_charge(page, memcg, false, false);
3022                 lru_cache_add_active_or_unevictable(page, vma);
3023         }
3024
3025         swap_free(entry);
3026         if (mem_cgroup_swap_full(page) ||
3027             (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
3028                 try_to_free_swap(page);
3029         unlock_page(page);
3030         if (page != swapcache) {
3031                 /*
3032                  * Hold the lock to avoid the swap entry being reused
3033                  * until we take the PT lock for the pte_same() check
3034                  * (to avoid false positives from pte_same). For
3035                  * further safety release the lock after the swap_free
3036                  * so that the swap count won't change under a
3037                  * parallel locked swapcache.
3038                  */
3039                 unlock_page(swapcache);
3040                 put_page(swapcache);
3041         }
3042
3043         if (vmf->flags & FAULT_FLAG_WRITE) {
3044                 ret |= do_wp_page(vmf);
3045                 if (ret & VM_FAULT_ERROR)
3046                         ret &= VM_FAULT_ERROR;
3047                 goto out;
3048         }
3049
3050         /* No need to invalidate - it was non-present before */
3051         update_mmu_cache(vma, vmf->address, vmf->pte);
3052 unlock:
3053         pte_unmap_unlock(vmf->pte, vmf->ptl);
3054 out:
3055         return ret;
3056 out_nomap:
3057         mem_cgroup_cancel_charge(page, memcg, false);
3058         pte_unmap_unlock(vmf->pte, vmf->ptl);
3059 out_page:
3060         unlock_page(page);
3061 out_release:
3062         put_page(page);
3063         if (page != swapcache) {
3064                 unlock_page(swapcache);
3065                 put_page(swapcache);
3066         }
3067         return ret;
3068 }
3069
3070 /*
3071  * We enter with non-exclusive mmap_sem (to exclude vma changes,
3072  * but allow concurrent faults), and pte mapped but not yet locked.
3073  * We return with mmap_sem still held, but pte unmapped and unlocked.
3074  */
3075 static int do_anonymous_page(struct vm_fault *vmf)
3076 {
3077         struct vm_area_struct *vma = vmf->vma;
3078         struct mem_cgroup *memcg;
3079         struct page *page;
3080         int ret = 0;
3081         pte_t entry;
3082
3083         /* File mapping without ->vm_ops ? */
3084         if (vma->vm_flags & VM_SHARED)
3085                 return VM_FAULT_SIGBUS;
3086
3087         /*
3088          * Use pte_alloc() instead of pte_alloc_map().  We can't run
3089          * pte_offset_map() on pmds where a huge pmd might be created
3090          * from a different thread.
3091          *
3092          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
3093          * parallel threads are excluded by other means.
3094          *
3095          * Here we only have down_read(mmap_sem).
3096          */
3097         if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))
3098                 return VM_FAULT_OOM;
3099
3100         /* See the comment in pte_alloc_one_map() */
3101         if (unlikely(pmd_trans_unstable(vmf->pmd)))
3102                 return 0;
3103
3104         /* Use the zero-page for reads */
3105         if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3106                         !mm_forbids_zeropage(vma->vm_mm)) {
3107                 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3108                                                 vma->vm_page_prot));
3109                 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3110                                 vmf->address, &vmf->ptl);
3111                 if (!pte_none(*vmf->pte))
3112                         goto unlock;
3113                 ret = check_stable_address_space(vma->vm_mm);
3114                 if (ret)
3115                         goto unlock;
3116                 /* Deliver the page fault to userland, check inside PT lock */
3117                 if (userfaultfd_missing(vma)) {
3118                         pte_unmap_unlock(vmf->pte, vmf->ptl);
3119                         return handle_userfault(vmf, VM_UFFD_MISSING);
3120                 }
3121                 goto setpte;
3122         }
3123
3124         /* Allocate our own private page. */
3125         if (unlikely(anon_vma_prepare(vma)))
3126                 goto oom;
3127         page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3128         if (!page)
3129                 goto oom;
3130
3131         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
3132                 goto oom_free_page;
3133
3134         /*
3135          * The memory barrier inside __SetPageUptodate makes sure that
3136          * preceding stores to the page contents become visible before
3137          * the set_pte_at() write.
3138          */
3139         __SetPageUptodate(page);
3140
3141         entry = mk_pte(page, vma->vm_page_prot);
3142         if (vma->vm_flags & VM_WRITE)
3143                 entry = pte_mkwrite(pte_mkdirty(entry));
3144
3145         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3146                         &vmf->ptl);
3147         if (!pte_none(*vmf->pte))
3148                 goto release;
3149
3150         ret = check_stable_address_space(vma->vm_mm);
3151         if (ret)
3152                 goto release;
3153
3154         /* Deliver the page fault to userland, check inside PT lock */
3155         if (userfaultfd_missing(vma)) {
3156                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3157                 mem_cgroup_cancel_charge(page, memcg, false);
3158                 put_page(page);
3159                 return handle_userfault(vmf, VM_UFFD_MISSING);
3160         }
3161
3162         inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3163         page_add_new_anon_rmap(page, vma, vmf->address, false);
3164         mem_cgroup_commit_charge(page, memcg, false, false);
3165         lru_cache_add_active_or_unevictable(page, vma);
3166 setpte:
3167         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3168
3169         /* No need to invalidate - it was non-present before */
3170         update_mmu_cache(vma, vmf->address, vmf->pte);
3171 unlock:
3172         pte_unmap_unlock(vmf->pte, vmf->ptl);
3173         return ret;
3174 release:
3175         mem_cgroup_cancel_charge(page, memcg, false);
3176         put_page(page);
3177         goto unlock;
3178 oom_free_page:
3179         put_page(page);
3180 oom:
3181         return VM_FAULT_OOM;
3182 }
3183
3184 /*
3185  * The mmap_sem must have been held on entry, and may have been
3186  * released depending on flags and vma->vm_ops->fault() return value.
3187  * See filemap_fault() and __lock_page_retry().
3188  */
3189 static int __do_fault(struct vm_fault *vmf)
3190 {
3191         struct vm_area_struct *vma = vmf->vma;
3192         int ret;
3193
3194         /*
3195          * Preallocate pte before we take page_lock because this might lead to
3196          * deadlocks for memcg reclaim which waits for pages under writeback:
3197          *                              lock_page(A)
3198          *                              SetPageWriteback(A)
3199          *                              unlock_page(A)
3200          * lock_page(B)
3201          *                              lock_page(B)
3202          * pte_alloc_one
3203          *   shrink_page_list
3204          *     wait_on_page_writeback(A)
3205          *                              SetPageWriteback(B)
3206          *                              unlock_page(B)
3207          *                              # flush A, B to clear the writeback
3208          */
3209         if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3210                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
3211                                                   vmf->address);
3212                 if (!vmf->prealloc_pte)
3213                         return VM_FAULT_OOM;
3214                 smp_wmb(); /* See comment in __pte_alloc() */
3215         }
3216
3217         ret = vma->vm_ops->fault(vmf);
3218         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
3219                             VM_FAULT_DONE_COW)))
3220                 return ret;
3221
3222         if (unlikely(PageHWPoison(vmf->page))) {
3223                 if (ret & VM_FAULT_LOCKED)
3224                         unlock_page(vmf->page);
3225                 put_page(vmf->page);
3226                 vmf->page = NULL;
3227                 return VM_FAULT_HWPOISON;
3228         }
3229
3230         if (unlikely(!(ret & VM_FAULT_LOCKED)))
3231                 lock_page(vmf->page);
3232         else
3233                 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3234
3235         return ret;
3236 }
3237
3238 /*
3239  * The ordering of these checks is important for pmds with _PAGE_DEVMAP set.
3240  * If we check pmd_trans_unstable() first we will trip the bad_pmd() check
3241  * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly
3242  * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
3243  */
3244 static int pmd_devmap_trans_unstable(pmd_t *pmd)
3245 {
3246         return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
3247 }
3248
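     /*
      * Make sure a page table is installed under vmf->pmd (consuming the
      * preallocated one when available), then map and lock the pte for
      * vmf->address.  Returns VM_FAULT_NOPAGE if a huge or unstable pmd
      * is found instead, or VM_FAULT_OOM if the page table allocation
      * fails.
      */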
3249 static int pte_alloc_one_map(struct vm_fault *vmf)
3250 {
3251         struct vm_area_struct *vma = vmf->vma;
3252
3253         if (!pmd_none(*vmf->pmd))
3254                 goto map_pte;
3255         if (vmf->prealloc_pte) {
3256                 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3257                 if (unlikely(!pmd_none(*vmf->pmd))) {
3258                         spin_unlock(vmf->ptl);
3259                         goto map_pte;
3260                 }
3261
3262                 atomic_long_inc(&vma->vm_mm->nr_ptes);
3263                 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3264                 spin_unlock(vmf->ptl);
3265                 vmf->prealloc_pte = NULL;
3266         } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) {
3267                 return VM_FAULT_OOM;
3268         }
3269 map_pte:
3270         /*
3271          * If a huge pmd materialized under us just retry later.  Use
3272          * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of
3273          * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge
3274          * under us and then back to pmd_none, as a result of MADV_DONTNEED
3275          * running immediately after a huge pmd fault in a different thread of
3276          * this mm, in turn leading to a misleading pmd_trans_huge() retval.
3277          * All we have to ensure is that it is a regular pmd that we can walk
3278          * with pte_offset_map() and we can do that through an atomic read in
3279          * C, which is what pmd_trans_unstable() provides.
3280          */
3281         if (pmd_devmap_trans_unstable(vmf->pmd))
3282                 return VM_FAULT_NOPAGE;
3283
3284         /*
3285          * At this point we know that our vmf->pmd points to a page of ptes
3286          * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge()
3287          * for the duration of the fault.  If a racing MADV_DONTNEED runs and
3288          * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3289          * be valid and we will re-check to make sure the vmf->pte isn't
3290          * pte_none() under vmf->ptl protection when we return to
3291          * alloc_set_pte().
3292          */
3293         vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3294                         &vmf->ptl);
3295         return 0;
3296 }
3297
3298 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3299
3300 #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
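     /*
      * A fault address is suitable for a huge pmd only if the vma start
      * and its file offset are huge-page aligned with respect to each
      * other and the whole huge page fits inside the vma.
      */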
3301 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
3302                 unsigned long haddr)
3303 {
3304         if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
3305                         (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
3306                 return false;
3307         if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
3308                 return false;
3309         return true;
3310 }
3311
3312 static void deposit_prealloc_pte(struct vm_fault *vmf)
3313 {
3314         struct vm_area_struct *vma = vmf->vma;
3315
3316         pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3317         /*
3318          * We are going to consume the prealloc table,
3319          * count that as nr_ptes.
3320          */
3321         atomic_long_inc(&vma->vm_mm->nr_ptes);
3322         vmf->prealloc_pte = NULL;
3323 }
3324
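     /*
      * Map the compound page that contains @page with a single huge pmd
      * at the fault address.  Returns 0 on success, VM_FAULT_FALLBACK
      * when the caller should map individual ptes instead, or
      * VM_FAULT_OOM if an arch-required page table cannot be allocated.
      */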
3325 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
3326 {
3327         struct vm_area_struct *vma = vmf->vma;
3328         bool write = vmf->flags & FAULT_FLAG_WRITE;
3329         unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3330         pmd_t entry;
3331         int i, ret;
3332
3333         if (!transhuge_vma_suitable(vma, haddr))
3334                 return VM_FAULT_FALLBACK;
3335
3336         ret = VM_FAULT_FALLBACK;
3337         page = compound_head(page);
3338
3339         /*
3340          * Archs like ppc64 need additional space to store information
3341          * related to pte entry. Use the preallocated table for that.
3342          */
3343         if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3344                 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address);
3345                 if (!vmf->prealloc_pte)
3346                         return VM_FAULT_OOM;
3347                 smp_wmb(); /* See comment in __pte_alloc() */
3348         }
3349
3350         vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3351         if (unlikely(!pmd_none(*vmf->pmd)))
3352                 goto out;
3353
3354         for (i = 0; i < HPAGE_PMD_NR; i++)
3355                 flush_icache_page(vma, page + i);
3356
3357         entry = mk_huge_pmd(page, vma->vm_page_prot);
3358         if (write)
3359                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3360
3361         add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR);
3362         page_add_file_rmap(page, true);
3363         /*
3364          * deposit and withdraw with pmd lock held
3365          */
3366         if (arch_needs_pgtable_deposit())
3367                 deposit_prealloc_pte(vmf);
3368
3369         set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3370
3371         update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3372
3373         /* fault is handled */
3374         ret = 0;
3375         count_vm_event(THP_FILE_MAPPED);
3376 out:
3377         spin_unlock(vmf->ptl);
3378         return ret;
3379 }
3380 #else
3381 static int do_set_pmd(struct vm_fault *vmf, struct page *page)
3382 {
3383         BUILD_BUG();
3384         return 0;
3385 }
3386 #endif
3387
3388 /**
3389  * alloc_set_pte - setup new PTE entry for given page and add reverse page
3390  * mapping. If needed, the function allocates a page table or uses the pre-allocated one.
3391  *
3392  * @vmf: fault environment
3393  * @memcg: memcg to charge page (only for private mappings)
3394  * @page: page to map
3395  *
3396  * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3397  * return.
3398  *
3399  * Target users are the page fault handler itself and implementations of
3400  * vm_ops->map_pages.
3401  */
3402 int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
3403                 struct page *page)
3404 {
3405         struct vm_area_struct *vma = vmf->vma;
3406         bool write = vmf->flags & FAULT_FLAG_WRITE;
3407         pte_t entry;
3408         int ret;
3409
3410         if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
3411                         IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
3412                 /* THP on COW? */
3413                 VM_BUG_ON_PAGE(memcg, page);
3414
3415                 ret = do_set_pmd(vmf, page);
3416                 if (ret != VM_FAULT_FALLBACK)
3417                         return ret;
3418         }
3419
3420         if (!vmf->pte) {
3421                 ret = pte_alloc_one_map(vmf);
3422                 if (ret)
3423                         return ret;
3424         }
3425
3426         /* Re-check under ptl */
3427         if (unlikely(!pte_none(*vmf->pte)))
3428                 return VM_FAULT_NOPAGE;
3429
3430         flush_icache_page(vma, page);
3431         entry = mk_pte(page, vma->vm_page_prot);
3432         if (write)
3433                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3434         /* copy-on-write page */
3435         if (write && !(vma->vm_flags & VM_SHARED)) {
3436                 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3437                 page_add_new_anon_rmap(page, vma, vmf->address, false);
3438                 mem_cgroup_commit_charge(page, memcg, false, false);
3439                 lru_cache_add_active_or_unevictable(page, vma);
3440         } else {
3441                 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3442                 page_add_file_rmap(page, false);
3443         }
3444         set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3445
3446         /* no need to invalidate: a not-present page won't be cached */
3447         update_mmu_cache(vma, vmf->address, vmf->pte);
3448
3449         return 0;
3450 }
3451
3452
3453 /**
3454  * finish_fault - finish page fault once we have prepared the page to fault
3455  *
3456  * @vmf: structure describing the fault
3457  *
3458  * This function handles all that is needed to finish a page fault once the
3459  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
3460  * given page, adds reverse page mapping, handles memcg charges and LRU
3461  * addition. The function returns 0 on success, VM_FAULT_ code in case of
3462  * addition. The function returns 0 on success, or a VM_FAULT_ code in case of
3463  *
3464  * The function expects the page to be locked and on success it consumes a
3465  * reference to the page being mapped (for the PTE which maps it).
3466  */
3467 int finish_fault(struct vm_fault *vmf)
3468 {
3469         struct page *page;
3470         int ret = 0;
3471
3472         /* Did we COW the page? */
3473         if ((vmf->flags & FAULT_FLAG_WRITE) &&
3474             !(vmf->vma->vm_flags & VM_SHARED))
3475                 page = vmf->cow_page;
3476         else
3477                 page = vmf->page;
3478
3479         /*
3480          * check even for read faults because we might have lost our CoWed
3481          * page
3482          */
3483         if (!(vmf->vma->vm_flags & VM_SHARED))
3484                 ret = check_stable_address_space(vmf->vma->vm_mm);
3485         if (!ret)
3486                 ret = alloc_set_pte(vmf, vmf->memcg, page);
3487         if (vmf->pte)
3488                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3489         return ret;
3490 }
3491
3492 static unsigned long fault_around_bytes __read_mostly =
3493         rounddown_pow_of_two(65536);
3494
3495 #ifdef CONFIG_DEBUG_FS
3496 static int fault_around_bytes_get(void *data, u64 *val)
3497 {
3498         *val = fault_around_bytes;
3499         return 0;
3500 }
3501
3502 /*
3503  * fault_around_pages() and fault_around_mask() expect fault_around_bytes
3504  * rounded down to the nearest page order. It's what do_fault_around() expects to
3505  * see.
3506  */
3507 static int fault_around_bytes_set(void *data, u64 val)
3508 {
3509         if (val / PAGE_SIZE > PTRS_PER_PTE)
3510                 return -EINVAL;
3511         if (val > PAGE_SIZE)
3512                 fault_around_bytes = rounddown_pow_of_two(val);
3513         else
3514                 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
3515         return 0;
3516 }
3517 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
3518                 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
3519
3520 static int __init fault_around_debugfs(void)
3521 {
3522         void *ret;
3523
3524         ret = debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
3525                         &fault_around_bytes_fops);
3526         if (!ret)
3527                 pr_warn("Failed to create fault_around_bytes in debugfs");
3528         return 0;
3529 }
3530 late_initcall(fault_around_debugfs);
3531 #endif
3532
3533 /*
3534  * do_fault_around() tries to map a few pages around the fault address. The hope
3535  * is that the pages will be needed soon and this will lower the number of
3536  * faults to handle.
3537  *
3538  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
3539  * not ready to be mapped: not up-to-date, locked, etc.
3540  *
3541  * This function is called with the page table lock taken. In the split ptlock
3542  * case the page table lock protects only those entries which belong to
3543  * the page table corresponding to the fault address.
3544  *
3545  * This function doesn't cross the VMA boundaries, in order to call map_pages()
3546  * only once.
3547  *
3548  * fault_around_pages() defines how many pages we'll try to map.
3549  * do_fault_around() expects it to return a power of two less than or equal to
3550  * PTRS_PER_PTE.
3551  *
3552  * The virtual address of the area that we map is naturally aligned to the
3553  * fault_around_pages() value (and therefore to page order).  This way it's
3554  * easier to guarantee that we don't cross page table boundaries.
3555  */
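     /*
      * Worked example (a sketch assuming 4K pages and the default 64K
      * fault_around_bytes): a fault at 0x12345000 gives nr_pages = 16 and
      * mask = ~0xffff, so vmf->address is rounded down to 0x12340000 and
      * up to 16 ptes starting there are populated, clipped to the vma
      * boundaries and to the page table containing the faulting pte.
      */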
3556 static int do_fault_around(struct vm_fault *vmf)
3557 {
3558         unsigned long address = vmf->address, nr_pages, mask;
3559         pgoff_t start_pgoff = vmf->pgoff;
3560         pgoff_t end_pgoff;
3561         int off, ret = 0;
3562
3563         nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
3564         mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
3565
3566         vmf->address = max(address & mask, vmf->vma->vm_start);
3567         off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
3568         start_pgoff -= off;
3569
3570         /*
3571          *  end_pgoff is either end of page table or end of vma
3572          *  or fault_around_pages() from start_pgoff, depending on what is nearest.
3573          */
3574         end_pgoff = start_pgoff -
3575                 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
3576                 PTRS_PER_PTE - 1;
3577         end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
3578                         start_pgoff + nr_pages - 1);
3579
3580         if (pmd_none(*vmf->pmd)) {
3581                 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm,
3582                                                   vmf->address);
3583                 if (!vmf->prealloc_pte)
3584                         goto out;
3585                 smp_wmb(); /* See comment in __pte_alloc() */
3586         }
3587
3588         vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
3589
3590         /* Huge page is mapped? Page fault is solved */
3591         if (pmd_trans_huge(*vmf->pmd)) {
3592                 ret = VM_FAULT_NOPAGE;
3593                 goto out;
3594         }
3595
3596         /* ->map_pages() hasn't done anything useful. Cold page cache? */
3597         if (!vmf->pte)
3598                 goto out;
3599
3600         /* check if the page fault is solved */
3601         vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
3602         if (!pte_none(*vmf->pte))
3603                 ret = VM_FAULT_NOPAGE;
3604         pte_unmap_unlock(vmf->pte, vmf->ptl);
3605 out:
3606         vmf->address = address;
3607         vmf->pte = NULL;
3608         return ret;
3609 }
3610
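     /*
      * Handle a read fault on a file-backed vma: try to map the page and
      * its neighbours straight from the page cache via ->map_pages(),
      * falling back to a full ->fault() call when that does not resolve
      * the fault.
      */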
3611 static int do_read_fault(struct vm_fault *vmf)
3612 {
3613         struct vm_area_struct *vma = vmf->vma;
3614         int ret = 0;
3615
3616         /*
3617          * Let's call ->map_pages() first and use ->fault() as fallback
3618          * if the page at the offset is not ready to be mapped (cold cache or
3619          * something).
3620          */
3621         if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
3622                 ret = do_fault_around(vmf);
3623                 if (ret)
3624                         return ret;
3625         }
3626
3627         ret = __do_fault(vmf);
3628         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3629                 return ret;
3630
3631         ret |= finish_fault(vmf);
3632         unlock_page(vmf->page);
3633         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3634                 put_page(vmf->page);
3635         return ret;
3636 }
3637
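     /*
      * Handle a write fault on a private file mapping: read the file page
      * via ->fault(), copy its contents into a freshly allocated
      * anonymous page and map that copy writable.
      */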
3638 static int do_cow_fault(struct vm_fault *vmf)
3639 {
3640         struct vm_area_struct *vma = vmf->vma;
3641         int ret;
3642
3643         if (unlikely(anon_vma_prepare(vma)))
3644                 return VM_FAULT_OOM;
3645
3646         vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
3647         if (!vmf->cow_page)
3648                 return VM_FAULT_OOM;
3649
3650         if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
3651                                 &vmf->memcg, false)) {
3652                 put_page(vmf->cow_page);
3653                 return VM_FAULT_OOM;
3654         }
3655
3656         ret = __do_fault(vmf);
3657         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3658                 goto uncharge_out;
3659         if (ret & VM_FAULT_DONE_COW)
3660                 return ret;
3661
3662         copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
3663         __SetPageUptodate(vmf->cow_page);
3664
3665         ret |= finish_fault(vmf);
3666         unlock_page(vmf->page);
3667         put_page(vmf->page);
3668         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3669                 goto uncharge_out;
3670         return ret;
3671 uncharge_out:
3672         mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
3673         put_page(vmf->cow_page);
3674         return ret;
3675 }
3676
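     /*
      * Handle a write fault on a shared file mapping: fault the page in
      * via ->fault(), let the filesystem prepare it for writing through
      * ->page_mkwrite() if provided, then map it writable and mark it
      * dirty.
      */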
3677 static int do_shared_fault(struct vm_fault *vmf)
3678 {
3679         struct vm_area_struct *vma = vmf->vma;
3680         int ret, tmp;
3681
3682         ret = __do_fault(vmf);
3683         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
3684                 return ret;
3685
3686         /*
3687          * Check if the backing address space wants to know that the page is
3688          * about to become writable
3689          */
3690         if (vma->vm_ops->page_mkwrite) {
3691                 unlock_page(vmf->page);
3692                 tmp = do_page_mkwrite(vmf);
3693                 if (unlikely(!tmp ||
3694                                 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3695                         put_page(vmf->page);
3696                         return tmp;
3697                 }
3698         }
3699
3700         ret |= finish_fault(vmf);
3701         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
3702                                         VM_FAULT_RETRY))) {
3703                 unlock_page(vmf->page);
3704                 put_page(vmf->page);
3705                 return ret;
3706         }
3707
3708         fault_dirty_shared_page(vma, vmf->page);
3709         return ret;
3710 }
3711
3712 /*
3713  * We enter with non-exclusive mmap_sem (to exclude vma changes,
3714  * but allow concurrent faults).
3715  * The mmap_sem may have been released depending on flags and our
3716  * return value.  See filemap_fault() and __lock_page_or_retry().
3717  */
3718 static int do_fault(struct vm_fault *vmf)
3719 {
3720         struct vm_area_struct *vma = vmf->vma;
3721         int ret;
3722
3723         /*
3724          * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
3725          */
3726         if (!vma->vm_ops->fault) {
3727                 /*
3728                  * If we find a migration pmd entry or a none pmd entry, which
3729                  * should never happen, return SIGBUS
3730                  */
3731                 if (unlikely(!pmd_present(*vmf->pmd)))
3732                         ret = VM_FAULT_SIGBUS;
3733                 else {
3734                         vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
3735                                                        vmf->pmd,
3736                                                        vmf->address,
3737                                                        &vmf->ptl);
3738                         /*
3739                          * Make sure this is not a temporary clearing of pte
3740                          * by holding ptl and checking again. A R/M/W update
3741                          * of pte involves: take ptl, clearing the pte so that
3742                          * of pte involves taking ptl, clearing the pte so that
3743                          * we don't have concurrent modification by hardware,
3744                          * followed by an update.
3745                         if (unlikely(pte_none(*vmf->pte)))
3746                                 ret = VM_FAULT_SIGBUS;
3747                         else
3748                                 ret = VM_FAULT_NOPAGE;
3749
3750                         pte_unmap_unlock(vmf->pte, vmf->ptl);
3751                 }
3752         } else if (!(vmf->flags & FAULT_FLAG_WRITE))
3753                 ret = do_read_fault(vmf);
3754         else if (!(vma->vm_flags & VM_SHARED))
3755                 ret = do_cow_fault(vmf);
3756         else
3757                 ret = do_shared_fault(vmf);
3758
3759         /* preallocated pagetable is unused: free it */
3760         if (vmf->prealloc_pte) {
3761                 pte_free(vma->vm_mm, vmf->prealloc_pte);
3762                 vmf->prealloc_pte = NULL;
3763         }
3764         return ret;
3765 }
3766
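     /*
      * Account a NUMA hinting fault against @page and ask the memory
      * policy code whether the page is misplaced; returns the node to
      * migrate to, or -1 when the page should stay where it is.
      */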
3767 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
3768                                 unsigned long addr, int page_nid,
3769                                 int *flags)
3770 {
3771         get_page(page);
3772
3773         count_vm_numa_event(NUMA_HINT_FAULTS);
3774         if (page_nid == numa_node_id()) {
3775                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
3776                 *flags |= TNF_FAULT_LOCAL;
3777         }
3778
3779         return mpol_misplaced(page, vma, addr);
3780 }
3781
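     /*
      * Handle a fault on a pte made PROT_NONE by the NUMA balancing
      * scanner: restore the original protections, then decide whether the
      * page should be migrated to the faulting task's node.
      */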
3782 static int do_numa_page(struct vm_fault *vmf)
3783 {
3784         struct vm_area_struct *vma = vmf->vma;
3785         struct page *page = NULL;
3786         int page_nid = -1;
3787         int last_cpupid;
3788         int target_nid;
3789         bool migrated = false;
3790         pte_t pte;
3791         bool was_writable = pte_savedwrite(vmf->orig_pte);
3792         int flags = 0;
3793
3794         /*
3795          * The "pte" at this point cannot be used safely without
3796          * validation through pte_unmap_same(). It's of NUMA type but
3797          * the pfn may be screwed if the read is non atomic.
3798          */
3799         vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
3800         spin_lock(vmf->ptl);
3801         if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
3802                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3803                 goto out;
3804         }
3805
3806         /*
3807          * Make it present again. Depending on how the arch implements
3808          * non-accessible ptes, some can allow access by kernel mode.
3809          */
3810         pte = ptep_modify_prot_start(vma->vm_mm, vmf->address, vmf->pte);
3811         pte = pte_modify(pte, vma->vm_page_prot);
3812         pte = pte_mkyoung(pte);
3813         if (was_writable)
3814                 pte = pte_mkwrite(pte);
3815         ptep_modify_prot_commit(vma->vm_mm, vmf->address, vmf->pte, pte);
3816         update_mmu_cache(vma, vmf->address, vmf->pte);
3817
3818         page = vm_normal_page(vma, vmf->address, pte);
3819         if (!page) {
3820                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3821                 return 0;
3822         }
3823
3824         /* TODO: handle PTE-mapped THP */
3825         if (PageCompound(page)) {
3826                 pte_unmap_unlock(vmf->pte, vmf->ptl);
3827                 return 0;
3828         }
3829
3830         /*
3831          * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
3832          * much anyway since they can be in shared cache state. This misses
3833          * the case where a mapping is writable but the process never writes
3834          * to it but pte_write gets cleared during protection updates and
3835          * pte_dirty has unpredictable behaviour between PTE scan updates,
3836          * background writeback, dirty balancing and application behaviour.
3837          */
3838         if (!pte_write(pte))
3839                 flags |= TNF_NO_GROUP;
3840
3841         /*
3842          * Flag if the page is shared between multiple address spaces. This
3843          * is later used when determining whether to group tasks together
3844          */
3845         if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
3846                 flags |= TNF_SHARED;
3847
3848         last_cpupid = page_cpupid_last(page);
3849         page_nid = page_to_nid(page);
3850         target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
3851                         &flags);
3852         pte_unmap_unlock(vmf->pte, vmf->ptl);
3853         if (target_nid == -1) {
3854                 put_page(page);
3855                 goto out;
3856         }
3857
3858         /* Migrate to the requested node */
3859         migrated = migrate_misplaced_page(page, vma, target_nid);
3860         if (migrated) {
3861                 page_nid = target_nid;
3862                 flags |= TNF_MIGRATED;
3863         } else
3864                 flags |= TNF_MIGRATE_FAIL;
3865
3866 out:
3867         if (page_nid != -1)
3868                 task_numa_fault(last_cpupid, page_nid, 1, flags);
3869         return 0;
3870 }
3871
3872 static inline int create_huge_pmd(struct vm_fault *vmf)
3873 {
3874         if (vma_is_anonymous(vmf->vma))
3875                 return do_huge_pmd_anonymous_page(vmf);
3876         if (vmf->vma->vm_ops->huge_fault)
3877                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
3878         return VM_FAULT_FALLBACK;
3879 }
3880
3881 static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
3882 {
3883         if (vma_is_anonymous(vmf->vma))
3884                 return do_huge_pmd_wp_page(vmf, orig_pmd);
3885         if (vmf->vma->vm_ops->huge_fault)
3886                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
3887
3888         /* COW handled on pte level: split pmd */
3889         VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
3890         __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
3891
3892         return VM_FAULT_FALLBACK;
3893 }
3894
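     /*
      * A vma with none of VM_READ, VM_WRITE or VM_EXEC is genuinely
      * PROT_NONE, so protnone ptes within it are not NUMA hinting faults.
      */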
3895 static inline bool vma_is_accessible(struct vm_area_struct *vma)
3896 {
3897         return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
3898 }
3899
3900 static int create_huge_pud(struct vm_fault *vmf)
3901 {
3902 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3903         /* No support for anonymous transparent PUD pages yet */
3904         if (vma_is_anonymous(vmf->vma))
3905                 return VM_FAULT_FALLBACK;
3906         if (vmf->vma->vm_ops->huge_fault)
3907                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
3908 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3909         return VM_FAULT_FALLBACK;
3910 }
3911
3912 static int wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
3913 {
3914 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3915         /* No support for anonymous transparent PUD pages yet */
3916         if (vma_is_anonymous(vmf->vma))
3917                 return VM_FAULT_FALLBACK;
3918         if (vmf->vma->vm_ops->huge_fault)
3919                 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
3920 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3921         return VM_FAULT_FALLBACK;
3922 }
3923
3924 /*
3925  * These routines also need to handle stuff like marking pages dirty
3926  * and/or accessed for architectures that don't do it in hardware (most
3927  * RISC architectures).  The early dirtying is also good on the i386.
3928  *
3929  * There is also a hook called "update_mmu_cache()" that architectures
3930  * with external mmu caches can use to update those (ie the Sparc or
3931  * PowerPC hashed page tables that act as extended TLBs).
3932  *
3933  * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow
3934  * concurrent faults).
3935  *
3936  * The mmap_sem may have been released depending on flags and our return value.
3937  * See filemap_fault() and __lock_page_or_retry().
3938  */
3939 static int handle_pte_fault(struct vm_fault *vmf)
3940 {
3941         pte_t entry;
3942
3943         if (unlikely(pmd_none(*vmf->pmd))) {
3944                 /*
3945                  * Leave __pte_alloc() until later: because vm_ops->fault may
3946                  * want to allocate a huge page, and if we expose the page table
3947                  * for an instant, it will be difficult to retract from
3948                  * concurrent faults and from rmap lookups.
3949                  */
3950                 vmf->pte = NULL;
3951         } else {
3952                 /* See comment in pte_alloc_one_map() */
3953                 if (pmd_devmap_trans_unstable(vmf->pmd))
3954                         return 0;
3955                 /*
3956                  * A regular pmd is established and it can't morph into a huge
3957                  * pmd from under us anymore at this point because we hold the
3958                  * mmap_sem read mode and khugepaged takes it in write mode.
3959                  * So now it's safe to run pte_offset_map().
3960                  */
3961                 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
3962                 vmf->orig_pte = *vmf->pte;
3963
3964                 /*
3965                  * some architectures can have larger ptes than wordsize,
3966                  * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and
3967                  * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
3968                  * atomic accesses.  The code below just needs a consistent
3969                  * view for the ifs and we later double check anyway with the
3970                  * ptl lock held. So here a barrier will do.
3971                  */
3972                 barrier();
3973                 if (pte_none(vmf->orig_pte)) {
3974                         pte_unmap(vmf->pte);
3975                         vmf->pte = NULL;
3976                 }
3977         }
3978
3979         if (!vmf->pte) {
3980                 if (vma_is_anonymous(vmf->vma))
3981                         return do_anonymous_page(vmf);
3982                 else
3983                         return do_fault(vmf);
3984         }
3985
3986         if (!pte_present(vmf->orig_pte))
3987                 return do_swap_page(vmf);
3988
3989         if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
3990                 return do_numa_page(vmf);
3991
3992         vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
3993         spin_lock(vmf->ptl);
3994         entry = vmf->orig_pte;
3995         if (unlikely(!pte_same(*vmf->pte, entry)))
3996                 goto unlock;
3997         if (vmf->flags & FAULT_FLAG_WRITE) {
3998                 if (!pte_write(entry))
3999                         return do_wp_page(vmf);
4000                 entry = pte_mkdirty(entry);
4001         }
4002         entry = pte_mkyoung(entry);
4003         if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4004                                 vmf->flags & FAULT_FLAG_WRITE)) {
4005                 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4006         } else {
4007                 /*
4008                  * This is needed only for protection faults but the arch code
4009                  * is not yet telling us if this is a protection fault or not.
4010                  * This still avoids useless tlb flushes for .text page faults
4011                  * with threads.
4012                  */
4013                 if (vmf->flags & FAULT_FLAG_WRITE)
4014                         flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4015         }
4016 unlock:
4017         pte_unmap_unlock(vmf->pte, vmf->ptl);
4018         return 0;
4019 }
4020
4021 /*
4022  * By the time we get here, we already hold the mm semaphore
4023  *
4024  * The mmap_sem may have been released depending on flags and our
4025  * return value.  See filemap_fault() and __lock_page_or_retry().
4026  */
4027 static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4028                 unsigned int flags)
4029 {
4030         struct vm_fault vmf = {
4031                 .vma = vma,
4032                 .address = address & PAGE_MASK,
4033                 .flags = flags,
4034                 .pgoff = linear_page_index(vma, address),
4035                 .gfp_mask = __get_fault_gfp_mask(vma),
4036         };
4037         unsigned int dirty = flags & FAULT_FLAG_WRITE;
4038         struct mm_struct *mm = vma->vm_mm;
4039         pgd_t *pgd;
4040         p4d_t *p4d;
4041         int ret;
4042
4043         pgd = pgd_offset(mm, address);
4044         p4d = p4d_alloc(mm, pgd, address);
4045         if (!p4d)
4046                 return VM_FAULT_OOM;
4047
4048         vmf.pud = pud_alloc(mm, p4d, address);
4049         if (!vmf.pud)
4050                 return VM_FAULT_OOM;
4051         if (pud_none(*vmf.pud) && transparent_hugepage_enabled(vma)) {
4052                 ret = create_huge_pud(&vmf);
4053                 if (!(ret & VM_FAULT_FALLBACK))
4054                         return ret;
4055         } else {
4056                 pud_t orig_pud = *vmf.pud;
4057
4058                 barrier();
4059                 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4060
4061                         /* NUMA case for anonymous PUDs would go here */
4062
4063                         if (dirty && !pud_write(orig_pud)) {
4064                                 ret = wp_huge_pud(&vmf, orig_pud);
4065                                 if (!(ret & VM_FAULT_FALLBACK))
4066                                         return ret;
4067                         } else {
4068                                 huge_pud_set_accessed(&vmf, orig_pud);
4069                                 return 0;
4070                         }
4071                 }
4072         }
4073
4074         vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4075         if (!vmf.pmd)
4076                 return VM_FAULT_OOM;
4077         if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma)) {
4078                 ret = create_huge_pmd(&vmf);
4079                 if (!(ret & VM_FAULT_FALLBACK))
4080                         return ret;
4081         } else {
4082                 pmd_t orig_pmd = *vmf.pmd;
4083
4084                 barrier();
4085                 if (unlikely(is_swap_pmd(orig_pmd))) {
4086                         VM_BUG_ON(thp_migration_supported() &&
4087                                           !is_pmd_migration_entry(orig_pmd));
4088                         if (is_pmd_migration_entry(orig_pmd))
4089                                 pmd_migration_entry_wait(mm, vmf.pmd);
4090                         return 0;
4091                 }
4092                 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
4093                         if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
4094                                 return do_huge_pmd_numa_page(&vmf, orig_pmd);
4095
4096                         if (dirty && !pmd_write(orig_pmd)) {
4097                                 ret = wp_huge_pmd(&vmf, orig_pmd);
4098                                 if (!(ret & VM_FAULT_FALLBACK))
4099                                         return ret;
4100                         } else {
4101                                 huge_pmd_set_accessed(&vmf, orig_pmd);
4102                                 return 0;
4103                         }
4104                 }
4105         }
4106
4107         return handle_pte_fault(&vmf);
4108 }
4109
4110 /*
4111  * By the time we get here, we already hold the mm semaphore
4112  *
4113  * The mmap_sem may have been released depending on flags and our
4114  * return value.  See filemap_fault() and __lock_page_or_retry().
4115  */
4116 int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4117                 unsigned int flags)
4118 {
4119         int ret;
4120
4121         __set_current_state(TASK_RUNNING);
4122
4123         count_vm_event(PGFAULT);
4124         count_memcg_event_mm(vma->vm_mm, PGFAULT);
4125
4126         /* do counter updates before entering really critical section. */
4127         check_sync_rss_stat(current);
4128
4129         if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
4130                                             flags & FAULT_FLAG_INSTRUCTION,
4131                                             flags & FAULT_FLAG_REMOTE))
4132                 return VM_FAULT_SIGSEGV;
4133
4134         /*
4135          * Enable the memcg OOM handling for faults triggered in user
4136          * space.  Kernel faults are handled more gracefully.
4137          */
4138         if (flags & FAULT_FLAG_USER)
4139                 mem_cgroup_oom_enable();
4140
4141         if (unlikely(is_vm_hugetlb_page(vma)))
4142                 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4143         else
4144                 ret = __handle_mm_fault(vma, address, flags);
4145
4146         if (flags & FAULT_FLAG_USER) {
4147                 mem_cgroup_oom_disable();
4148                 /*
4149                  * The task may have entered a memcg OOM situation but
4150                  * if the allocation error was handled gracefully (no
4151                  * VM_FAULT_OOM), there is no need to kill anything.
4152                  * Just clean up the OOM state peacefully.
4153                  */
4154                 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
4155                         mem_cgroup_oom_synchronize(false);
4156         }
4157
4158         return ret;
4159 }
4160 EXPORT_SYMBOL_GPL(handle_mm_fault);
4161
4162 #ifndef __PAGETABLE_P4D_FOLDED
4163 /*
4164  * Allocate p4d page table.
4165  * We've already handled the fast-path in-line.
4166  */
4167 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4168 {
4169         p4d_t *new = p4d_alloc_one(mm, address);
4170         if (!new)
4171                 return -ENOMEM;
4172
4173         smp_wmb(); /* See comment in __pte_alloc */
4174
4175         spin_lock(&mm->page_table_lock);
4176         if (pgd_present(*pgd))          /* Another has populated it */
4177                 p4d_free(mm, new);
4178         else
4179                 pgd_populate(mm, pgd, new);
4180         spin_unlock(&mm->page_table_lock);
4181         return 0;
4182 }
4183 #endif /* __PAGETABLE_P4D_FOLDED */
4184
4185 #ifndef __PAGETABLE_PUD_FOLDED
4186 /*
4187  * Allocate page upper directory.
4188  * We've already handled the fast-path in-line.
4189  */
4190 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4191 {
4192         pud_t *new = pud_alloc_one(mm, address);
4193         if (!new)
4194                 return -ENOMEM;
4195
4196         smp_wmb(); /* See comment in __pte_alloc */
4197
4198         spin_lock(&mm->page_table_lock);
4199 #ifndef __ARCH_HAS_5LEVEL_HACK
4200         if (p4d_present(*p4d))          /* Another has populated it */
4201                 pud_free(mm, new);
4202         else
4203                 p4d_populate(mm, p4d, new);
4204 #else
4205         if (pgd_present(*p4d))          /* Another has populated it */
4206                 pud_free(mm, new);
4207         else
4208                 pgd_populate(mm, p4d, new);
4209 #endif /* __ARCH_HAS_5LEVEL_HACK */
4210         spin_unlock(&mm->page_table_lock);
4211         return 0;
4212 }
4213 #endif /* __PAGETABLE_PUD_FOLDED */
4214
4215 #ifndef __PAGETABLE_PMD_FOLDED
4216 /*
4217  * Allocate page middle directory.
4218  * We've already handled the fast-path in-line.
4219  */
4220 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4221 {
4222         spinlock_t *ptl;
4223         pmd_t *new = pmd_alloc_one(mm, address);
4224         if (!new)
4225                 return -ENOMEM;
4226
4227         smp_wmb(); /* See comment in __pte_alloc */
4228
4229         ptl = pud_lock(mm, pud);
4230 #ifndef __ARCH_HAS_4LEVEL_HACK
4231         if (!pud_present(*pud)) {
4232                 mm_inc_nr_pmds(mm);
4233                 pud_populate(mm, pud, new);
4234         } else  /* Another has populated it */
4235                 pmd_free(mm, new);
4236 #else
4237         if (!pgd_present(*pud)) {
4238                 mm_inc_nr_pmds(mm);
4239                 pgd_populate(mm, pud, new);
4240         } else /* Another has populated it */
4241                 pmd_free(mm, new);
4242 #endif /* __ARCH_HAS_4LEVEL_HACK */
4243         spin_unlock(ptl);
4244         return 0;
4245 }
4246 #endif /* __PAGETABLE_PMD_FOLDED */
4247
4248 static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4249                             unsigned long *start, unsigned long *end,
4250                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4251 {
4252         pgd_t *pgd;
4253         p4d_t *p4d;
4254         pud_t *pud;
4255         pmd_t *pmd;
4256         pte_t *ptep;
4257
4258         pgd = pgd_offset(mm, address);
4259         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
4260                 goto out;
4261
4262         p4d = p4d_offset(pgd, address);
4263         if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
4264                 goto out;
4265
4266         pud = pud_offset(p4d, address);
4267         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
4268                 goto out;
4269
4270         pmd = pmd_offset(pud, address);
4271         VM_BUG_ON(pmd_trans_huge(*pmd));
4272
4273         if (pmd_huge(*pmd)) {
4274                 if (!pmdpp)
4275                         goto out;
4276
4277                 if (start && end) {
4278                         *start = address & PMD_MASK;
4279                         *end = *start + PMD_SIZE;
4280                         mmu_notifier_invalidate_range_start(mm, *start, *end);
4281                 }
4282                 *ptlp = pmd_lock(mm, pmd);
4283                 if (pmd_huge(*pmd)) {
4284                         *pmdpp = pmd;
4285                         return 0;
4286                 }
4287                 spin_unlock(*ptlp);
4288                 if (start && end)
4289                         mmu_notifier_invalidate_range_end(mm, *start, *end);
4290         }
4291
4292         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
4293                 goto out;
4294
4295         if (start && end) {
4296                 *start = address & PAGE_MASK;
4297                 *end = *start + PAGE_SIZE;
4298                 mmu_notifier_invalidate_range_start(mm, *start, *end);
4299         }
4300         ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4301         if (!pte_present(*ptep))
4302                 goto unlock;
4303         *ptepp = ptep;
4304         return 0;
4305 unlock:
4306         pte_unmap_unlock(ptep, *ptlp);
4307         if (start && end)
4308                 mmu_notifier_invalidate_range_end(mm, *start, *end);
4309 out:
4310         return -EINVAL;
4311 }
4312
4313 static inline int follow_pte(struct mm_struct *mm, unsigned long address,
4314                              pte_t **ptepp, spinlock_t **ptlp)
4315 {
4316         int res;
4317
4318         /* (void) is needed to make gcc happy */
4319         (void) __cond_lock(*ptlp,
4320                            !(res = __follow_pte_pmd(mm, address, NULL, NULL,
4321                                                     ptepp, NULL, ptlp)));
4322         return res;
4323 }
4324
4325 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4326                              unsigned long *start, unsigned long *end,
4327                              pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
4328 {
4329         int res;
4330
4331         /* (void) is needed to make gcc happy */
4332         (void) __cond_lock(*ptlp,
4333                            !(res = __follow_pte_pmd(mm, address, start, end,
4334                                                     ptepp, pmdpp, ptlp)));
4335         return res;
4336 }
4337 EXPORT_SYMBOL(follow_pte_pmd);
4338
4339 /**
4340  * follow_pfn - look up PFN at a user virtual address
4341  * @vma: memory mapping
4342  * @address: user virtual address
4343  * @pfn: location to store found PFN
4344  *
4345  * Only IO mappings and raw PFN mappings are allowed.
4346  *
4347  * Returns zero and stores the PFN at @pfn on success, or a negative error code otherwise.
4348  */
4349 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4350         unsigned long *pfn)
4351 {
4352         int ret = -EINVAL;
4353         spinlock_t *ptl;
4354         pte_t *ptep;
4355
4356         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4357                 return ret;
4358
4359         ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
4360         if (ret)
4361                 return ret;
4362         *pfn = pte_pfn(*ptep);
4363         pte_unmap_unlock(ptep, ptl);
4364         return 0;
4365 }
4366 EXPORT_SYMBOL(follow_pfn);
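/*
 * A hedged usage sketch: a driver that needs the page frame number backing a
 * VM_IO/VM_PFNMAP mapping might do something like the following, holding the
 * mmap_sem so the VMA stays stable.  my_lookup_pfn() is a hypothetical name,
 * not an API defined in this file:
 *
 *	static int my_lookup_pfn(struct mm_struct *mm, unsigned long addr,
 *				 unsigned long *pfn)
 *	{
 *		struct vm_area_struct *vma;
 *		int ret = -EINVAL;
 *
 *		down_read(&mm->mmap_sem);
 *		vma = find_vma(mm, addr);
 *		if (vma && vma->vm_start <= addr)
 *			ret = follow_pfn(vma, addr, pfn);
 *		up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 */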
4367
4368 #ifdef CONFIG_HAVE_IOREMAP_PROT
4369 int follow_phys(struct vm_area_struct *vma,
4370                 unsigned long address, unsigned int flags,
4371                 unsigned long *prot, resource_size_t *phys)
4372 {
4373         int ret = -EINVAL;
4374         pte_t *ptep, pte;
4375         spinlock_t *ptl;
4376
4377         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
4378                 goto out;
4379
4380         if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
4381                 goto out;
4382         pte = *ptep;
4383
4384         if ((flags & FOLL_WRITE) && !pte_write(pte))
4385                 goto unlock;
4386
4387         *prot = pgprot_val(pte_pgprot(pte));
4388         *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
4389
4390         ret = 0;
4391 unlock:
4392         pte_unmap_unlock(ptep, ptl);
4393 out:
4394         return ret;
4395 }
4396
4397 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
4398                         void *buf, int len, int write)
4399 {
4400         resource_size_t phys_addr;
4401         unsigned long prot = 0;
4402         void __iomem *maddr;
4403         int offset = addr & (PAGE_SIZE-1);
4404
4405         if (follow_phys(vma, addr, write, &prot, &phys_addr))
4406                 return -EINVAL;
4407
4408         maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
4409         if (!maddr)
4410                 return -ENOMEM;
4411
4412         if (write)
4413                 memcpy_toio(maddr + offset, buf, len);
4414         else
4415                 memcpy_fromio(buf, maddr + offset, len);
4416         iounmap(maddr);
4417
4418         return len;
4419 }
4420 EXPORT_SYMBOL_GPL(generic_access_phys);
4421 #endif
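/*
 * generic_access_phys() is intended to be wired up as the ->access method of
 * a VMA that maps IO memory, so access_remote_vm() and ptrace can still read
 * and write it.  A sketch under CONFIG_HAVE_IOREMAP_PROT (the ops name below
 * is illustrative; /dev/mem uses this helper in a similar way):
 *
 *	static const struct vm_operations_struct my_mmio_vm_ops = {
 *		.access = generic_access_phys,
 *	};
 *
 * and in the driver's mmap() implementation:
 *
 *	vma->vm_ops = &my_mmio_vm_ops;
 */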
4422
4423 /*
4424  * Access another process' address space as given in mm.  If @tsk is
4425  * non-NULL, use the given task for page fault accounting.
4426  */
4427 int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
4428                 unsigned long addr, void *buf, int len, unsigned int gup_flags)
4429 {
4430         struct vm_area_struct *vma;
4431         void *old_buf = buf;
4432         int write = gup_flags & FOLL_WRITE;
4433
4434         down_read(&mm->mmap_sem);
4435         /* ignore errors, just check how much was successfully transferred */
4436         while (len) {
4437                 int bytes, ret, offset;
4438                 void *maddr;
4439                 struct page *page = NULL;
4440
4441                 ret = get_user_pages_remote(tsk, mm, addr, 1,
4442                                 gup_flags, &page, &vma, NULL);
4443                 if (ret <= 0) {
4444 #ifndef CONFIG_HAVE_IOREMAP_PROT
4445                         break;
4446 #else
4447                         /*
4448                          * Check if this is a VM_IO | VM_PFNMAP VMA, which
4449                          * we can access using slightly different code.
4450                          */
4451                         vma = find_vma(mm, addr);
4452                         if (!vma || vma->vm_start > addr)
4453                                 break;
4454                         if (vma->vm_ops && vma->vm_ops->access)
4455                                 ret = vma->vm_ops->access(vma, addr, buf,
4456                                                           len, write);
4457                         if (ret <= 0)
4458                                 break;
4459                         bytes = ret;
4460 #endif
4461                 } else {
4462                         bytes = len;
4463                         offset = addr & (PAGE_SIZE-1);
4464                         if (bytes > PAGE_SIZE-offset)
4465                                 bytes = PAGE_SIZE-offset;
4466
4467                         maddr = kmap(page);
4468                         if (write) {
4469                                 copy_to_user_page(vma, page, addr,
4470                                                   maddr + offset, buf, bytes);
4471                                 set_page_dirty_lock(page);
4472                         } else {
4473                                 copy_from_user_page(vma, page, addr,
4474                                                     buf, maddr + offset, bytes);
4475                         }
4476                         kunmap(page);
4477                         put_page(page);
4478                 }
4479                 len -= bytes;
4480                 buf += bytes;
4481                 addr += bytes;
4482         }
4483         up_read(&mm->mmap_sem);
4484
4485         return buf - old_buf;
4486 }
4487
4488 /**
4489  * access_remote_vm - access another process' address space
4490  * @mm:         the mm_struct of the target address space
4491  * @addr:       start address to access
4492  * @buf:        source or destination buffer
4493  * @len:        number of bytes to transfer
4494  * @gup_flags:  flags modifying lookup behaviour
4495  *
4496  * The caller must hold a reference on @mm.
4497  */
4498 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
4499                 void *buf, int len, unsigned int gup_flags)
4500 {
4501         return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
4502 }
4503
4504 /*
4505  * Access another process' address space.
4506  * The source/target buffer must be in kernel space.
4507  * Do not walk the page tables directly; use get_user_pages().
4508  */
4509 int access_process_vm(struct task_struct *tsk, unsigned long addr,
4510                 void *buf, int len, unsigned int gup_flags)
4511 {
4512         struct mm_struct *mm;
4513         int ret;
4514
4515         mm = get_task_mm(tsk);
4516         if (!mm)
4517                 return 0;
4518
4519         ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
4520
4521         mmput(mm);
4522
4523         return ret;
4524 }
4525 EXPORT_SYMBOL_GPL(access_process_vm);
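/*
 * A hedged usage sketch: this is the interface ptrace-style debuggers use to
 * read or write a tracee's memory.  For example, to read a few bytes from
 * another task (FOLL_FORCE overrides protections the way ptrace does; the
 * surrounding code is hypothetical):
 *
 *	u8 buf[8];
 *	int copied;
 *
 *	copied = access_process_vm(child, addr, buf, sizeof(buf), FOLL_FORCE);
 *	if (copied != sizeof(buf))
 *		return -EIO;
 *
 * Writing works the same way with FOLL_FORCE | FOLL_WRITE and a source
 * buffer in kernel space.
 */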
4526
4527 /*
4528  * Print the name of a VMA.
4529  */
4530 void print_vma_addr(char *prefix, unsigned long ip)
4531 {
4532         struct mm_struct *mm = current->mm;
4533         struct vm_area_struct *vma;
4534
4535         /*
4536          * Do not print if we are in an atomic
4537          * context (in exception stacks, etc.):
4538          */
4539         if (preempt_count())
4540                 return;
4541
4542         down_read(&mm->mmap_sem);
4543         vma = find_vma(mm, ip);
4544         if (vma && vma->vm_file) {
4545                 struct file *f = vma->vm_file;
4546                 char *buf = (char *)__get_free_page(GFP_KERNEL);
4547                 if (buf) {
4548                         char *p;
4549
4550                         p = file_path(f, buf, PAGE_SIZE);
4551                         if (IS_ERR(p))
4552                                 p = "?";
4553                         printk("%s%s[%lx+%lx]", prefix, kbasename(p),
4554                                         vma->vm_start,
4555                                         vma->vm_end - vma->vm_start);
4556                         free_page((unsigned long)buf);
4557                 }
4558         }
4559         up_read(&mm->mmap_sem);
4560 }
4561
4562 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
4563 void __might_fault(const char *file, int line)
4564 {
4565         /*
4566          * Some code (nfs/sunrpc) uses socket ops on kernel memory while
4567          * holding the mmap_sem.  This is safe because kernel memory doesn't
4568          * get paged out, so we'll never actually fault, and the
4569          * annotations below would generate false positives.
4570          */
4571         if (uaccess_kernel())
4572                 return;
4573         if (pagefault_disabled())
4574                 return;
4575         __might_sleep(file, line, 0);
4576 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
4577         if (current->mm)
4578                 might_lock_read(&current->mm->mmap_sem);
4579 #endif
4580 }
4581 EXPORT_SYMBOL(__might_fault);
4582 #endif
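/*
 * A minimal sketch of where the might_fault() annotation (which expands to
 * __might_fault() under these configs) is typically placed: at the top of
 * helpers that may touch user memory and therefore may sleep on a page
 * fault.  my_get_user_buf() is a hypothetical example, not an existing API:
 *
 *	static int my_get_user_buf(void *dst, const void __user *src, size_t n)
 *	{
 *		might_fault();
 *		return copy_from_user(dst, src, n) ? -EFAULT : 0;
 *	}
 */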
4583
4584 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
4585 static void clear_gigantic_page(struct page *page,
4586                                 unsigned long addr,
4587                                 unsigned int pages_per_huge_page)
4588 {
4589         int i;
4590         struct page *p = page;
4591
4592         might_sleep();
4593         for (i = 0; i < pages_per_huge_page;
4594              i++, p = mem_map_next(p, page, i)) {
4595                 cond_resched();
4596                 clear_user_highpage(p, addr + i * PAGE_SIZE);
4597         }
4598 }
4599 void clear_huge_page(struct page *page,
4600                      unsigned long addr_hint, unsigned int pages_per_huge_page)
4601 {
4602         int i, n, base, l;
4603         unsigned long addr = addr_hint &
4604                 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
4605
4606         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
4607                 clear_gigantic_page(page, addr, pages_per_huge_page);
4608                 return;
4609         }
4610
4611         /* Clear the sub-page to be accessed last, to keep its cache lines hot */
4612         might_sleep();
4613         n = (addr_hint - addr) / PAGE_SIZE;
4614         if (2 * n <= pages_per_huge_page) {
4615                 /* The sub-page to access is in the first half of the huge page */
4616                 base = 0;
4617                 l = n;
4618                 /* Clear the sub-pages at the end of the huge page */
4619                 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
4620                         cond_resched();
4621                         clear_user_highpage(page + i, addr + i * PAGE_SIZE);
4622                 }
4623         } else {
4624                 /* The sub-page to access is in the second half of the huge page */
4625                 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
4626                 l = pages_per_huge_page - n;
4627                 /* Clear the sub-pages at the beginning of the huge page */
4628                 for (i = 0; i < base; i++) {
4629                         cond_resched();
4630                         clear_user_highpage(page + i, addr + i * PAGE_SIZE);
4631                 }
4632         }
4633         /*
4634          * Clear the remaining sub-pages in a left-right-left-right pattern,
4635          * working towards the sub-page to access
4636          */
4637         for (i = 0; i < l; i++) {
4638                 int left_idx = base + i;
4639                 int right_idx = base + 2 * l - 1 - i;
4640
4641                 cond_resched();
4642                 clear_user_highpage(page + left_idx,
4643                                     addr + left_idx * PAGE_SIZE);
4644                 cond_resched();
4645                 clear_user_highpage(page + right_idx,
4646                                     addr + right_idx * PAGE_SIZE);
4647         }
4648 }
4649
4650 static void copy_user_gigantic_page(struct page *dst, struct page *src,
4651                                     unsigned long addr,
4652                                     struct vm_area_struct *vma,
4653                                     unsigned int pages_per_huge_page)
4654 {
4655         int i;
4656         struct page *dst_base = dst;
4657         struct page *src_base = src;
4658
4659         for (i = 0; i < pages_per_huge_page; ) {
4660                 cond_resched();
4661                 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
4662
4663                 i++;
4664                 dst = mem_map_next(dst, dst_base, i);
4665                 src = mem_map_next(src, src_base, i);
4666         }
4667 }
4668
4669 void copy_user_huge_page(struct page *dst, struct page *src,
4670                          unsigned long addr, struct vm_area_struct *vma,
4671                          unsigned int pages_per_huge_page)
4672 {
4673         int i;
4674
4675         if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
4676                 copy_user_gigantic_page(dst, src, addr, vma,
4677                                         pages_per_huge_page);
4678                 return;
4679         }
4680
4681         might_sleep();
4682         for (i = 0; i < pages_per_huge_page; i++) {
4683                 cond_resched();
4684                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
4685         }
4686 }
4687
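/*
 * Like copy_from_user(), the function below returns the number of bytes that
 * could not be copied, i.e. 0 on complete success.
 */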
4688 long copy_huge_page_from_user(struct page *dst_page,
4689                                 const void __user *usr_src,
4690                                 unsigned int pages_per_huge_page,
4691                                 bool allow_pagefault)
4692 {
4693         void *src = (void *)usr_src;
4694         void *page_kaddr;
4695         unsigned long i, rc = 0;
4696         unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
4697
4698         for (i = 0; i < pages_per_huge_page; i++) {
4699                 if (allow_pagefault)
4700                         page_kaddr = kmap(dst_page + i);
4701                 else
4702                         page_kaddr = kmap_atomic(dst_page + i);
4703                 rc = copy_from_user(page_kaddr,
4704                                 (const void __user *)(src + i * PAGE_SIZE),
4705                                 PAGE_SIZE);
4706                 if (allow_pagefault)
4707                         kunmap(dst_page + i);
4708                 else
4709                         kunmap_atomic(page_kaddr);
4710
4711                 ret_val -= (PAGE_SIZE - rc);
4712                 if (rc)
4713                         break;
4714
4715                 cond_resched();
4716         }
4717         return ret_val;
4718 }
4719 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4720
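/*
 * With split PTE locks enabled, the per-page-table spinlock normally lives
 * inside struct page.  When it is too large to embed there
 * (ALLOC_SPLIT_PTLOCKS), the helpers below allocate it from a dedicated slab
 * cache instead.
 */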
4721 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
4722
4723 static struct kmem_cache *page_ptl_cachep;
4724
4725 void __init ptlock_cache_init(void)
4726 {
4727         page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
4728                         SLAB_PANIC, NULL);
4729 }
4730
4731 bool ptlock_alloc(struct page *page)
4732 {
4733         spinlock_t *ptl;
4734
4735         ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
4736         if (!ptl)
4737                 return false;
4738         page->ptl = ptl;
4739         return true;
4740 }
4741
4742 void ptlock_free(struct page *page)
4743 {
4744         kmem_cache_free(page_ptl_cachep, page->ptl);
4745 }
4746 #endif