// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code       <alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "internal.h"

static inline bool can_change_pte_writable(struct vm_area_struct *vma,
					   unsigned long addr, pte_t pte)
{
	struct page *page;

	VM_BUG_ON(!(vma->vm_flags & VM_WRITE) || pte_write(pte));

	if (pte_protnone(pte) || !pte_dirty(pte))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_pte_wp(vma, pte))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/*
		 * We can only special-case on exclusive anonymous pages,
		 * because we know that our write-fault handler similarly would
		 * map them writable without any additional checks while holding
		 * the PT lock.
		 */
		page = vm_normal_page(vma, addr, pte);
		if (!page || !PageAnon(page) || !PageAnonExclusive(page))
			return false;
	}

	return true;
}

static unsigned long change_pte_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	tlb_change_page_size(tlb, PAGE_SIZE);

	/*
	 * prot_numa can call this with only the mmap_lock held for
	 * reading, so we must check that the pmd isn't constantly
	 * changing under us from pmd_none to pmd_trans_huge and/or
	 * the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte, so the pmd can't change
	 * from under us even if the mmap_lock is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get the target node for single-threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;
				int nid;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || is_zone_device_page(page) || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_count(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if the page is already on
				 * the node a single-threaded process is running on.
				 */
				nid = page_to_nid(page);
				if (target_node == nid)
					continue;

				/*
				 * Skip scanning the top-tier node if normal NUMA
				 * balancing is disabled.
				 */
				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
				    node_is_toptier(nid))
					continue;
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			if (preserve_write)
				ptent = pte_mk_savedwrite(ptent);

			if (uffd_wp) {
				ptent = pte_wrprotect(ptent);
				ptent = pte_mkuffd_wp(ptent);
			} else if (uffd_wp_resolve) {
				ptent = pte_clear_uffd_wp(ptent);
			}

			/*
			 * In some writable, shared mappings, we might want
			 * to catch actual write access -- see
			 * vma_wants_writenotify().
			 *
			 * In all writable, private mappings, we have to
			 * properly handle COW.
			 *
			 * In both cases, we can sometimes still change PTEs
			 * writable and avoid the write-fault handler, for
			 * example, if a PTE is already dirty and no other
			 * COW or special handling is required.
			 */
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent) &&
			    can_change_pte_writable(vma, addr, ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			if (pte_needs_flush(oldpte, ptent))
				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				struct page *page = pfn_swap_entry_to_page(entry);

				/*
				 * A protection check is difficult, so
				 * just be safe and disable write.
				 */
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
							     swp_offset(entry));
				else
					entry = make_readable_migration_entry(swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (pte_marker_entry_uffd_wp(entry)) {
				/*
				 * If this is a uffd-wp pte marker and we'd like
				 * to unprotect it, drop it; the next page
				 * fault will trigger without uffd trapping.
				 */
				if (uffd_wp_resolve) {
					pte_clear(vma->vm_mm, addr, pte);
					pages++;
				}
				continue;
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		} else {
			/* It must be a none pte, or what else?.. */
			WARN_ON_ONCE(!pte_none(oldpte));
			if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
				/*
				 * For file-backed memory, we need to be able to
				 * wr-protect a none pte, because even if the
				 * pte is none, the page/swap cache could
				 * exist.  Do that by installing a marker.
				 */
				set_pte_at(vma->vm_mm, addr, pte,
					   make_pte_marker(PTE_MARKER_UFFD_WP));
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmd_read_atomic(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}

/* Return true if we're uffd wr-protecting file-backed memory, or false */
static inline bool
uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags)
{
	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}

/*
 * If wr-protecting the range for file-backed memory, populate the pgtable
 * for the case when the pgtable is empty but the page cache exists.  When
 * {pte|pmd|...}_alloc() fails it means we are out of memory; we have no
 * better option than to stop.
 */
#define  change_pmd_prepare(vma, pmd, cp_flags)				\
	do {								\
		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
			if (WARN_ON_ONCE(pte_alloc(vma->vm_mm, pmd)))	\
				break;					\
		}							\
	} while (0)
/*
 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need
 * a separate change_pmd_prepare() because pte_alloc() returns 0 on success,
 * while {pmd|pud|p4d}_alloc() returns a valid pointer on success.
 */
#define  change_prepare(vma, high, low, addr, cp_flags)			\
	do {								\
		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
			if (WARN_ON_ONCE(p == NULL))			\
				break;					\
		}							\
	} while (0)
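/*
 * Illustrative expansion (derived from the macro above): for example,
 * change_prepare(vma, pud, pmd, addr, cp_flags) becomes, for the uffd-wp
 * file-backed case, roughly
 *
 *	pmd_t *p = pmd_alloc(vma->vm_mm, pud, addr);
 *	if (WARN_ON_ONCE(p == NULL))
 *		break;
 *
 * i.e. it makes sure the next-lower page-table level exists before the
 * walk descends into it.
 */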

static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);

		change_pmd_prepare(vma, pmd, cp_flags);
		/*
		 * Automatic NUMA balancing walks the tables with mmap_lock
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check, leading to a false positive and an unwanted clear.
		 * Hence, it's necessary to atomically read the PMD value
		 * for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma, vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if ((next - addr != HPAGE_PMD_SIZE) ||
			    uffd_wp_protect_file(vma, cp_flags)) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
				/*
				 * For file-backed memory, the pmd could have
				 * been cleared; make sure the pmd is populated
				 * if necessary, then fall through to the pte
				 * level.
				 */
				change_pmd_prepare(vma, pmd, cp_flags);
			} else {
				/*
				 * change_huge_pmd() does not defer TLB flushes,
				 * so no need to propagate the tlb argument.
				 */
				int nr_ptes = change_huge_pmd(tlb, vma, pmd,
						addr, newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(tlb, vma, pmd, addr, next,
					      newprot, cp_flags);
		pages += this_pages;
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		change_prepare(vma, pud, pmd, addr, cp_flags);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long pages = 0;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		change_prepare(vma, p4d, pud, addr, cp_flags);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

static unsigned long change_protection_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	tlb_start_vma(tlb, vma);
	do {
		next = pgd_addr_end(addr, end);
		change_prepare(vma, pgd, p4d, addr, cp_flags);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	tlb_end_vma(tlb, vma);

	return pages;
}

unsigned long change_protection(struct mmu_gather *tlb,
		       struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       unsigned long cp_flags)
{
	unsigned long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot,
						  cp_flags);
	else
		pages = change_protection_range(tlb, vma, start, end, newprot,
						cp_flags);

	return pages;
}

static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};

int
mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
	       struct vm_area_struct **pprev, unsigned long start,
	       unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	bool try_change_writable;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so it doesn't need to be heavily optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	/*
	 * We want to check manually whether we can change individual PTEs
	 * writable if we can't do that automatically for all PTEs in a
	 * mapping. For private mappings, that's always the case when we
	 * have write permission, as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		try_change_writable = vma_wants_writenotify(vma, vma->vm_page_prot);
	else
		try_change_writable = !!(vma->vm_flags & VM_WRITE);
	vma_set_page_prot(vma);

	change_protection(tlb, vma, start, end, vma->vm_page_prot,
			  try_change_writable ? MM_CP_TRY_CHANGE_WRITABLE : 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid a
	 * major fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);
	struct mmu_gather tlb;

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	if (start > vma->vm_start)
		prev = vma;
	else
		prev = vma->vm_prev;

	tlb_gather_mmu(&tlb, current->mm);
	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC? */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			break;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			break;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			break;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				break;
		}

		error = mprotect_fixup(&tlb, vma, &prev, nstart, tmp, newflags);
		if (error)
			break;

		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
		prot = reqprot;
	}
	tlb_finish_mmu(&tlb);
out:
	mmap_write_unlock(current->mm);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}
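
/*
 * Illustrative userspace sketch of the mprotect() contract implemented
 * above (a minimal example, assuming a 4 KiB page size):
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	((char *)p)[0] = 1;			// writable for now
 *	if (mprotect(p, 4096, PROT_READ))	// revoke write access
 *		perror("mprotect");
 *	// a subsequent store through p would now fault with SIGSEGV
 */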

#ifdef CONFIG_ARCH_HAS_PKEYS

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}
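
/*
 * Illustrative userspace sketch of the pkey syscalls above (assumes an
 * architecture with protection-key support and the glibc wrappers):
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	if (pkey < 0)
 *		err(1, "pkey_alloc");
 *	if (pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey))
 *		err(1, "pkey_mprotect");
 *	...
 *	pkey_free(pkey);
 *
 * The mapping stays PROT_READ|PROT_WRITE in the page tables; write access
 * is instead gated by the per-thread access rights of the assigned pkey.
 */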

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */