// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none.  This is
 * called from the p?d_none_or_clear_bad macros, though bad
 * entries should only very seldom be seen in practice.
 */

void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
        p4d_ERROR(*p4d);
        p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out like the
 * p4d/pud variants above: pmd folding is special, and the pmd_*
 * macros typically refer to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}

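/*
 * Illustration only, not part of this file: walkers normally reach
 * the p?d_clear_bad() helpers above through the
 * p?d_none_or_clear_bad() macros, which skip empty entries and
 * reset corrupt ones.  A minimal, hypothetical sketch of the usual
 * calling pattern:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *
 *	if (pgd_none_or_clear_bad(pgd))
 *		return;		(may have called pgd_clear_bad())
 *
 * The p4d/pud/pmd levels are descended the same way.
 */
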
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed) and write permission.
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this.  We
 * return whether the PTE actually changed, which in turn instructs
 * the caller to do things like update_mmu_cache().  This used to be
 * done in the caller, but sparc needs minor faults to force that
 * call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif

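/*
 * Illustration only (hypothetical caller, not part of this file):
 * a write-fault path would upgrade a PTE roughly as follows,
 * updating the MMU cache only when the PTE actually changed:
 *
 *	pte_t entry = pte_mkyoung(pte_mkdirty(orig_pte));
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, 1))
 *		update_mmu_cache(vma, address, ptep);
 */
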
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif

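/*
 * Illustration only: reference tracking during reclaim (the rmap
 * walk in mm/rmap.c) uses the pattern above to age mappings.
 * Hypothetical sketch:
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;
 */
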
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

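/*
 * Illustration only: unmap paths use this helper to atomically fetch
 * and clear a PTE, relying on pte_accessible() above to skip the TLB
 * flush for entries the hardware could never have cached.
 * Hypothetical sketch:
 *
 *	pte_t pteval = ptep_clear_flush(vma, address, pte);
 *
 *	if (pte_dirty(pteval))
 *		set_page_dirty(page);
 */
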
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
                  !pmd_devmap(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

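/*
 * Illustration only: huge-pmd teardown (e.g. zapping or splitting a
 * THP mapping) uses the helper above to take down the whole mapping
 * in one shot.  Hypothetical sketch:
 *
 *	pmd_t orig_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
 *
 * after which orig_pmd's dirty/young bits can be inspected while the
 * hardware can no longer use the stale translation.
 */
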
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pud_t *pudp)
{
        pud_t pud;

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
        VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so destroys page coloring on some architectures */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}
#endif

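/*
 * Illustration only: deposit and withdraw pair up across the life of
 * a huge pmd.  When the huge pmd is installed, the pte page table
 * that would otherwise map the range is deposited; when the huge pmd
 * is split or zapped, it is withdrawn and reused or freed.
 * Hypothetical sketch, with the pmd lock held:
 *
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);	(at map time)
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);	(at split/zap)
 *	pte_free(mm, pgtable);
 */
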
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return old;
}
#endif

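/*
 * Illustration only: THP split marks the pmd invalid first, so no
 * CPU can set dirty/accessed bits in it while the pte page table is
 * being wired up.  Hypothetical sketch:
 *
 *	old_pmd = pmdp_invalidate(vma, haddr, pmd);
 *	(propagate pmd_dirty(old_pmd) and pmd_young(old_pmd) to the
 *	 ptes, then install the deposited pte page table)
 */
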
#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        /*
         * The pmd and hugepage pte formats are the same, so we can
         * use the same function here.
         */
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        /* collapse entails shooting down the ptes, not the pmd */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif
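
/*
 * Illustration only: khugepaged uses this during collapse to take
 * the pte page table offline before copying its pages into a huge
 * page.  Hypothetical sketch, with mmap_lock held for write:
 *
 *	pmd_t _pmd = pmdp_collapse_flush(vma, haddr, pmd);
 *	(the ptes are now unreachable; copy the pages, then install
 *	 the new huge pmd and free or redeposit the pte page table)
 */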
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */