mm/page_table_check.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt

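/*
 * Per-page counters, stored in page_ext: how many times the page is mapped
 * as anonymous and as file-backed memory. A page must never be both at once.
 */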
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

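/*
 * Parse the optional "page_table_check=" kernel command line parameter; it
 * takes the usual boolean strings and overrides the default chosen by
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED.
 */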
static int __init early_page_table_check_param(char *buf)
{
	return strtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);

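/*
 * Tell page_ext whether to reserve room for struct page_table_check in each
 * page's extension area; when the check is disabled, skipping the
 * reservation avoids the memory overhead entirely.
 */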
static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

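/*
 * Once page_ext is initialized, flip the static key so that the check hooks
 * start doing real work.
 */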
static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

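/* Picked up by page_ext through its page_ext_ops[] table. */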
struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};

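/*
 * Return this page's page_table_check slice of its page_ext data; the
 * offset is assigned by page_ext when the extensions are laid out.
 */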
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that the page is of the correct type, and that the counters
 * do not become negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that the page is of the correct type, and that an anonymous
 * page is not writably mapped more than once.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * Page is on the free list, or is being allocated: verify that the counters
 * are zeroes, crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	unsigned long i;

	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
}

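/*
 * The hooks below are called from the architecture's page table helpers
 * (e.g. set_pte_at() and the ptep/pmdp/pudp clear paths). Kernel (init_mm)
 * mappings are deliberately ignored; only userspace page tables are tracked.
 */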
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

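/*
 * Setting an entry may overwrite a live one: account for the old entry
 * being cleared first, then count the new mapping along with its write
 * permission.
 */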
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

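/*
 * A non-leaf PMD is about to go away: walk the PTE page it points to and
 * drop the counters for every entry, as if each PTE were cleared one by
 * one.
 */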
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}