// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt
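
/*
 * One of these structures lives in page_ext for every page. It counts how
 * many times the page is currently mapped as anonymous and as file-backed
 * memory; the two types must never be mixed for the same page.
 */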
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
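
/*
 * The checks default to off (the "disabled" static key is true) unless
 * CONFIG_PAGE_TABLE_CHECK_ENFORCED is set; the "page_table_check=" boot
 * parameter parsed below can override the default either way.
 */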
static int __init early_page_table_check_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	if (strcmp(buf, "on") == 0)
		__page_table_check_enabled = true;
	else if (strcmp(buf, "off") == 0)
		__page_table_check_enabled = false;
	return 0;
}

early_param("page_table_check", early_page_table_check_param);
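
/* Usage (kernel command line): page_table_check=on or page_table_check=off. */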

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};

static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) &&
	       (pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) &&
	       (pud_val(pud) & _PAGE_USER);
}
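
/*
 * Note: the helpers above test the x86 _PAGE_PRESENT and _PAGE_USER bits, so
 * "user accessible" here means a present leaf entry that the hardware will
 * let userspace reach.
 */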

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that it is of the correct type, and that the counters do not
 * go negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;
	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);
	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that it is of the correct type, and that it is not being
 * mapped with a different type by a different process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;
	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);
	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * Page is on the free list, or is being allocated: verify that the counters
 * are zeroes, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	unsigned long i;

	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
}
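
/*
 * The *_clear hooks below are called by architecture code whenever a
 * user-accessible entry at the given page-table level is removed; kernel
 * (init_mm) mappings are never tracked.
 */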
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;
	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;
	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);
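
/*
 * The *_set hooks are called before a new entry is installed: the entry being
 * overwritten is accounted as cleared first, then the new entry is counted,
 * with its write permission passed down so that an anonymous page is never
 * mapped writable more than once.
 */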
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;
	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);
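
/*
 * For illustration only, a sketch of how an architecture might wire this in
 * from its set_pte_at(); the static-key wrapper page_table_check_pte_set()
 * is assumed to be provided by include/linux/page_table_check.h:
 *
 *	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 *				      pte_t *ptep, pte_t pte)
 *	{
 *		page_table_check_pte_set(mm, addr, ptep, pte);
 *		set_pte(ptep, pte);
 *	}
 */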

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;
	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_PAGE_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;
	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_PAGE_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);
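
/*
 * Called when a non-leaf PMD that points to a page-table page is removed:
 * every PTE in that page-table page is accounted as cleared.
 */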
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;
	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		/* unmap using the base pointer, not the advanced one */
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}