// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt
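
/*
 * Per-page counters, stored in the page_ext extension of each struct page:
 * anon_map_count and file_map_count track how many times the page is
 * currently mapped into user page tables as anonymous or file-backed memory,
 * respectively. A page must never be mapped as both types at once.
 */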
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);

DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);
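
/*
 * The static key is defined true ("disabled") so that the check hooks reduce
 * to an essentially free static branch by default; init_page_table_check()
 * below flips it only when checking was requested, keeping the cost near
 * zero on kernels where the feature is built in but not enabled.
 */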

static int __init early_page_table_check_param(char *buf)
{
	return strtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);
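
/*
 * Example: boot with "page_table_check=on" to enable the checks on a kernel
 * built with CONFIG_PAGE_TABLE_CHECK but without the ENFORCED default.
 */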

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;

	/* Checking was requested: enable the hooks. */
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};
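
/*
 * page_ext reserves .size extra bytes for every struct page when .need()
 * returns true, and records where that space starts in .offset; the helper
 * below adds that offset to a page's page_ext to reach its counters.
 */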
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for that
 * page, verify that it is of the correct type, and that the counters do not
 * become negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}

/*
 * A new entry is added to the page table: increment the counters for that
 * page, verify that it is of the correct type, and that the page is not
 * being mapped with a different type by a different process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = lookup_page_ext(page);
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
}
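
/*
 * Illustrative sketch, not from the original file: on an architecture that
 * calls __page_table_check_pte_set() from its set_pte_at(), mapping the same
 * anonymous pfn writably into two address spaces (mm_a, mm_b, with
 * hypothetical addresses and pte values) trips the check above:
 *
 *	set_pte_at(mm_a, addr_a, ptep_a, pte);	// anon_map_count: 0 -> 1
 *	set_pte_at(mm_b, addr_b, ptep_b, pte);	// anon_map_count: 1 -> 2 and
 *						// rw == true  -> BUG()
 */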

/*
 * Page is on the free list, or is being allocated: verify that all counters
 * are zeroes, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	unsigned long i;

	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
}
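
/*
 * The wrappers in <linux/page_table_check.h> call __page_table_check_zero()
 * from the page allocation and freeing paths, so a page that leaves with a
 * stale mapping count is caught at the page allocator boundary.
 */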

void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);

void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	/* Account the entry being overwritten before counting the new one. */
	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);
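
/*
 * Architectures that select ARCH_SUPPORTS_PAGE_TABLE_CHECK are expected to
 * call the *_set and *_clear helpers from their set_pte_at()/set_pmd_at()/
 * set_pud_at() and entry-clearing primitives, so every change to a user
 * page table passes through the accounting above.
 */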

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);
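
/*
 * Validate and unaccount every entry of a PTE table that is about to be
 * disconnected from the page tables, e.g. when a PMD range is collapsed
 * into a huge page and the old PTE table is freed.
 */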
void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}