// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/bitops.h>
#include <linux/mmu_notifier.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/**
 * struct wp_walk - Private struct for pagetable walk callbacks
 * @range: Range for mmu notifiers
 * @tlbflush_start: Address of first modified pte
 * @tlbflush_end: Address of last modified pte + 1
 * @total: Total number of modified ptes
 */
struct wp_walk {
	struct mmu_notifier_range range;
	unsigned long tlbflush_start;
	unsigned long tlbflush_end;
	unsigned long total;
};

/**
 * wp_pte - Write-protect a pte
 * @pte: Pointer to the pte
 * @addr: The virtual page address
 * @walk: pagetable walk callback argument
 *
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
 */
static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
		  struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	pte_t ptent = *pte;

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);
	}

	return 0;
}

/**
 * struct clean_walk - Private struct for the clean_record_pte function.
 * @base: struct wp_walk we derive from
 * @bitmap_pgoff: Address_space page offset of the first bit in @bitmap
 * @bitmap: Bitmap with one bit for each page offset in the address_space range
 * covered.
 * @start: Address_space page offset of first modified pte relative
 * to @bitmap_pgoff
 * @end: Address_space page offset of last modified pte relative
 * to @bitmap_pgoff
 */
struct clean_walk {
	struct wp_walk base;
	pgoff_t bitmap_pgoff;
	unsigned long *bitmap;
	pgoff_t start;
	pgoff_t end;
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)
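
/*
 * Illustrative note (not part of the kernel API): struct clean_walk
 * derives from struct wp_walk by embedding it, and to_clean_walk()
 * uses container_of() to recover the derived struct from the base
 * pointer that the pagewalk hands to callbacks via walk->private:
 *
 *	struct clean_walk cwalk;
 *	struct wp_walk *base = &cwalk.base;
 *
 *	to_clean_walk(base) == &cwalk	(always true)
 */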

/**
 * clean_record_pte - Clean a pte and record its address space offset in a
 * bitmap
 * @pte: Pointer to the pte
 * @addr: The virtual page address
 * @walk: pagetable walk callback argument
 *
 * The function cleans a pte and records the range in
 * virtual address space of touched ptes for efficient TLB flushes.
 * It also records dirty ptes in a bitmap representing page offsets
 * in the address_space, as well as the first and last of the bits
 * touched.
 */
static int clean_record_pte(pte_t *pte, unsigned long addr,
			    unsigned long end, struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;
	struct clean_walk *cwalk = to_clean_walk(wpwalk);
	pte_t ptent = *pte;

	if (pte_dirty(ptent)) {
		pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
			walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
		pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

		ptent = pte_mkclean(old_pte);
		ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

		wpwalk->total++;
		wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
		wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
					   addr + PAGE_SIZE);

		__set_bit(pgoff, cwalk->bitmap);
		cwalk->start = min(cwalk->start, pgoff);
		cwalk->end = max(cwalk->end, pgoff + 1);
	}

	return 0;
}
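
/*
 * Worked example of the pgoff arithmetic above, with hypothetical
 * numbers and assuming 4 KiB pages: for a vma with vm_start = 0x100000
 * and vm_pgoff = 16, a dirty pte at addr = 0x103000 maps address_space
 * page offset (0x103000 - 0x100000) / 4096 + 16 = 19. With
 * bitmap_pgoff = 16, bit 19 - 16 = 3 is set in the bitmap.
 */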

/* wp_clean_pmd_entry - The pagewalk pmd callback. */
static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	/* Dirty-tracking should be handled on the pte level */
	pmd_t pmdval = pmd_read_atomic(pmd);

	if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
		WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));

	return 0;
}

/* wp_clean_pud_entry - The pagewalk pud callback. */
static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	/* Dirty-tracking should be handled on the pte level */
	pud_t pudval = READ_ONCE(*pud);

	if (pud_trans_huge(pudval) || pud_devmap(pudval))
		WARN_ON(pud_write(pudval) || pud_dirty(pudval));

	return 0;
}

/*
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 *
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * and calls the necessary mmu notifiers.
 */
static int wp_clean_pre_vma(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	/* Stage an empty flush range; touched ptes grow it via min()/max() */
	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->vma, walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);

	/*
	 * We're not using tlb_gather_mmu() since typically
	 * only a small subrange of PTEs are affected, whereas
	 * tlb_gather_mmu() records the full range.
	 */
	inc_tlb_flush_pending(walk->mm);

	return 0;
}

/*
 * wp_clean_post_vma - The pagewalk post_vma callback.
 *
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * notifiers.
 */
static void wp_clean_post_vma(struct mm_walk *walk)
{
	struct wp_walk *wpwalk = walk->private;

	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
}

/*
 * wp_clean_test_walk - The pagewalk test_walk callback.
 *
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.
 */
static int wp_clean_test_walk(unsigned long start, unsigned long end,
			      struct mm_walk *walk)
{
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
	    (VM_SHARED | VM_MAYWRITE))
		return 1;

	return 0;
}
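
/*
 * Example (illustrative): a MAP_SHARED PROT_READ|PROT_WRITE mapping of
 * a regular file has both VM_SHARED and VM_MAYWRITE set and is walked.
 * A MAP_PRIVATE (COW) mapping lacks VM_SHARED, and a hugetlbfs mapping
 * sets VM_HUGETLB, so both are skipped.
 */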

static const struct mm_walk_ops clean_walk_ops = {
	.pte_entry = clean_record_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

static const struct mm_walk_ops wp_walk_ops = {
	.pte_entry = wp_pte,
	.pmd_entry = wp_clean_pmd_entry,
	.pud_entry = wp_clean_pud_entry,
	.test_walk = wp_clean_test_walk,
	.pre_vma = wp_clean_pre_vma,
	.post_vma = wp_clean_post_vma
};

/**
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * @mapping: The address_space we want to write protect
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * extended to handle them as well.
 *
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
 */
unsigned long wp_shared_mapping_range(struct address_space *mapping,
				      pgoff_t first_index, pgoff_t nr)
{
	struct wp_walk wpwalk = { .total = 0 };

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
				  &wpwalk));
	i_mmap_unlock_read(mapping);

	return wpwalk.total;
}
EXPORT_SYMBOL_GPL(wp_shared_mapping_range);
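
/*
 * Usage sketch (illustrative only; "obj" and its members are made up):
 * a driver that wants to catch future CPU writes to a shared mapping
 * can write-protect the backing address_space range, after which new
 * writers fault into page_mkwrite() or pfn_mkwrite():
 *
 *	unsigned long num_wp;
 *
 *	num_wp = wp_shared_mapping_range(obj->mapping, obj->first_pgoff,
 *					 obj->num_pages);
 *	(num_wp ptes were writable and are now write-protected)
 */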

/**
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * address space range
 * @mapping: The address_space we want to clean
 * @first_index: The first page offset in the range
 * @nr: Number of incremental page offsets to cover
 * @bitmap_pgoff: The page offset of the first bit in @bitmap
 * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to
 * cover the whole range @first_index..@first_index + @nr.
 * @start: Pointer to the page offset of the first set bit in @bitmap.
 * The value is modified as new bits are set by the function.
 * @end: Pointer to the page offset of the last set bit in @bitmap + 1,
 * or a value <= *start if no bits are set. The value is modified as new
 * bits are set by the function.
 *
 * Note: When this function returns there is no guarantee that a CPU has
 * not already dirtied new ptes. However it will not clean any ptes not
 * reported in the bitmap. The guarantees are as follows:
 * a) All ptes dirty when the function starts executing will end up recorded
 *    in the bitmap.
 * b) All ptes dirtied after that will either remain dirty, be recorded in the
 *    bitmap or both.
 *
 * If a caller needs to make sure all dirty ptes are picked up and none
 * additional are added, it first needs to write-protect the address-space
 * range and make sure new writers are blocked in page_mkwrite() or
 * pfn_mkwrite(). And then after a TLB flush following the write-protection
 * pick up all dirty bits. A sketch of this scheme follows the function
 * below.
 *
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge dirty entries, though, and can easily be extended
 * to handle them as well.
 *
 * Return: The number of dirty ptes actually cleaned.
 */
unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
						pgoff_t first_index, pgoff_t nr,
						pgoff_t bitmap_pgoff,
						unsigned long *bitmap,
						pgoff_t *start,
						pgoff_t *end)
{
	bool none_set = (*start >= *end);
	struct clean_walk cwalk = {
		.base = { .total = 0 },
		.bitmap_pgoff = bitmap_pgoff,
		.bitmap = bitmap,
		.start = none_set ? nr : *start,
		.end = none_set ? 0 : *end,
	};

	i_mmap_lock_read(mapping);
	WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
				  &cwalk.base));
	i_mmap_unlock_read(mapping);

	*start = cwalk.start;
	*end = cwalk.end;

	return cwalk.base.total;
}
EXPORT_SYMBOL_GPL(clean_record_shared_mapping_range);
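
/*
 * Usage sketch (illustrative only) of the scheme described above; all
 * identifiers except the two exported functions and the bitmap helpers
 * are hypothetical. First write-protect the range so new writers are
 * caught in page_mkwrite(), then clean and record the already-dirty
 * ptes:
 *
 *	pgoff_t start = nr, end = 0;	(start >= end: no bits set yet)
 *	unsigned long i;
 *
 *	bitmap_zero(obj->dirty_bitmap, nr);
 *	wp_shared_mapping_range(obj->mapping, obj->first_pgoff, nr);
 *	clean_record_shared_mapping_range(obj->mapping, obj->first_pgoff,
 *					  nr, obj->first_pgoff,
 *					  obj->dirty_bitmap, &start, &end);
 *	for_each_set_bit(i, obj->dirty_bitmap, end)
 *		(copy page obj->first_pgoff + i to the device)
 */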