/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/srcu.h>
#include <linux/interval_tree.h>

struct mmu_notifier_subscriptions;
struct mmu_notifier;
struct mmu_notifier_range;
struct mmu_interval_notifier;

/**
 * enum mmu_notifier_event - reason for the mmu notifier callback
 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
 * that moves the range.
 *
 * @MMU_NOTIFY_CLEAR: clear page table entry (many reasons for this like
 * madvise() or replacing a page by another one, ...).
 *
 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
 * range, ie using the vma access permission (vm_page_prot) to update the
 * whole range is enough, no need to inspect changes to the CPU page table
 * (mprotect() update).
 *
 * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
 * flag for pages in the range, so to mirror those changes the user must
 * inspect the CPU page table (from the end callback).
 *
 * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
 * same access flags). The user should soft dirty the page in the end callback
 * to make sure that anyone relying on soft dirtiness catches pages that might
 * be written through non CPU mappings.
 *
 * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
 * that the mm refcount is zero and the range is no longer accessible.
 *
 * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
 * a device driver to possibly ignore the invalidation if the
 * owner field matches the driver's device private pgmap owner.
 *
 * @MMU_NOTIFY_EXCLUSIVE: to signal a device driver that the device will no
 * longer have exclusive access to the page. When sent during creation of an
 * exclusive range the owner will be initialised to the value provided by the
 * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
 */
enum mmu_notifier_event {
	MMU_NOTIFY_UNMAP = 0,
	MMU_NOTIFY_CLEAR,
	MMU_NOTIFY_PROTECTION_VMA,
	MMU_NOTIFY_PROTECTION_PAGE,
	MMU_NOTIFY_SOFT_DIRTY,
	MMU_NOTIFY_RELEASE,
	MMU_NOTIFY_MIGRATE,
	MMU_NOTIFY_EXCLUSIVE,
};
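
/*
 * Illustrative sketch (not part of the kernel API): a driver's interval
 * notifier invalidate() callback can use the event and owner fields of the
 * passed range to skip invalidations the driver itself triggered. Here
 * "my_pgmap_owner" is a hypothetical token the driver also supplied as the
 * pgmap_owner of its device-private pages:
 *
 *	if (range->event == MMU_NOTIFY_MIGRATE &&
 *	    range->owner == my_pgmap_owner)
 *		return true;
 */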

#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, which would lead to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *subscription,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM has
	 * test-and-cleared the young/accessed bitflag in the
	 * pte. This way the VM will provide proper aging for
	 * accesses to the page through the secondary MMUs and not
	 * only for the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *subscription,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *subscription,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where the pte mapping a page is
	 * changed: for example, when ksm remaps the pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *subscription,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_lock and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return with -EAGAIN if sleeping would be required.
	 * 0 should be returned otherwise. Please note that notifiers that can
	 * fail invalidate_range_start are not allowed to implement
	 * invalidate_range_end, as there is no mechanism for informing the
	 * notifier that its start failed.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *subscription,
				      const struct mmu_notifier_range *range);
	void (*invalidate_range_end)(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range);
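
	/*
	 * Illustrative sketch: an invalidate_range_start() implementation
	 * that may need to sleep must honour the blockable flag, e.g.
	 * (my_lock and my_zap_sptes() are hypothetical driver pieces):
	 *
	 *	static int my_range_start(struct mmu_notifier *sub,
	 *			const struct mmu_notifier_range *range)
	 *	{
	 *		if (!mmu_notifier_range_blockable(range))
	 *			return -EAGAIN;
	 *		mutex_lock(&my_lock);
	 *		my_zap_sptes(range->start, range->end);
	 *		mutex_unlock(&my_lock);
	 *		return 0;
	 *	}
	 *
	 * Per the rules above, a notifier whose start can fail like this
	 * must not implement invalidate_range_end().
	 */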

	/*
	 * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
	 * which shares page-tables with the CPU. The
	 * invalidate_range_start()/end() callbacks should not be implemented as
	 * arch_invalidate_secondary_tlbs() already catches the points in time
	 * when an external TLB needs to be flushed.
	 *
	 * This requires arch_invalidate_secondary_tlbs() to be called while
	 * holding the ptl spin-lock and therefore this callback is not allowed
	 * to sleep.
	 *
	 * This is called by architecture code whenever invalidating a TLB
	 * entry. It is assumed that any secondary TLB has the same rules for
	 * when invalidations are required. If this is not the case architecture
	 * code will need to call this explicitly when required for secondary
	 * TLB invalidations.
	 */
	void (*arch_invalidate_secondary_tlbs)(
					struct mmu_notifier *subscription,
					struct mm_struct *mm,
					unsigned long start,
					unsigned long end);

	/*
	 * These callbacks are used with the get/put interface to manage the
	 * lifetime of the mmu_notifier memory. alloc_notifier() returns a new
	 * notifier for use with the mm.
	 *
	 * free_notifier() is only called after the mmu_notifier has been
	 * fully put, calls to any ops callback are prevented and no ops
	 * callbacks are currently running. It is called from a SRCU callback
	 * and cannot sleep.
	 */
	struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
	void (*free_notifier)(struct mmu_notifier *subscription);
};

/*
 * The notifier chains are protected by mmap_lock and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_lock locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_lock is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
	struct mm_struct *mm;
	struct rcu_head rcu;
	unsigned int users;
};

/**
 * struct mmu_interval_notifier_ops
 * @invalidate: Upon return the caller must stop using any SPTEs within this
 *              range. This function can sleep. Return false only if sleeping
 *              was required but mmu_notifier_range_blockable(range) is false.
 */
struct mmu_interval_notifier_ops {
	bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq);
};

struct mmu_interval_notifier {
	struct interval_tree_node interval_tree;
	const struct mmu_interval_notifier_ops *ops;
	struct mm_struct *mm;
	struct hlist_node deferred_item;
	unsigned long invalidate_seq;
};

#ifdef CONFIG_MMU_NOTIFIER

#ifdef CONFIG_LOCKDEP
extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif

struct mmu_notifier_range {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	unsigned int flags;
	enum mmu_notifier_event event;
	void *owner;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->notifier_subscriptions);
}

struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
					     struct mm_struct *mm);
static inline struct mmu_notifier *
mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
{
	struct mmu_notifier *ret;

	mmap_write_lock(mm);
	ret = mmu_notifier_get_locked(ops, mm);
	mmap_write_unlock(mm);
	return ret;
}

void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
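
/*
 * Illustrative lifetime sketch for the get/put interface, assuming a
 * hypothetical driver context "ctx" and an ops table "my_ops" implementing
 * alloc_notifier()/free_notifier():
 *
 *	sub = mmu_notifier_get(&my_ops, current->mm);
 *	if (IS_ERR(sub))
 *		return PTR_ERR(sub);
 *	ctx->notifier = sub;
 *	...
 *	mmu_notifier_put(ctx->notifier);
 *
 * On the module unload path, mmu_notifier_synchronize() must be called to
 * ensure all outstanding free_notifier() callbacks have finished before
 * the ops table goes away.
 */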

extern int mmu_notifier_register(struct mmu_notifier *subscription,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *subscription,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
				    struct mm_struct *mm);

unsigned long
mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long length,
				 const struct mmu_interval_notifier_ops *ops);
int mmu_interval_notifier_insert_locked(
	struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
	unsigned long start, unsigned long length,
	const struct mmu_interval_notifier_ops *ops);
void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
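
/*
 * Illustrative sketch of subscribing to a user buffer, with hypothetical
 * names (ctx is driver state; my_interval_ops supplies the invalidate()
 * callback described above):
 *
 *	ret = mmu_interval_notifier_insert(&ctx->interval_sub, current->mm,
 *					   buf_start, buf_len,
 *					   &my_interval_ops);
 *	if (ret)
 *		return ret;
 *	...
 *	mmu_interval_notifier_remove(&ctx->interval_sub);
 */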

/**
 * mmu_interval_set_seq - Save the invalidation sequence
 * @interval_sub: The subscription passed to invalidate
 * @cur_seq: The cur_seq passed to the invalidate() callback
 *
 * This must be called unconditionally from the invalidate callback of a
 * struct mmu_interval_notifier_ops under the same lock that is used to call
 * mmu_interval_read_retry(). It updates the sequence number for later use by
 * mmu_interval_read_retry(). The provided cur_seq will always be odd.
 *
 * If the caller does not call mmu_interval_read_begin() or
 * mmu_interval_read_retry() then this call is not required.
 */
static inline void
mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
		     unsigned long cur_seq)
{
	WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
}

/**
 * mmu_interval_read_retry - End a read side critical section against a VA range
 * @interval_sub: The subscription
 * @seq: The return of the paired mmu_interval_read_begin()
 *
 * This MUST be called under a user provided lock that is also held
 * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
 *
 * Each call should be paired with a single mmu_interval_read_begin() and
 * should be used to conclude the read side.
 *
 * Returns true if an invalidation collided with this critical section, and
 * the caller should retry.
 */
static inline bool
mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
			unsigned long seq)
{
	return interval_sub->invalidate_seq != seq;
}
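
/*
 * The canonical read side, paraphrased from the mmu_interval_read_begin()
 * documentation ("driver_lock" stands for the user-provided lock that
 * op->invalidate() also holds around mmu_interval_set_seq()):
 *
 *	again:
 *	seq = mmu_interval_read_begin(interval_sub);
 *	(fetch the user pages and prepare the sptes, without locks)
 *	take_lock(driver_lock);
 *	if (mmu_interval_read_retry(interval_sub, seq)) {
 *		release_lock(driver_lock);
 *		goto again;
 *	}
 *	(establish the sptes under driver_lock)
 *	release_lock(driver_lock);
 */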

/**
 * mmu_interval_check_retry - Test if a collision has occurred
 * @interval_sub: The subscription
 * @seq: The return of the matching mmu_interval_read_begin()
 *
 * This can be used in the critical section between mmu_interval_read_begin()
 * and mmu_interval_read_retry(). A return of true indicates an invalidation
 * has collided with this critical region and a future
 * mmu_interval_read_retry() will return true.
 *
 * False is not reliable and only suggests a collision may not have
 * occurred. It can be called many times and does not have to hold the user
 * provided lock.
 *
 * This call can be used as part of loops and other expensive operations to
 * expedite a retry.
 */
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
			 unsigned long seq)
{
	/* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
	return READ_ONCE(interval_sub->invalidate_seq) != seq;
}

extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
	might_sleep();

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
		__mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}

static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	int ret = 0;

	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	if (mm_has_notifiers(range->mm)) {
		range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
		ret = __mmu_notifier_invalidate_range_start(range);
	}
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
	return ret;
}

static inline void
mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
	if (mmu_notifier_range_blockable(range))
		might_sleep();

	if (mm_has_notifiers(range->mm))
		__mmu_notifier_invalidate_range_end(range);
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
	mm->notifier_subscriptions = NULL;
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_subscriptions_destroy(mm);
}

static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
					   enum mmu_notifier_event event,
					   unsigned int flags,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	range->event = event;
	range->mm = mm;
	range->start = start;
	range->end = end;
	range->flags = flags;
}

static inline void mmu_notifier_range_init_owner(
			struct mmu_notifier_range *range,
			enum mmu_notifier_event event, unsigned int flags,
			struct mm_struct *mm, unsigned long start,
			unsigned long end, void *owner)
{
	mmu_notifier_range_init(range, event, flags, mm, start, end);
	range->owner = owner;
}
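
/*
 * Illustrative sketch of the usual pairing around a page table update
 * (mm, start and end are assumed to come from the caller's context):
 *
 *	struct mmu_notifier_range range;
 *
 *	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
 *	mmu_notifier_invalidate_range_start(&range);
 *	(clear the CPU page table entries for [start, end) here)
 *	mmu_notifier_invalidate_range_end(&range);
 */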

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary MMU
 * pte invalidate must have already happened with a ptep_clear_flush() before
 * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
 * required when we change both the protection of the mapping from read-only to
 * read-write and the pfn (like during copy on write page faults). Otherwise the
 * old page would remain mapped readonly in the secondary MMUs after the new
 * page is already writable by some CPU through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
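
/*
 * Illustrative sketch of the ordering described above, as in a
 * copy-on-write fault replacing a read-only page with a writable copy
 * (vma, addr, ptep and the new writable pte "newpte" are assumed to come
 * from the caller's fault-handling context):
 *
 *	ptep_clear_flush(vma, addr, ptep);
 *	set_pte_at_notify(vma->vm_mm, addr, ptep, newpte);
 *
 * The primary pte is flushed first, so no CPU can keep writing the old
 * page while the secondary MMUs are being switched to the new one.
 */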

#else /* CONFIG_MMU_NOTIFIER */

struct mmu_notifier_range {
	unsigned long start;
	unsigned long end;
};

static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
					    unsigned long start,
					    unsigned long end)
{
	range->start = start;
	range->end = end;
}

#define mmu_notifier_range_init(range, event, flags, mm, start, end)	\
	_mmu_notifier_range_init(range, start, end)
#define mmu_notifier_range_init_owner(range, event, flags, mm, start,	\
					end, owner)			\
	_mmu_notifier_range_init(range, start, end)

static inline bool
mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
{
	return true;
}

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
}

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
	return 0;
}

static inline
void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}

static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}

#define mmu_notifier_range_update_to_read_only(r) false

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

static inline void mmu_notifier_synchronize(void)
{
}

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */