/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
#include <linux/ioasid.h>
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);
/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}
extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
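
/*
 * Illustrative usage sketch (not part of the original header); the context
 * structure and helper names below are hypothetical. A caller that must keep
 * the mm_struct itself alive, without keeping the address space alive, pairs
 * mmgrab()/mmdrop():
 *
 *	static void keep_mm_pinned(struct my_ctx *ctx, struct mm_struct *mm)
 *	{
 *		mmgrab(mm);		// mm_count reference: the struct stays valid
 *		ctx->mm = mm;		// the address space itself may still go away
 *	}
 *
 *	static void release_mm_pin(struct my_ctx *ctx)
 *	{
 *		mmdrop(ctx->mm);	// drop the mm_count reference
 *	}
 */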
#ifdef CONFIG_PREEMPT_RT
/*
 * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
 * by far the least expensive way to do that.
 */
static inline void __mmdrop_delayed(struct rcu_head *rhp)
{
	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);

	__mmdrop(mm);
}

/*
 * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
 * kernels via RCU.
 */
static inline void mmdrop_sched(struct mm_struct *mm)
{
	/* Provides a full memory barrier. See mmdrop() */
	if (atomic_dec_and_test(&mm->mm_count))
		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
}
#else
static inline void mmdrop_sched(struct mm_struct *mm)
{
	mmdrop(mm);
}
#endif
/* Helpers for lazy TLB mm refcounting */
static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmgrab(mm);
}

static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
		mmdrop(mm);
	} else {
		/*
		 * mmdrop_lazy_tlb must provide a full memory barrier, see the
		 * membarrier comment in finish_task_switch() which relies on this.
		 */
		smp_mb();
	}
}

static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
{
	if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
		mmdrop_sched(mm);
	else
		smp_mb(); /* see mmdrop_lazy_tlb() above */
}
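
/*
 * Illustrative sketch (not from the original header): roughly how the
 * scheduler pairs these helpers when a kernel thread borrows the previous
 * task's mm as its lazy TLB mm:
 *
 *	mmgrab_lazy_tlb(prev->active_mm);	// keep the borrowed mm around
 *	...					// run lazily on its page tables
 *	mmdrop_lazy_tlb(prev->active_mm);	// drop it when switching away
 *
 * With CONFIG_MMU_LAZY_TLB_REFCOUNT=n these degrade to the smp_mb() required
 * by membarrier, see the comments above.
 */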
/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}
static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);

#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from async context. Can
 * be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
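
/*
 * Illustrative usage sketch (not part of the original header): the returned
 * mm carries an mm_users reference, so the address space stays usable until
 * the matching mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		...		// mm_users is elevated, the mappings remain
 *		mmput(mm);	// drop the mm_users reference
 *	}
 */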
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */
#ifndef arch_get_mmap_end
#define arch_get_mmap_end(addr, len, flags)	(TASK_SIZE)
#endif

#ifndef arch_get_mmap_base
#define arch_get_mmap_base(addr, base) (base)
#endif
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);

unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done &&
			rcu_dereference(tsk->real_parent)->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 * PF_MEMALLOC_PIN  implies !GFP_MOVABLE
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;

		if (pflags & PF_MEMALLOC_PIN)
			flags &= ~__GFP_MOVABLE;
	}
	return flags;
}
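
/*
 * Illustrative sketch (not part of the original header): an allocation path
 * can derive the flags that are effective in the current scope. Inside a
 * memalloc_noio_save() section, for example, GFP_KERNEL loses __GFP_IO and
 * __GFP_FS:
 *
 *	gfp_t effective = current_gfp_context(GFP_KERNEL);
 */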
#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(unsigned long ip);
extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(unsigned long ip) { }
static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
/* Any memory-allocation retry loop should use
 * memalloc_retry_wait(), and pass the flags for the most
 * constrained allocation attempt that might have failed.
 * This provides useful documentation of where loops are,
 * and a central place to fine tune the waiting as the MM
 * implementation changes.
 */
static inline void memalloc_retry_wait(gfp_t gfp_flags)
{
	/* We use io_schedule_timeout because waiting for memory
	 * typically includes waiting for dirty pages to be
	 * written out, which requires IO.
	 */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	gfp_flags = current_gfp_context(gfp_flags);
	if (gfpflags_allow_blocking(gfp_flags) &&
	    !(gfp_flags & __GFP_NORETRY))
		/* Probably waited already, no need for much more */
		io_schedule_timeout(1);
	else
		/* Probably didn't wait, and has now released a lock,
		 * so now is a good time to wait
		 */
		io_schedule_timeout(HZ/50);
}
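
/*
 * Illustrative retry-loop sketch (not part of the original header); the
 * allocating helper named below is hypothetical:
 *
 *	while (!(obj = my_alloc_object(GFP_NOFS)))
 *		memalloc_retry_wait(GFP_NOFS);
 */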
/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in functions
 * that might allocate, but often don't. Compiles to nothing without
 * CONFIG_LOCKDEP. Includes a conditional might_sleep() if @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
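
/*
 * Illustrative sketch (not part of the original header): a function that only
 * allocates on a slow path can still be annotated unconditionally, so lockdep
 * catches fs_reclaim deadlocks even when the fast path never allocates:
 *
 *	static void *my_lookup_or_create(gfp_t gfp)
 *	{
 *		might_alloc(gfp);
 *		...
 *	}
 */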
/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}
/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save.
 * Always make sure that @flags is the value returned by the pairing
 * memalloc_noio_save call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
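
/*
 * Illustrative scope sketch (not part of the original header), e.g. for code
 * running on an IO submission path; memalloc_nofs_save()/memalloc_nofs_restore()
 * below follow the same pattern for the filesystem-reclaim case:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	...				// all allocations here are implicitly GFP_NOIO
 *	memalloc_noio_restore(noio_flags);
 */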
/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}
/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save.
 * Always make sure that @flags is the value returned by the pairing
 * memalloc_nofs_save call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
static inline unsigned int memalloc_pin_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_PIN;

	current->flags |= PF_MEMALLOC_PIN;
	return flags;
}

static inline void memalloc_pin_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
}
#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations until the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (!in_task()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
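
/*
 * Illustrative sketch (not part of the original header): remote charging of a
 * __GFP_ACCOUNT allocation to @memcg. The old value must be restored because
 * the scopes can nest:
 *
 *	struct mem_cgroup *old = set_active_memcg(memcg);
 *	ptr = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *	set_active_memcg(old);
 */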
#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY		= (1U << 6),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ			= (1U << 7),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
	MEMBARRIER_FLAG_RSEQ		= (1U << 1),
};
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (current->mm != mm)
		return;
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}
extern void membarrier_exec_mmap(struct mm_struct *mm);

extern void membarrier_update_current_mm(struct mm_struct *next_mm);
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_exec_mmap(struct mm_struct *mm) { }
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) { }
static inline void membarrier_update_current_mm(struct mm_struct *next_mm) { }
#endif
#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = INVALID_IOASID;
}

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return mm->pasid != INVALID_IOASID;
}

/* Associate a PASID with an mm_struct: */
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid)
{
	mm->pasid = pasid;
}

static inline void mm_pasid_drop(struct mm_struct *mm)
{
	if (mm_valid_pasid(mm)) {
		ioasid_free(mm->pasid);
		mm->pasid = INVALID_IOASID;
	}
}
#else
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {}
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif
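
/*
 * Illustrative lifecycle sketch (not part of the original header): an IOMMU
 * SVA bind path allocates a PASID, publishes it with mm_pasid_set(), and
 * relies on mm_pasid_drop() at mm teardown to return it to the IOASID
 * allocator. The allocation helper named here is hypothetical:
 *
 *	u32 pasid = my_alloc_pasid_for(mm);
 *	mm_pasid_set(mm, pasid);	// associate the PASID with the mm
 *	...
 *	mm_pasid_drop(mm);		// frees the IOASID if one was set
 */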
#endif /* _LINUX_SCHED_MM_H */