/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>
#include <linux/uaccess.h>

struct ctl_table;
struct task_struct;
struct rusage;
union thread_union;
struct css_set;

/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL
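
/*
 * Arguments for creating a new task, filled in from the clone()/clone3()
 * syscall arguments or directly by in-kernel callers such as
 * kernel_thread() (see kernel/fork.c).
 */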
struct kernel_clone_args {
        u64 flags;
        int __user *pidfd;
        int __user *child_tid;
        int __user *parent_tid;
        int exit_signal;
        unsigned long stack;
        unsigned long stack_size;
        unsigned long tls;
        pid_t *set_tid;
        /* Number of elements in *set_tid */
        size_t set_tid_size;
};
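
/*
 * Example (a simplified sketch of the pattern used in kernel/fork.c):
 * in-kernel callers fill a kernel_clone_args on the stack and hand it
 * to kernel_clone(), declared below:
 *
 *      struct kernel_clone_args args = {
 *              .flags          = CLONE_VM | CLONE_UNTRACED,
 *              .exit_signal    = SIGCHLD,
 *      };
 *      pid_t pid = kernel_clone(&args);
 */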

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

extern union thread_union init_thread_union;
extern struct task_struct init_task;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);
void __noreturn make_task_dead(int signr);
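
/*
 * do_task_dead() is the final schedule out of an exiting task;
 * make_task_dead() is roughly do_exit() for contexts that cannot
 * safely continue (fatal exceptions and die() paths).
 */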

extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct *p);

extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);

extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif

extern __noreturn void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct task_struct *);

extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
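
/*
 * A simplified sketch of how these fit together (cf. kernel/umh.c,
 * which pairs user_mode_thread() with kernel_wait()):
 *
 *      pid = user_mode_thread(fn, arg, CLONE_VFORK | SIGCHLD);
 *      if (pid > 0)
 *              kernel_wait(pid, &status);
 */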

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
        refcount_inc(&t->usage);
        return t;
}

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
        if (refcount_dec_and_test(&t->usage))
                __put_task_struct(t);
}

static inline void put_task_struct_many(struct task_struct *t, int nr)
{
        if (refcount_sub_and_test(nr, &t->usage))
                __put_task_struct(t);
}

void put_task_struct_rcu_user(struct task_struct *task);
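
/*
 * Typical reference pattern (an illustrative sketch): pin a task with
 * get_task_struct() before leaving whatever context made the pointer
 * safe (RCU, tasklist_lock), and drop the reference when done:
 *
 *      rcu_read_lock();
 *      t = get_task_struct(task);
 *      rcu_read_unlock();
 *      ...
 *      put_task_struct(t);
 *
 * put_task_struct_rcu_user() instead drops an rcu_users reference and
 * defers the underlying put_task_struct() to after an RCU grace period.
 */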

/* Free all architecture-specific resources held by a thread. */
void release_thread(struct task_struct *dead_task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist we
 * must assume something there may need to be copied to userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        *offset = 0;
        /* Handle dynamically sized thread_struct. */
        *size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
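
/*
 * fork_init() in kernel/fork.c feeds the (offset, size) pair reported
 * here to kmem_cache_create_usercopy() as the usercopy whitelist for
 * the task_struct slab cache.
 */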

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
        return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
        return NULL;
}
#endif

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}
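
/*
 * Illustrative use, loosely following get_task_mm() in kernel/fork.c:
 * task_lock() stabilizes ->mm long enough to take a reference:
 *
 *      task_lock(task);
 *      mm = task->mm;
 *      if (mm)
 *              mmget(mm);
 *      task_unlock(task);
 */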

#endif /* _LINUX_SCHED_TASK_H */