/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: common low-level thread information accessors
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds
 */

#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
#include <linux/errno.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
 * including <asm/current.h> can cause a circular dependency on some platforms.
 */
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif
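
/*
 * Illustrative note, not part of the original header: with
 * CONFIG_THREAD_INFO_IN_TASK, struct task_struct embeds thread_info as its
 * first member, so the cast above behaves like this sketch (the real
 * task_thread_info() definition lives in <linux/sched.h>):
 *
 *	#define task_thread_info(tsk)	(&(tsk)->thread_info)
 *	#define current_thread_info()	task_thread_info(current)
 */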

#include <linux/bitops.h>

/*
 * For per-arch arch_within_stack_frames() implementations, defined in
 * asm/thread_info.h.
 */

#ifdef CONFIG_GENERIC_ENTRY
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH	BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif

#include <asm/thread_info.h>

#ifdef __KERNEL__

#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}
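
/*
 * Usage sketch (illustrative, not part of the original header): a
 * restartable syscall stashes its state in current->restart_block and
 * returns through set_restart_fn(), roughly:
 *
 *	struct restart_block *restart = &current->restart_block;
 *
 *	restart->nanosleep.expires = expires;
 *	return set_restart_fn(restart, hrtimer_nanosleep_restart);
 *
 * so that a signal-interrupted call is re-entered via the saved callback
 * when userspace issues the restart_syscall() system call.
 */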

#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)

/*
 * flag set/clear/test wrappers
 * - pass TIF_xxxx constants to these functions
 */

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
					 bool value)
{
	if (value)
		set_ti_thread_flag(ti, flag);
	else
		clear_ti_thread_flag(ti, flag);
}

static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}

/*
 * This may be used in noinstr code, and needs to be __always_inline to prevent
 * inadvertent instrumentation.
 */
static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	return READ_ONCE(ti->flags);
}

#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))
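
/*
 * Usage sketch (illustrative, not part of the original header): callers pass
 * arch-defined TIF_xxxx constants from <asm/thread_info.h>, e.g.
 *
 *	set_thread_flag(TIF_SIGPENDING);
 *	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
 *		handle_notify_resume();		// hypothetical helper
 *
 * The *_ti_* variants take an explicit thread_info, and
 * read_task_thread_flags(t) snapshots another task's flags word.
 */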

#ifdef CONFIG_GENERIC_ENTRY
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

#define set_syscall_work(fl) \
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
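
/*
 * Usage sketch (illustrative, not part of the original header): callers pass
 * the suffix of a SYSCALL_WORK_* constant, e.g.
 *
 *	set_task_syscall_work(task, SECCOMP);
 *	if (test_syscall_work(SYSCALL_TRACE))
 *		report_syscall_entry(regs);	// hypothetical call site
 *
 * On CONFIG_GENERIC_ENTRY kernels these operate on thread_info::syscall_work;
 * otherwise they fall back to the corresponding TIF_* thread flag.
 */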

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

static __always_inline bool tif_need_resched(void)
{
	return arch_test_bit(TIF_NEED_RESCHED,
			     (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline bool tif_need_resched(void)
{
	return test_bit(TIF_NEED_RESCHED,
			(unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
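
/*
 * Illustrative note, not part of the original header: need_resched() in
 * <linux/sched.h> builds on this helper, so a typical voluntary-preemption
 * check looks like
 *
 *	if (need_resched())
 *		schedule();
 */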

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif

#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

static __always_inline void check_object_size(const void *ptr, unsigned long n,
					       bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */

extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

void __copy_overflow(int size, unsigned long count);

static inline void copy_overflow(int size, unsigned long count)
{
	if (IS_ENABLED(CONFIG_BUG))
		__copy_overflow(size, count);
}

static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __builtin_object_size(addr, 0);

	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}
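
/*
 * Usage sketch (illustrative, not part of the original header): the generic
 * uaccess wrappers gate user copies on this check, roughly:
 *
 *	static __always_inline unsigned long __must_check
 *	copy_to_user(void __user *to, const void *from, unsigned long n)
 *	{
 *		if (check_copy_size(from, n, true))
 *			n = _copy_to_user(to, from, n);
 *		return n;
 *	}
 *
 * Undersized objects known at compile time trigger __bad_copy_from/to(),
 * while runtime sizes are vetted by hardened usercopy via check_object_size().
 */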

#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif

void arch_task_cache_init(void); /* for CONFIG_SH */
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src);

#endif	/* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */