ARM64: configs: tizen: Adjust for mipi csi camera of rpi5
[platform/kernel/linux-rpi.git] / include / linux / thread_info.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* thread_info.h: common low-level thread information accessors
3  *
4  * Copyright (C) 2002  David Howells (dhowells@redhat.com)
5  * - Incorporating suggestions made by Linus Torvalds
6  */
7
8 #ifndef _LINUX_THREAD_INFO_H
9 #define _LINUX_THREAD_INFO_H
10
11 #include <linux/types.h>
12 #include <linux/limits.h>
13 #include <linux/bug.h>
14 #include <linux/restart_block.h>
15 #include <linux/errno.h>
16
17 #ifdef CONFIG_THREAD_INFO_IN_TASK
18 /*
19  * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
20  * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
21  * including <asm/current.h> can cause a circular dependency on some platforms.
22  */
23 #include <asm/current.h>
24 #define current_thread_info() ((struct thread_info *)current)
25 #endif
26
27 #include <linux/bitops.h>
28
/*
 * For per-arch arch_within_stack_frames() implementations, defined in
 * asm/thread_info.h.
 *
 * 0 (NOT_STACK) is what the generic fallback below returns
 * unconditionally; BAD_STACK is the only negative value.  The precise
 * GOOD_FRAME vs GOOD_STACK distinction is made by the arch
 * implementations — NOTE(review): confirm semantics against those.
 */
enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};
39
#ifdef CONFIG_GENERIC_ENTRY
/*
 * Bit numbers within thread_info::syscall_work (see the
 * set/test/clear_syscall_work() accessors below).  Each
 * SYSCALL_WORK_* macro that follows is the matching single-bit mask.
 */
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

/* Mask form of each bit above, for testing several at once. */
#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif
59
60 #include <asm/thread_info.h>
61
/*
 * Lazy-reschedule TIF aliases.  With CONFIG_PREEMPT_BUILD_AUTO the
 * architecture provides a distinct lazy flag (TIF_ARCH_RESCHED_LAZY);
 * otherwise the lazy names simply alias the ordinary TIF_NEED_RESCHED
 * bit and the lazy/non-lazy bit offset collapses to 0.
 */
#ifdef CONFIG_PREEMPT_BUILD_AUTO
# define TIF_NEED_RESCHED_LAZY		TIF_ARCH_RESCHED_LAZY
# define _TIF_NEED_RESCHED_LAZY		_TIF_ARCH_RESCHED_LAZY
# define TIF_NEED_RESCHED_LAZY_OFFSET	(TIF_NEED_RESCHED_LAZY - TIF_NEED_RESCHED)
#else
# define TIF_NEED_RESCHED_LAZY		TIF_NEED_RESCHED
# define _TIF_NEED_RESCHED_LAZY		_TIF_NEED_RESCHED
# define TIF_NEED_RESCHED_LAZY_OFFSET	0
#endif
71
72 #ifdef __KERNEL__
73
/* Arch hook to record extra restart state; the default does nothing. */
#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

/**
 * set_restart_fn - install a syscall restart callback
 * @restart: restart block to populate
 * @fn: function to run when the interrupted syscall is restarted
 *
 * Stores @fn, lets the architecture stash any additional data, and
 * returns -ERESTART_RESTARTBLOCK so the caller can return this value
 * directly as the syscall result.
 */
static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}
85
/* Stack alignment defaults to the stack size unless the arch overrides it. */
#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

/* GFP flags for thread stack allocation: memcg-accounted and zeroed. */
#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)
91
92 /*
93  * flag set/clear/test wrappers
94  * - pass TIF_xxxx constants to these functions
95  */
96
/*
 * Atomically set TIF_* bit @flag in @ti->flags.
 * The cast assumes ->flags is bitop-compatible on every arch —
 * NOTE(review): ->flags' declared type is arch-defined; confirm.
 */
static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}
101
/* Atomically clear TIF_* bit @flag in @ti->flags. */
static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}
106
107 static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
108                                          bool value)
109 {
110         if (value)
111                 set_ti_thread_flag(ti, flag);
112         else
113                 clear_ti_thread_flag(ti, flag);
114 }
115
/* Atomically set @flag in @ti->flags and return its previous value. */
static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}
120
/* Atomically clear @flag in @ti->flags and return its previous value. */
static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}
125
/* Test whether @flag is set in @ti->flags without modifying it. */
static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}
130
/*
 * This may be used in noinstr code, and needs to be __always_inline to prevent
 * inadvertent instrumentation.
 */
static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	/* Single READ_ONCE() snapshot of the whole flags word. */
	return READ_ONCE(ti->flags);
}
139
/* The same flag accessors, resolved against the current task. */
#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

/* Flags-word snapshot for an arbitrary task @t. */
#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))
157
#ifdef CONFIG_GENERIC_ENTRY
/*
 * SYSCALL_WORK accessors.  With the generic entry code, these operate
 * on the dedicated thread_info::syscall_work word using the
 * SYSCALL_WORK_BIT_* numbers defined above.
 */
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

/* Same accessors, but against an arbitrary task @t instead of current. */
#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

/*
 * Without the generic entry code there is no syscall_work word; the
 * same names map onto the architecture's TIF_* bits in ->flags.
 */
#define set_syscall_work(fl)						\
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
189
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

/*
 * When the instrumented bitops wrappers are included, call the bare
 * arch_test_bit() instead of test_bit() so these helpers stay free of
 * instrumentation (cf. the noinstr note on read_ti_thread_flags()).
 */
static __always_inline bool tif_need_resched(void)
{
	return arch_test_bit(TIF_NEED_RESCHED,
			     (unsigned long *)(&current_thread_info()->flags));
}

static __always_inline bool tif_need_resched_lazy(void)
{
	/* IS_ENABLED() folds this to false without CONFIG_PREEMPT_BUILD_AUTO. */
	return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
		arch_test_bit(TIF_NEED_RESCHED_LAZY,
			      (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline bool tif_need_resched(void)
{
	return test_bit(TIF_NEED_RESCHED,
			(unsigned long *)(&current_thread_info()->flags));
}

static __always_inline bool tif_need_resched_lazy(void)
{
	/* IS_ENABLED() folds this to false without CONFIG_PREEMPT_BUILD_AUTO. */
	return IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) &&
		test_bit(TIF_NEED_RESCHED_LAZY,
			 (unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
221
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
/*
 * Generic fallback when the architecture cannot walk its stack frames:
 * unconditionally return 0 (NOT_STACK in the enum above).
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif
230
#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

/*
 * check_object_size - hardened-usercopy object check
 * @ptr: kernel address involved in the copy
 * @n: copy length in bytes
 * @to_user: direction of the copy
 *
 * Skips compile-time-constant sizes; presumably those are covered by
 * the compile-time diagnostics in check_copy_size() below — see there.
 */
static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
/* No-op when hardened usercopy is compiled out. */
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
246
/*
 * Compile-time diagnostics emitted by check_copy_size() when the copy
 * size is a constant and provably exceeds the object size.
 */
extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

/* Out-of-line overflow report; only reached when CONFIG_BUG is set
 * (see copy_overflow() below). */
void __copy_overflow(int size, unsigned long count);
253
/*
 * copy_overflow - report a runtime-detected copy overflow
 * @size: object size the compiler determined
 * @count: number of bytes the caller attempted to copy
 *
 * Compiles away entirely when CONFIG_BUG=n.
 */
static inline void copy_overflow(int size, unsigned long count)
{
	if (IS_ENABLED(CONFIG_BUG))
		__copy_overflow(size, count);
}
259
/*
 * check_copy_size - validate a usercopy length against the object size
 * @addr: kernel buffer being copied to or from
 * @bytes: requested copy length
 * @is_source: %true when @addr is the source of the copy
 *
 * Returns %true when the copy may proceed.  If the compiler knows the
 * object size and it is smaller than @bytes: a constant @bytes becomes
 * a compile-time error (__bad_copy_from/__bad_copy_to), a variable one
 * is reported at runtime and the copy is refused.  Copies larger than
 * INT_MAX are rejected, and the hardened-usercopy check runs last.
 */
static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __builtin_object_size(addr, 0);	/* -1 if unknown */
	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}
278
/* Default no-op when the architecture defines no arch_setup_new_exec(). */
#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif
282
/* Per-architecture task_struct setup/copy/teardown hooks. */
void arch_task_cache_init(void); /* for CONFIG_SH */
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src);
287
288 #endif  /* __KERNEL__ */
289
290 #endif /* _LINUX_THREAD_INFO_H */