kernel/entry/common.c, from platform/kernel/linux-rpi.git with patch-5.15.79-rt54 applied
// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/highmem.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

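/*
 * Defining CREATE_TRACE_POINTS before including the trace header below makes
 * this file emit the definitions of the syscall tracepoints
 * (trace_sys_enter/trace_sys_exit) rather than only their declarations.
 */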
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
        arch_check_user_regs(regs);
        lockdep_hardirqs_off(CALLER_ADDR0);

        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();

        instrumentation_begin();
        trace_hardirqs_off_finish();
        instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
        __enter_from_user_mode(regs);
}

static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
        if (unlikely(audit_context())) {
                unsigned long args[6];

                syscall_get_arguments(current, regs, args);
                audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
        }
}

static long syscall_trace_enter(struct pt_regs *regs, long syscall,
                                unsigned long work)
{
        long ret = 0;

        /*
         * Handle Syscall User Dispatch. This must come first, since
         * the ABI here can be something that doesn't make sense for
         * other syscall_work features.
         */
        if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
                if (syscall_user_dispatch(regs))
                        return -1L;
        }

        /* Handle ptrace */
        if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
                ret = arch_syscall_enter_tracehook(regs);
                if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
                        return -1L;
        }

        /* Do seccomp after ptrace, to catch any tracer changes. */
        if (work & SYSCALL_WORK_SECCOMP) {
                ret = __secure_computing(NULL);
                if (ret == -1L)
                        return ret;
        }

        /* Either of the above might have changed the syscall number */
        syscall = syscall_get_nr(current, regs);

        if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, syscall);

        syscall_enter_audit(regs, syscall);

        return ret ? : syscall;
}
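/*
 * For orientation (not upstream documentation): a return value of -1L tells
 * the caller to skip syscall dispatch entirely; ptrace, seccomp or syscall
 * user dispatch has already arranged the register/return state. Otherwise
 * the possibly rewritten syscall number is returned and the caller
 * dispatches that instead of the original one.
 */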

static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
        unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

        if (work & SYSCALL_WORK_ENTER)
                syscall = syscall_trace_enter(regs, syscall, work);

        return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
        return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
        long ret;

        __enter_from_user_mode(regs);

        instrumentation_begin();
        local_irq_enable();
        ret = __syscall_enter_from_user_work(regs, syscall);
        instrumentation_end();

        return ret;
}
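/*
 * Usage sketch (hypothetical, loosely modeled on x86's do_syscall_64(); not
 * part of this file): architecture entry glue brackets the dispatch with the
 * enter/exit helpers and must treat a negative return as "skip the syscall":
 *
 *      __visible noinstr void arch_do_syscall(struct pt_regs *regs, long nr)
 *      {
 *              nr = syscall_enter_from_user_mode(regs, nr);
 *
 *              instrumentation_begin();
 *              if (nr >= 0 && nr < NR_syscalls)
 *                      arch_dispatch_syscall(regs, nr);  (hypothetical)
 *              instrumentation_end();
 *
 *              syscall_exit_to_user_mode(regs);
 *      }
 */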

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
        __enter_from_user_mode(regs);
        instrumentation_begin();
        local_irq_enable();
        instrumentation_end();
}
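/*
 * The prepare()/work() split above exists for architectures that have to run
 * their own code with interrupts enabled between establishing state and
 * handling the syscall entry work; sketch with a hypothetical helper:
 *
 *      syscall_enter_from_user_mode_prepare(regs);
 *      nr = arch_read_syscall_nr(regs);                  (hypothetical)
 *      nr = syscall_enter_from_user_mode_work(regs, nr);
 */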

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
        instrumentation_begin();
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        instrumentation_end();

        user_enter_irqoff();
        arch_exit_to_user_mode();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
        __exit_to_user_mode();
}
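/*
 * Like enter_from_user_mode() above, this wrapper is for architecture code
 * that performs the final user mode transition outside of the combined
 * syscall/irqentry helpers in this file; see the kernel-doc in
 * entry-common.h.
 */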

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
        if (ti_work & _TIF_NOTIFY_SIGNAL)
                tracehook_notify_signal();

        arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                                            unsigned long ti_work)
{
        /*
         * Before returning to user space ensure that all pending work
         * items have been completed.
         */
        while (ti_work & EXIT_TO_USER_MODE_WORK) {

                local_irq_enable_exit_to_user(ti_work);

                if (ti_work & _TIF_NEED_RESCHED_MASK)
                        schedule();

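                /*
                 * On PREEMPT_RT an architecture's trap handler may not be
                 * able to deliver a forced signal directly, as doing so can
                 * acquire sleeping locks from a non-preemptible context.
                 * Architectures opting in via ARCH_RT_DELAYS_SIGNAL_SEND
                 * stash the siginfo in task_struct instead, and the signal
                 * is delivered here where it is safe to sleep.
                 */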
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
                if (unlikely(current->forced_info.si_signo)) {
                        struct task_struct *t = current;
                        force_sig_info(&t->forced_info);
                        t->forced_info.si_signo = 0;
                }
#endif

                if (ti_work & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                if (ti_work & _TIF_PATCH_PENDING)
                        klp_update_patch_state(current);

                if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
                        handle_signal_work(regs, ti_work);

                if (ti_work & _TIF_NOTIFY_RESUME)
                        tracehook_notify_resume(regs);

                /* Architecture specific TIF work */
                arch_exit_to_user_mode_work(regs, ti_work);

                /*
                 * Disable interrupts and reevaluate the work flags as they
                 * might have changed while interrupts and preemption were
                 * enabled above.
                 */
                local_irq_disable_exit_to_user();

                /* Check if any of the above work has queued a deferred wakeup */
                tick_nohz_user_enter_prepare();

                ti_work = READ_ONCE(current_thread_info()->flags);
        }

        /* Return the latest work state for arch_exit_to_user_mode() */
        return ti_work;
}

static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
        unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

        lockdep_assert_irqs_disabled();

        /* Flush pending rcuog wakeup before the last need_resched() check */
        tick_nohz_user_enter_prepare();

        if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                ti_work = exit_to_user_mode_loop(regs, ti_work);

        arch_exit_to_user_mode_prepare(regs, ti_work);

        /* Ensure that the address limit is intact and no locks are held */
        addr_limit_user_check();
        kmap_assert_nomap();
        lockdep_assert_irqs_disabled();
        lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). The syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
        if (work & SYSCALL_WORK_SYSCALL_EMU)
                return false;

        return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}
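/*
 * The same logic in tabular form (illustrative):
 *
 *      SYSCALL_EMU   SYSCALL_EXIT_TRAP   report?
 *           0                0             no
 *           0                1             yes
 *           1                0             no
 *           1                1             no (already reported at entry)
 */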

static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
        bool step;

        /*
         * If the syscall was rolled back due to syscall user dispatching,
         * then the tracers below are not invoked, for the same reason the
         * entry side ones were not invoked in syscall_trace_enter(): the
         * ABI of these syscalls is unknown.
         */
        if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
                if (unlikely(current->syscall_dispatch.on_dispatch)) {
                        current->syscall_dispatch.on_dispatch = false;
                        return;
                }
        }

        audit_syscall_exit(regs);

        if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, syscall_get_return_value(current, regs));

        step = report_single_step(work);
        if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
                arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
        unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
        unsigned long nr = syscall_get_nr(current, regs);

        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

        if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
                if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
                        local_irq_enable();
        }

        rseq_syscall(regs);

        /*
         * Do one-time syscall specific work. If these work items are
         * enabled, we want to run them exactly once per syscall exit with
         * interrupts enabled.
         */
        if (unlikely(work & SYSCALL_WORK_EXIT))
                syscall_exit_work(regs, work);
}

static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
        syscall_exit_to_user_mode_prepare(regs);
        local_irq_disable_exit_to_user();
        exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
        __syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
        instrumentation_begin();
        __syscall_exit_to_user_mode_work(regs);
        instrumentation_end();
        __exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
        __enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
        instrumentation_begin();
        exit_to_user_mode_prepare(regs);
        instrumentation_end();
        __exit_to_user_mode();
}

noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
        irqentry_state_t ret = {
                .exit_rcu = false,
        };

        if (user_mode(regs)) {
                irqentry_enter_from_user_mode(regs);
                return ret;
        }

        /*
         * If this entry hit the idle task invoke rcu_irq_enter() whether
         * RCU is watching or not.
         *
         * Interrupts can nest when the first interrupt invokes softirq
         * processing on return which enables interrupts.
         *
         * Scheduler ticks in the idle task can mark quiescent state and
         * terminate a grace period, if and only if the timer interrupt is
         * not nested into another interrupt.
         *
         * Checking for rcu_is_watching() here would prevent the nesting
         * interrupt from invoking rcu_irq_enter(). If that nested interrupt
         * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
         * assume that it is the first interrupt and eventually claim
         * quiescent state and end grace periods prematurely.
         *
         * Unconditionally invoke rcu_irq_enter() so RCU state stays
         * consistent.
         *
         * TINY_RCU does not support EQS, so let the compiler eliminate
         * this part when enabled.
         */
        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                /*
                 * If RCU is not watching then the same careful
                 * sequence vs. lockdep and tracing is required
                 * as in irqentry_enter_from_user_mode().
                 */
                lockdep_hardirqs_off(CALLER_ADDR0);
                rcu_irq_enter();
                instrumentation_begin();
                trace_hardirqs_off_finish();
                instrumentation_end();

                ret.exit_rcu = true;
                return ret;
        }

        /*
         * If RCU is watching then RCU only wants to check whether it needs
         * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
         * already contains a warning when RCU is not watching, so no point
         * in having another one here.
         */
        lockdep_hardirqs_off(CALLER_ADDR0);
        instrumentation_begin();
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();
        instrumentation_end();

        return ret;
}

void irqentry_exit_cond_resched(void)
{
        if (!preempt_count()) {
                /* Sanity check RCU and thread stack */
                rcu_irq_exit_check_preempt();
                if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                        WARN_ON_ONCE(!on_thread_stack());
                if (should_resched(0))
                        preempt_schedule_irq();
        }
}
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
#endif
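/*
 * With CONFIG_PREEMPT_DYNAMIC the static call above is retargeted at boot
 * according to the chosen preemption model; sketch of the update site,
 * modeled on sched_dynamic_update() in kernel/sched/core.c:
 *
 *      static_call_update(irqentry_exit_cond_resched,
 *                         irqentry_exit_cond_resched);        preempt=full
 *      static_call_update(irqentry_exit_cond_resched, NULL);  preempt=none
 */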

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
        lockdep_assert_irqs_disabled();

        /* Check whether this returns to user mode */
        if (user_mode(regs)) {
                irqentry_exit_to_user_mode(regs);
        } else if (!regs_irqs_disabled(regs)) {
                /*
                 * If RCU was not watching on entry this needs to be done
                 * carefully and needs the same ordering of lockdep/tracing
                 * and RCU as the return to user mode path.
                 */
                if (state.exit_rcu) {
                        instrumentation_begin();
                        /* Tell the tracer that IRET will enable interrupts */
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
                        instrumentation_end();
                        rcu_irq_exit();
                        lockdep_hardirqs_on(CALLER_ADDR0);
                        return;
                }

                instrumentation_begin();
                if (IS_ENABLED(CONFIG_PREEMPTION)) {
#ifdef CONFIG_PREEMPT_DYNAMIC
                        static_call(irqentry_exit_cond_resched)();
#else
                        irqentry_exit_cond_resched();
#endif
                }
                /* Covers both tracing and lockdep */
                trace_hardirqs_on();
                instrumentation_end();
        } else {
                /*
                 * IRQ flags state is correct already. Just tell RCU if it
                 * was not watching on entry.
                 */
                if (state.exit_rcu)
                        rcu_irq_exit();
        }
}
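/*
 * Usage sketch for the irqentry_enter()/irqentry_exit() pair (hypothetical
 * handler, following the pattern documented in entry-common.h):
 *
 *      noinstr void arch_handle_some_irq(struct pt_regs *regs)
 *      {
 *              irqentry_state_t state = irqentry_enter(regs);
 *
 *              instrumentation_begin();
 *              handle_it(regs);                (hypothetical body)
 *              instrumentation_end();
 *
 *              irqentry_exit(regs, state);
 *      }
 */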

irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
        irqentry_state_t irq_state;

        irq_state.lockdep = lockdep_hardirqs_enabled();

        __nmi_enter();
        lockdep_hardirqs_off(CALLER_ADDR0);
        lockdep_hardirq_enter();
        rcu_nmi_enter();

        instrumentation_begin();
        trace_hardirqs_off_finish();
        ftrace_nmi_enter();
        instrumentation_end();

        return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
        instrumentation_begin();
        ftrace_nmi_exit();
        if (irq_state.lockdep) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        }
        instrumentation_end();

        rcu_nmi_exit();
        lockdep_hardirq_exit();
        if (irq_state.lockdep)
                lockdep_hardirqs_on(CALLER_ADDR0);
        __nmi_exit();
}
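/*
 * NMI usage sketch (hypothetical, loosely modeled on x86's exc_nmi()): the
 * two helpers above bracket the architecture's NMI handler body:
 *
 *      irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 *
 *      instrumentation_begin();
 *      handle_the_nmi(regs);                   (hypothetical body)
 *      instrumentation_end();
 *
 *      irqentry_nmi_exit(regs, irq_state);
 */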