patch-5.15.79-rt54.patch
[platform/kernel/linux-rpi.git] include/linux/sched.h
index e418935..7790659 100644
@@ -118,12 +118,8 @@ struct task_group;
 
 #define task_is_running(task)          (READ_ONCE((task)->__state) == TASK_RUNNING)
 
-#define task_is_traced(task)           ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
-
 #define task_is_stopped(task)          ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
 
-#define task_is_stopped_or_traced(task)        ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-
 /*
  * Special states are those that do not use the normal wait-loop pattern. See
  * the comment with set_special_state().
@@ -1084,6 +1080,10 @@ struct task_struct {
        /* Restored if set_restore_sigmask() was used: */
        sigset_t                        saved_sigmask;
        struct sigpending               pending;
+#ifdef CONFIG_PREEMPT_RT
+       /* TODO: move me into ->restart_block ? */
+       struct                          kernel_siginfo forced_info;
+#endif
        unsigned long                   sas_ss_sp;
        size_t                          sas_ss_size;
        unsigned int                    sas_ss_flags;
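
The ->forced_info member added above exists only on PREEMPT_RT kernels. As a hedged sketch (not code from this patch), one plausible use is to stash a forced siginfo when it cannot be delivered from the current context and hand delivery off to a later, sleepable one; the helper name demo_defer_forced_sig and the TIF_NOTIFY_RESUME hand-off are illustrative assumptions.

    /*
     * Illustrative only: keep a copy of the siginfo in the task and ask for
     * a revisit on the return-to-user path, where sleeping locks may be taken.
     */
    static void demo_defer_forced_sig(struct task_struct *t,
                                      const struct kernel_siginfo *info)
    {
            t->forced_info = *info;
            set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
    }
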
@@ -1738,6 +1738,16 @@ static __always_inline bool is_percpu_thread(void)
 #endif
 }
 
+/* Is the current task free to be migrated to another CPU? */
+static inline bool is_migratable(void)
+{
+#ifdef CONFIG_SMP
+       return preemptible() && !current->migration_disabled;
+#else
+       return false;
+#endif
+}
+
 /* Per-process atomic flags. */
 #define PFA_NO_NEW_PRIVS               0       /* May not gain new privileges. */
 #define PFA_SPREAD_PAGE                        1       /* Spread page cache over cpuset */
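
A hedged usage sketch for is_migratable() above (not part of the patch; demo_counter and demo_bump_counter are made-up names, and <linux/percpu.h> plus <linux/preempt.h> are assumed): the helper returns true while the scheduler is still free to move the task, so a caller that needs a stable CPU for a per-CPU access pins itself first in that case.

    /*
     * Illustrative only: bump a per-CPU counter safely.  If the task may
     * still migrate, disable migration around the access; otherwise the
     * current CPU is already stable.
     */
    static DEFINE_PER_CPU(unsigned long, demo_counter);

    static void demo_bump_counter(void)
    {
            if (is_migratable()) {
                    migrate_disable();
                    this_cpu_inc(demo_counter);
                    migrate_enable();
            } else {
                    this_cpu_inc(demo_counter);
            }
    }
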
@@ -2013,6 +2023,118 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
        return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
 
+#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+       set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+       clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+       return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
+}
+
+static inline int need_resched_lazy(void)
+{
+       return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int need_resched_now(void)
+{
+       return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#else
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
+static inline int need_resched_lazy(void) { return 0; }
+
+static inline int need_resched_now(void)
+{
+       return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+static inline bool task_match_saved_state(struct task_struct *p, long match_state)
+{
+       return p->saved_state == match_state;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+       bool traced = false;
+
+       /* in case the task is sleeping on tasklist_lock */
+       raw_spin_lock_irq(&task->pi_lock);
+       if (READ_ONCE(task->__state) & __TASK_TRACED)
+               traced = true;
+       else if (task->saved_state & __TASK_TRACED)
+               traced = true;
+       raw_spin_unlock_irq(&task->pi_lock);
+       return traced;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+       bool traced_stopped = false;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+       if (READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED))
+               traced_stopped = true;
+       else if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+               traced_stopped = true;
+
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       return traced_stopped;
+}
+
+#else
+
+static inline bool task_match_saved_state(struct task_struct *p, long match_state)
+{
+       return false;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+       return READ_ONCE(task->__state) & __TASK_TRACED;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+       return READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED);
+}
+#endif
+
+static inline bool task_match_state_or_saved(struct task_struct *p,
+                                            long match_state)
+{
+       if (READ_ONCE(p->__state) == match_state)
+               return true;
+
+       return task_match_saved_state(p, match_state);
+}
+
+static inline bool task_match_state_lock(struct task_struct *p,
+                                        long match_state)
+{
+       bool match;
+
+       raw_spin_lock_irq(&p->pi_lock);
+       match = task_match_state_or_saved(p, match_state);
+       raw_spin_unlock_irq(&p->pi_lock);
+
+       return match;
+}
+
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
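
A hedged sketch of how the PREEMPT_LAZY flag helpers above are meant to be consumed (demo_preempt_point is an illustrative name, not a function in this patch): TIF_NEED_RESCHED still demands an immediate reschedule, while TIF_NEED_RESCHED_LAZY may be left to a later, cheaper preemption point such as the return to user space.

    /*
     * Illustrative only: a voluntary preemption point that honours a hard
     * resched request right away and leaves a lazy one for later.
     */
    static void demo_preempt_point(void)
    {
            if (need_resched_now()) {
                    schedule();             /* hard request: switch now */
            } else if (need_resched_lazy()) {
                    /* lazy request: deferred to the next natural preemption
                     * point, e.g. exit to user mode */
            }
    }
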
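A second hedged sketch, for the state-matching helpers: a simplified wait loop in the spirit of wait_task_inactive() that uses task_match_state_lock(), so a target task whose state was moved into ->saved_state by an RT sleeping-lock slowpath still counts as a match. demo_wait_for_state() and its busy-wait are illustrative simplifications, not code from this patch.

    /*
     * Illustrative only: poll until the target task reports the wanted state,
     * either in ->__state or, on PREEMPT_RT, preserved in ->saved_state.
     */
    static void demo_wait_for_state(struct task_struct *p, long state)
    {
            while (!task_match_state_lock(p, state))
                    cpu_relax();
    }
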
@@ -2047,7 +2169,7 @@ static inline int _cond_resched(void) { return 0; }
 #endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
 
 #define cond_resched() ({                      \
-       ___might_sleep(__FILE__, __LINE__, 0);  \
+       __might_resched(__FILE__, __LINE__, 0); \
        _cond_resched();                        \
 })
 
@@ -2055,19 +2177,38 @@ extern int __cond_resched_lock(spinlock_t *lock);
 extern int __cond_resched_rwlock_read(rwlock_t *lock);
 extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
-#define cond_resched_lock(lock) ({                             \
-       ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
-       __cond_resched_lock(lock);                              \
+#define MIGHT_RESCHED_RCU_SHIFT                8
+#define MIGHT_RESCHED_PREEMPT_MASK     ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside a RCU read side critical section
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS  PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS  \
+       (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
+#define cond_resched_lock(lock) ({                                             \
+       __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);      \
+       __cond_resched_lock(lock);                                              \
 })
 
-#define cond_resched_rwlock_read(lock) ({                      \
-       __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
-       __cond_resched_rwlock_read(lock);                       \
+#define cond_resched_rwlock_read(lock) ({                                      \
+       __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);      \
+       __cond_resched_rwlock_read(lock);                                       \
 })
 
-#define cond_resched_rwlock_write(lock) ({                     \
-       __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
-       __cond_resched_rwlock_write(lock);                      \
+#define cond_resched_rwlock_write(lock) ({                                     \
+       __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);      \
+       __cond_resched_rwlock_write(lock);                                      \
 })
 
 static inline void cond_resched_rcu(void)
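
Finally, a hedged illustration of the offsets encoding introduced above for __might_resched(): the low MIGHT_RESCHED_RCU_SHIFT bits carry the expected preempt_count() offset and the bits above carry the expected rcu_preempt_depth(), which is why the PREEMPT_RT variant of PREEMPT_LOCK_RESCHED_OFFSETS adds (1U << 8) for the rcu_read_lock() implied by spin/rw locks. The demo_* decoders below are illustrative, not part of the patch.

    /* Illustrative only: decode a PREEMPT_LOCK_RESCHED_OFFSETS style value. */
    static inline unsigned int demo_expected_preempt_offset(unsigned int offsets)
    {
            return offsets & MIGHT_RESCHED_PREEMPT_MASK;    /* low bits */
    }

    static inline unsigned int demo_expected_rcu_depth(unsigned int offsets)
    {
            return offsets >> MIGHT_RESCHED_RCU_SHIFT;      /* 0 on !RT, 1 on RT */
    }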