Merge tag 'sched-core-2022-08-01' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47b89a0..aad7f5e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -520,6 +520,45 @@ struct cfs_bandwidth { };
 
 #endif /* CONFIG_CGROUP_SCHED */
 
+/*
+ * u64_u32_load/u64_u32_store
+ *
+ * Use a copy of a u64 value to protect against data races. This is
+ * only needed on 32-bit architectures, where 64-bit accesses can tear.
+ */
+#ifdef CONFIG_64BIT
+# define u64_u32_load_copy(var, copy)       var
+# define u64_u32_store_copy(var, copy, val) (var = val)
+#else
+# define u64_u32_load_copy(var, copy)                                  \
+({                                                                     \
+       u64 __val, __val_copy;                                          \
+       do {                                                            \
+               __val_copy = copy;                                      \
+               /*                                                      \
+                * paired with u64_u32_store_copy(), ordering access    \
+                * to var and copy.                                     \
+                */                                                     \
+               smp_rmb();                                              \
+               __val = var;                                            \
+       } while (__val != __val_copy);                                  \
+       __val;                                                          \
+})
+# define u64_u32_store_copy(var, copy, val)                            \
+do {                                                                   \
+       typeof(val) __val = (val);                                      \
+       var = __val;                                                    \
+       /*                                                              \
+        * paired with u64_u32_load_copy(), ordering access to var and  \
+        * copy.                                                        \
+        */                                                             \
+       smp_wmb();                                                      \
+       copy = __val;                                                   \
+} while (0)
+#endif
+# define u64_u32_load(var)      u64_u32_load_copy(var, var##_copy)
+# define u64_u32_store(var, val) u64_u32_store_copy(var, var##_copy, val)
+
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
        struct load_weight      load;
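
The pair above implements a seqcount-like protocol: on 32-bit, the writer
publishes the value and then its copy, ordered by smp_wmb(), while the reader
retries until the two agree, so it can never observe a torn 64-bit value. A
minimal usage sketch against the rq fields added further down in this patch
(rq_clock() is the usual runqueue clock accessor; the surrounding context is
illustrative, not a specific call site):

	/* Writer, e.g. on the idle path: value first, then the copy. */
	u64_u32_store(rq->clock_idle, rq_clock(rq));

	/*
	 * Reader: loops until rq->clock_idle matches rq->clock_idle_copy,
	 * so a concurrent 32-bit writer cannot hand back a torn value.
	 */
	u64 idle_stamp = u64_u32_load(rq->clock_idle);

Note that u64_u32_store(var, val) pastes var##_copy, so it only works when
the copy field is named after the field itself; otherwise the _copy variants
must be called with both fields spelled out.
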
@@ -560,7 +599,7 @@ struct cfs_rq {
         */
        struct sched_avg        avg;
 #ifndef CONFIG_64BIT
-       u64                     load_last_update_time_copy;
+       u64                     last_update_time_copy;
 #endif
        struct {
                raw_spinlock_t  lock ____cacheline_aligned;
@@ -609,6 +648,10 @@ struct cfs_rq {
        int                     runtime_enabled;
        s64                     runtime_remaining;
 
+       u64                     throttled_pelt_idle;
+#ifndef CONFIG_64BIT
+       u64                     throttled_pelt_idle_copy;
+#endif
        u64                     throttled_clock;
        u64                     throttled_clock_pelt;
        u64                     throttled_clock_pelt_time;
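
throttled_pelt_idle records the throttled PELT time of a cfs_rq as it goes
idle, so its PELT clock can be caught up when it runs again; the _copy twin
feeds the load/store helpers defined above on 32-bit. A sketch of the writer
in the spirit of the matching pelt.h change (simplified; treat the exact
helper body as an assumption):

	static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
	{
		u64 throttled;

		if (unlikely(cfs_rq->throttle_count))
			throttled = U64_MAX;	/* still throttled, flag as unusable */
		else
			throttled = cfs_rq->throttled_clock_pelt_time;

		/* The explicit copy keeps the store tear-free on 32-bit. */
		u64_u32_store_copy(cfs_rq->throttled_pelt_idle,
				   cfs_rq->throttled_pelt_idle_copy, throttled);
	}
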
@@ -981,6 +1024,12 @@ struct rq {
        u64                     clock_task ____cacheline_aligned;
        u64                     clock_pelt;
        unsigned long           lost_idle_time;
+       u64                     clock_pelt_idle;
+       u64                     clock_idle;
+#ifndef CONFIG_64BIT
+       u64                     clock_pelt_idle_copy;
+       u64                     clock_idle_copy;
+#endif
 
        atomic_t                nr_iowait;
 
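clock_idle and clock_pelt_idle snapshot the rq clocks at the moment the CPU
enters idle, which later lets the PELT code decay blocked load across the
idle period without having to wake the CPU. A hedged sketch of how the idle
path might record them using the helpers defined above (simplified from the
companion pelt.h update; the details are an assumption):

	static inline void _update_idle_rq_clock_pelt(struct rq *rq)
	{
		rq->clock_pelt = rq_clock_task(rq);

		/* Snapshot both clocks so a remote CPU can replay the decay. */
		u64_u32_store(rq->clock_idle, rq_clock(rq));
		u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
	}
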
@@ -1815,15 +1864,6 @@ static inline struct cpumask *group_balance_mask(struct sched_group *sg)
        return to_cpumask(sg->sgc->cpumask);
 }
 
-/**
- * group_first_cpu - Returns the first CPU in the cpumask of a sched_group.
- * @group: The group whose first CPU is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
-       return cpumask_first(sched_group_span(group));
-}
-
 extern int group_balance_cpu(struct sched_group *sg);
 
 #ifdef CONFIG_SCHED_DEBUG
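
group_first_cpu() is removed because it has no remaining callers; it was a
thin wrapper, so any future user can simply open-code it (sketch):

	unsigned int first = cpumask_first(sched_group_span(group));
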
@@ -2044,7 +2084,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 
 #define WF_SYNC     0x10 /* Waker goes to sleep after wakeup */
 #define WF_MIGRATED 0x20 /* Internal use, task got migrated */
-#define WF_ON_CPU   0x40 /* Wakee is on_cpu */
 
 #ifdef CONFIG_SMP
 static_assert(WF_EXEC == SD_BALANCE_EXEC);
@@ -2852,7 +2891,7 @@ enum cpu_util_type {
 };
 
 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
-                                unsigned long max, enum cpu_util_type type,
+                                enum cpu_util_type type,
                                 struct task_struct *p);
 
 static inline unsigned long cpu_bw_dl(struct rq *rq)
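
With the max parameter dropped, effective_cpu_util() now derives the CPU's
capacity itself instead of trusting the caller to pass it in. A hedged sketch
of a call site under the new signature (the enclosing governor code is
illustrative):

	/*
	 * Frequency-domain view of this CPU's utilization; a NULL task
	 * means "don't add any task-specific contribution".
	 */
	unsigned long util = effective_cpu_util(cpu, cpu_util_cfs(cpu),
						FREQUENCY_UTIL, NULL);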