patch-5.15.79-rt54.patch (from platform/kernel/linux-rpi.git)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 02766f3..0e13c85 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3794,11 +3794,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
        se->avg.runnable_sum = se->avg.runnable_avg * divider;
 
-       se->avg.load_sum = divider;
-       if (se_weight(se)) {
-               se->avg.load_sum =
-                       div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
-       }
+       se->avg.load_sum = se->avg.load_avg * divider;
+       if (se_weight(se) < se->avg.load_sum)
+               se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
+       else
+               se->avg.load_sum = 1;
 
        enqueue_load_avg(cfs_rq, se);
        cfs_rq->avg.util_avg += se->avg.util_avg;
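
Editorial note on the hunk above, hedged: one reading of the rewrite is that it guards against the old computation truncating load_sum to zero while load_avg stays non-zero. A worked example with illustrative numbers, not taken from this patch (a PELT divider of roughly 47742 and the nice -20 load weight 88761):

    /*
     * Old form: load_sum = div_u64(load_avg * divider, weight)
     *                    = div_u64(1 * 47742, 88761) = 0,
     * i.e. a non-zero load_avg paired with a zero load_sum.
     * New form: load_sum = load_avg * divider = 47742; the weight (88761)
     * is not smaller than that, so load_sum is clamped to 1 instead of 0.
     */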
@@ -4458,7 +4458,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
@@ -4482,7 +4482,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (delta > ideal_runtime)
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
 }
 
 static void
@@ -4625,7 +4625,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * validating it and just reschedule.
         */
        if (queued) {
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
                return;
        }
        /*
@@ -4765,7 +4765,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
         * hierarchy can be throttled
         */
        if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-               resched_curr(rq_of(cfs_rq));
+               resched_curr_lazy(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -4812,8 +4812,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 
        cfs_rq->throttle_count--;
        if (!cfs_rq->throttle_count) {
-               cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
-                                            cfs_rq->throttled_clock_task;
+               cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) -
+                                            cfs_rq->throttled_clock_pelt;
 
                /* Add cfs_rq with load or one or more already running entities to the list */
                if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
@@ -4830,7 +4830,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 
        /* group is entering throttled state, stop time */
        if (!cfs_rq->throttle_count) {
-               cfs_rq->throttled_clock_task = rq_clock_task(rq);
+               cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq);
                list_del_leaf_cfs_rq(cfs_rq);
        }
        cfs_rq->throttle_count++;
@@ -5274,7 +5274,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
        pcfs_rq = tg->parent->cfs_rq[cpu];
 
        cfs_rq->throttle_count = pcfs_rq->throttle_count;
-       cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+       cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
@@ -5528,7 +5528,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
                if (delta < 0) {
                        if (task_current(rq, p))
-                               resched_curr(rq);
+                               resched_curr_lazy(rq);
                        return;
                }
                hrtick_start(rq, delta);
@@ -6280,6 +6280,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 {
        struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
        int i, cpu, idle_cpu = -1, nr = INT_MAX;
+       struct sched_domain_shared *sd_share;
        struct rq *this_rq = this_rq();
        int this = smp_processor_id();
        struct sched_domain *this_sd;
@@ -6319,6 +6320,17 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
                time = cpu_clock(this);
        }
 
+       if (sched_feat(SIS_UTIL)) {
+               sd_share = rcu_dereference(per_cpu(sd_llc_shared, target));
+               if (sd_share) {
+                       /* because !--nr is the condition to stop scan */
+                       nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
+                       /* overloaded LLC is unlikely to have idle cpu/core */
+                       if (nr == 1)
+                               return -1;
+               }
+       }
+
        for_each_cpu_wrap(cpu, cpus, target + 1) {
                if (has_idle_core) {
                        i = select_idle_core(p, cpu, cpus, &idle_cpu);
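
Editorial note, hedged: a simplified sketch of how the nr hint set above is consumed later in select_idle_cpu(); the real loop also handles the has_idle_core case shown in the context lines:

    /* nr was set to nr_idle_scan + 1, so !--nr fires after nr_idle_scan CPUs */
    for_each_cpu_wrap(cpu, cpus, target + 1) {
            if (!--nr)
                    return -1;
            idle_cpu = __select_idle_cpu(cpu, p);
            if ((unsigned int)idle_cpu < nr_cpumask_bits)
                    break;
    }

With nr == 1 the very first !--nr would already end the scan, which is why the added code returns -1 immediately in that case.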
@@ -7220,7 +7232,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        return;
 
 preempt:
-       resched_curr(rq);
+       resched_curr_lazy(rq);
        /*
         * Only set the backward buddy when the current task is still
         * on the rq. This can happen when a wakeup gets interleaved
@@ -9166,6 +9178,77 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
        return idlest;
 }
 
+static void update_idle_cpu_scan(struct lb_env *env,
+                                unsigned long sum_util)
+{
+       struct sched_domain_shared *sd_share;
+       int llc_weight, pct;
+       u64 x, y, tmp;
+       /*
+        * Update the number of CPUs to scan in LLC domain, which could
+        * be used as a hint in select_idle_cpu(). The update of sd_share
+        * could be expensive because it is within a shared cache line.
+        * So the write of this hint only occurs during periodic load
+        * balancing, rather than CPU_NEWLY_IDLE, because the latter
+        * can fire way more frequently than the former.
+        */
+       if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
+               return;
+
+       llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
+       if (env->sd->span_weight != llc_weight)
+               return;
+
+       sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu));
+       if (!sd_share)
+               return;
+
+       /*
+        * The number of CPUs to search drops as sum_util increases, when
+        * sum_util hits 85% or above, the scan stops.
+        * The reason to choose 85% as the threshold is because this is the
+        * imbalance_pct(117) when a LLC sched group is overloaded.
+        *
+        * let y = SCHED_CAPACITY_SCALE - p * x^2                       [1]
+        * and y'= y / SCHED_CAPACITY_SCALE
+        *
+        * x is the ratio of sum_util compared to the CPU capacity:
+        * x = sum_util / (llc_weight * SCHED_CAPACITY_SCALE)
+        * y' is the ratio of CPUs to be scanned in the LLC domain,
+        * and the number of CPUs to scan is calculated by:
+        *
+        * nr_scan = llc_weight * y'                                    [2]
+        *
+        * When x hits the threshold of overloaded, AKA, when
+        * x = 100 / pct, y drops to 0. According to [1],
+        * p should be SCHED_CAPACITY_SCALE * pct^2 / 10000
+        *
+        * Scale x by SCHED_CAPACITY_SCALE:
+        * x' = sum_util / llc_weight;                                  [3]
+        *
+        * and finally [1] becomes:
+        * y = SCHED_CAPACITY_SCALE -
+        *     x'^2 * pct^2 / (10000 * SCHED_CAPACITY_SCALE)            [4]
+        *
+        */
+       /* equation [3] */
+       x = sum_util;
+       do_div(x, llc_weight);
+
+       /* equation [4] */
+       pct = env->sd->imbalance_pct;
+       tmp = x * x * pct * pct;
+       do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
+       tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);
+       y = SCHED_CAPACITY_SCALE - tmp;
+
+       /* equation [2] */
+       y *= llc_weight;
+       do_div(y, SCHED_CAPACITY_SCALE);
+       if ((int)y != sd_share->nr_idle_scan)
+               WRITE_ONCE(sd_share->nr_idle_scan, (int)y);
+}
+
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
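
Editorial note, hedged: a worked example of equations [3], [4] and [2] from the comment in update_idle_cpu_scan() above, using illustrative values that are not part of the patch (a 16-CPU LLC, imbalance_pct = 117, sum_util = 8192, i.e. half of 16 * SCHED_CAPACITY_SCALE, with SCHED_CAPACITY_SCALE = 1024):

    x'      = 8192 / 16                        =  512   /* [3] */
    tmp     = 512^2 * 117^2 / (10000 * 1024)  ~=  350
    y       = 1024 - 350                       =  674   /* [4] */
    nr_scan = 16 * 674 / 1024                 ~=   10   /* [2] */

So roughly 10 of the 16 CPUs are scanned at 50% utilisation, and nr_scan reaches 0 once x' >= 1024 * 100 / 117 (about 875 per CPU), i.e. once sum_util crosses roughly 85% of the LLC capacity, matching the threshold described in the comment.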
@@ -9178,6 +9261,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats *local = &sds->local_stat;
        struct sg_lb_stats tmp_sgs;
+       unsigned long sum_util = 0;
        int sg_status = 0;
 
        do {
@@ -9210,6 +9294,7 @@ next_group:
                sds->total_load += sgs->group_load;
                sds->total_capacity += sgs->group_capacity;
 
+               sum_util += sgs->group_util;
                sg = sg->next;
        } while (sg != env->sd->groups);
 
@@ -9235,6 +9320,8 @@ next_group:
                WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
                trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
        }
+
+       update_idle_cpu_scan(env, sum_util);
 }
 
 #define NUMA_IMBALANCE_MIN 2
@@ -11123,7 +11210,7 @@ static void task_fork_fair(struct task_struct *p)
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
-               resched_curr(rq);
+               resched_curr_lazy(rq);
        }
 
        se->vruntime -= cfs_rq->min_vruntime;
@@ -11150,7 +11237,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
         */
        if (task_current(rq, p)) {
                if (p->prio > oldprio)
-                       resched_curr(rq);
+                       resched_curr_lazy(rq);
        } else
                check_preempt_curr(rq, p, 0);
 }