sched/power: update thermal subsystem after CPUs reweight
author     Lukasz Luba <l.luba@partner.samsung.com>
           Thu, 25 Oct 2018 17:17:57 +0000 (19:17 +0200)
committer  Lukasz Luba <l.luba@partner.samsung.com>
           Fri, 17 May 2019 07:15:35 +0000 (09:15 +0200)
When the scheduler reweights CPU cooling devices, the thermal subsystem
may keep operating on stale power figures. Teach sched_power_work() to
record the weight it applied for each CPU and, once all pending requests
have been pushed via thermal_cpu_cdev_set_weight(), trigger a single
thermal_all_zones_recalc_power() so every thermal zone picks up the new
weights immediately. While at it, move the sched_power pointer lookup in
sched_power_update() below the rate-limit check, so filtered-out
requests bail out earlier.

Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
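
For context, the flow this patch extends looks roughly as follows. The
sketch below is illustrative only: the names cpu_power, power_request,
thermal_cpu_cdev_set_weight() and thermal_all_zones_recalc_power() come
from the diff, while the field layout and the helper are assumptions.

#include <linux/spinlock.h>
#include <linux/types.h>

struct power_request {
	unsigned int weight;
	int cpu;
	u64 time;			/* 0 means "no request pending" */
};

struct cpu_power {
	raw_spinlock_t update_lock;
	struct power_request req;	/* written by the scheduler path */
	unsigned int weight;		/* last weight handed to thermal */
};

/*
 * Scheduler side (mirrors the tail of sched_power_update()): publish
 * a request under the lock.  The kworker in sched_power_work() pops
 * it, pushes the weight into the cooling device and, with this patch,
 * asks the thermal core to recalculate power for all zones.
 */
static void publish_weight_request(struct cpu_power *cpower, int cpu,
				   unsigned int weight, u64 time)
{
	raw_spin_lock(&cpower->update_lock);
	cpower->req.weight = weight;
	cpower->req.cpu = cpu;
	cpower->req.time = time;
	raw_spin_unlock(&cpower->update_lock);
}

Recalculating once after the loop, via the need_update flag, keeps the
cost to a single thermal_all_zones_recalc_power() call per work pass
even when several CPUs were reweighted in the same window.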
kernel/sched/power.c

index c2fc0811bf3758a5e5cb081d24f6ecbf2ed3421a..0dcb4579b474d8cba8db8afa327e2f8b078fb029 100644 (file)
@@ -131,20 +131,28 @@ static void sched_power_work(struct kthread_work *work)
        int i;
        struct cpu_power *cpower = NULL;
        struct power_request req;
+       unsigned int w;
+       bool need_update = false;
 
        for_each_online_cpu(i) {
                cpower = (&per_cpu(cpu_power, i));
                raw_spin_lock(&cpower->update_lock);
+               w = cpower->weight;
                req = cpower->req;
                cpower->req.time = 0;
+               cpower->weight = req.weight;
                raw_spin_unlock(&cpower->update_lock);
 
                if (should_update_next_weight(req.time)) {
                        pr_info("cpower req poped\n");
                        thermal_cpu_cdev_set_weight(req.cpu, req.weight);
+                       need_update = true;
                }
        }
 
+       if (need_update)
+               thermal_all_zones_recalc_power();
+
        sp->work_in_progress = false;
 }
 
@@ -167,12 +175,12 @@ static void sched_power_update(struct update_sched_power *update, int cpu,
        if (!cpower->operating)
                return;
 
-       sp = cpower->sched_power;
-
-       /* Filter to frequent changes */
+       /* Filter out too-frequent changes or unneeded updates */
        if (!should_update_next_weight(time))
                return;
 
+       sp = cpower->sched_power;
+
        raw_spin_lock(&cpower->update_lock);
        cpower->req.weight = weight;
        cpower->req.cpu = cpu;
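
Both call sites gate on should_update_next_weight(), whose body is
outside this diff. Purely as an assumption for illustration, a
time-based filter of the following shape would match both callers
(note that sched_power_work() writes req.time = 0 after popping a
request, so a zero timestamp means the slot is empty):

#include <linux/time64.h>
#include <linux/types.h>

/* Hypothetical sketch -- not the implementation from this tree. */
#define WEIGHT_UPDATE_MIN_NS	(10 * NSEC_PER_MSEC)	/* assumed */

static u64 last_weight_update;	/* assumed bookkeeping, in ns */

static bool should_update_next_weight(u64 time)
{
	if (!time)		/* empty request slot */
		return false;

	/*
	 * The caller is expected to refresh last_weight_update once
	 * the weight has actually been applied.
	 */
	return time - last_weight_update > WEIGHT_UPDATE_MIN_NS;
}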