sched/power: change update filter function
authorLukasz Luba <l.luba@partner.samsung.com>
Mon, 29 Oct 2018 14:37:12 +0000 (15:37 +0100)
committerLukasz Luba <l.luba@partner.samsung.com>
Fri, 17 May 2019 07:15:39 +0000 (09:15 +0200)
Add a basic filtering mechanism for too frequent requests. On the
other hand, create a bypass for RT or deadline bandwidth requests,
which must always be served.
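
For illustration only, a minimal user-space C sketch of the intended filter
behaviour (not the kernel code itself): a weight-change request is served when
it carries the RT/deadline force flag, or when at least 10 ms have passed since
the previously served request. The names now_ns(), should_serve() and
FORCE_UPDATE_RT are illustrative stand-ins for sched_clock(),
should_update_next_weight() and SCHED_POWER_FORCE_UPDATE_RT.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define MINIMUM_UPDATE_TIME 10000000ULL   /* 10 ms in ns */
    #define FORCE_UPDATE_RT 0x01              /* stand-in for SCHED_POWER_FORCE_UPDATE_RT */

    /* Monotonic clock in ns, a stand-in for the kernel's sched_clock(). */
    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* Serve forced (RT/deadline) requests always, others at most every 10 ms. */
    static bool should_serve(uint64_t last_served_ns, int flags)
    {
            if (flags & FORCE_UPDATE_RT)
                    return true;

            return now_ns() >= last_served_ns + MINIMUM_UPDATE_TIME;
    }

    int main(void)
    {
            uint64_t last = now_ns();   /* pretend a request was just served */

            printf("non-RT request right away: %d\n", should_serve(last, 0));
            printf("RT request right away:     %d\n", should_serve(last, FORCE_UPDATE_RT));
            return 0;
    }

Run back to back it prints 0 and then 1: only the flagged request bypasses the
10 ms rate limit.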

Signed-off-by: Lukasz Luba <l.luba@partner.samsung.com>
kernel/sched/power.c
kernel/sched/power.h
kernel/sched/sched.h

index 0dcb4579b474d8cba8db8afa327e2f8b078fb029..28f3b6c8c0a34e4360205afe6c2a236c793e643c 100644 (file)
@@ -13,6 +13,7 @@
 
 #define THERMAL_REQUEST_KFIFO_SIZE     (64 * sizeof(struct power_request))
 #define DEFAULT_CPU_WEIGHT 1024
+#define MINIMUM_UPDATE_TIME 10000000 /* 10 ms */
 
 static DEFINE_PER_CPU(struct cpu_power, cpu_power);
 DEFINE_PER_CPU(struct update_sched_power *, update_cpu_power);
@@ -120,9 +121,15 @@ EXPORT_SYMBOL_GPL(sched_power_cpu_reinit_weight);
 //////////////////////////////////////////////////////////////
 
 
-static bool should_update_next_weight(int time)
+static bool should_update_next_weight(u64 time, int flags)
 {
-       return 1;
+       if (flags & SCHED_POWER_FORCE_UPDATE_RT)
+               return 1;
+
+       if (sched_clock() >= time + MINIMUM_UPDATE_TIME)
+               return 1;
+
+       return 0;
 }
 
 static void sched_power_work(struct kthread_work *work)
@@ -139,15 +146,13 @@ static void sched_power_work(struct kthread_work *work)
                raw_spin_lock(&cpower->update_lock);
                w = cpower->weight;
                req = cpower->req;
-               cpower->req.time = 0;
+               cpower->req.time = sched_clock();
                cpower->weight = req.weight;
                raw_spin_unlock(&cpower->update_lock);
 
-               if (should_update_next_weight(req.time)) {
-                       pr_info("cpower req poped\n");
-                       thermal_cpu_cdev_set_weight(req.cpu, req.weight);
-                       need_update = true;
-               }
+               pr_info("cpower req popped\n");
+               thermal_cpu_cdev_set_weight(req.cpu, req.weight);
+               need_update = true;
        }
 
        if (need_update)
@@ -176,7 +181,7 @@ static void sched_power_update(struct update_sched_power *update, int cpu,
                return;
 
        /* Filter to frequent changes or not needed*/
-       if (!should_update_next_weight(time))
+       if (!should_update_next_weight(cpower->req.time, flags))
                return;
 
        sp = cpower->sched_power;
@@ -185,6 +190,7 @@ static void sched_power_update(struct update_sched_power *update, int cpu,
        cpower->req.weight = weight;
        cpower->req.cpu = cpu;
        cpower->req.time = time;
+       cpower->req.flags = flags;
        raw_spin_unlock(&cpower->update_lock);
 
        if (!sp->work_in_progress) {
index f08277efd50d5e2f5baceaf921dc67c06d4e3e05..1992e637d53f17f5b8970121593a16c7b249fbec 100644 (file)
@@ -32,7 +32,8 @@ struct sched_power {
 struct power_request {
        unsigned int weight;
        int cpu;
-       int time;
+       u64 time;
+       int flags;
 };
 
 struct cpu_power {
index c1714ef7366982c1c442913fca7769fba38a9d1c..7c8dea6df31aa120d17a592cc9c549f7564934c3 100644 (file)
@@ -2245,6 +2245,7 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
 }
 #endif
 
+#define SCHED_POWER_FORCE_UPDATE_RT 0x01
 #ifdef CONFIG_THERMAL
 struct update_sched_power {
        void (*func)(struct update_sched_power *, int, unsigned int, int, int);
@@ -2255,7 +2256,7 @@ static inline void sched_power_change_cpu_weight(int cpu, unsigned long weight,
                                                 int flags)
 {
        struct update_sched_power *update;
-       int time = 0;
+       u64 time = sched_clock();
 
 
        update = rcu_dereference_sched(*per_cpu_ptr(&update_cpu_power, cpu));