#define THERMAL_REQUEST_KFIFO_SIZE (64 * sizeof(struct power_request))
#define DEFAULT_CPU_WEIGHT 1024
+#define MINIMUM_UPDATE_TIME 10000000 /* 10 ms */
static DEFINE_PER_CPU(struct cpu_power, cpu_power);
DEFINE_PER_CPU(struct update_sched_power *, update_cpu_power);
//////////////////////////////////////////////////////////////
-static bool should_update_next_weight(int time)
+static bool should_update_next_weight(u64 time, u64 last_update, int flags)
{
-	return 1;
+	/* RT-driven requests bypass the rate limit. */
+	if (flags & SCHED_POWER_FORCE_UPDATE_RT)
+		return true;
+
+	/* Accept the request only if the previous one is old enough. */
+	if (time >= last_update + MINIMUM_UPDATE_TIME)
+		return true;
+
+	return false;
}
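/*
 * Illustration: sched_clock() counts nanoseconds, so with MINIMUM_UPDATE_TIME
 * two non-forced requests arriving less than 10 ms apart collapse into one.
 * For example, with t = sched_clock():
 *
 *	should_update_next_weight(t, t - 5 * NSEC_PER_MSEC, 0)
 *		-> false, only 5 ms since the last request
 *	should_update_next_weight(t, t - 5 * NSEC_PER_MSEC,
 *				  SCHED_POWER_FORCE_UPDATE_RT)
 *		-> true, RT requests bypass the rate limit
 */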
static void sched_power_work(struct kthread_work *work)
raw_spin_lock(&cpower->update_lock);
w = cpower->weight;
req = cpower->req;
- cpower->req.time = 0;
+	/* Record when the request was consumed; the rate limiter uses it. */
+	cpower->req.time = sched_clock();
cpower->weight = req.weight;
raw_spin_unlock(&cpower->update_lock);
- if (should_update_next_weight(req.time)) {
- pr_info("cpower req poped\n");
- thermal_cpu_cdev_set_weight(req.cpu, req.weight);
- need_update = true;
- }
+	pr_info("cpower req popped\n");
+ thermal_cpu_cdev_set_weight(req.cpu, req.weight);
+ need_update = true;
}
if (need_update)
return;
	/* Filter out too-frequent or unneeded changes. */
-	if (!should_update_next_weight(time))
+	if (!should_update_next_weight(time, cpower->req.time, flags))
		return;
sp = cpower->sched_power;
cpower->req.weight = weight;
cpower->req.cpu = cpu;
cpower->req.time = time;
+ cpower->req.flags = flags;
raw_spin_unlock(&cpower->update_lock);
if (!sp->work_in_progress) {
}
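/*
 * A minimal sketch of the elided branch body above, assuming the usual
 * kthread_work pattern (as schedutil uses for its slow path) and that the
 * sched_power struct has kthread worker/work members named worker and work
 * (both names are assumptions, not taken from the patch):
 *
 *	sp->work_in_progress = true;
 *	kthread_queue_work(&sp->worker, &sp->work);
 */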
#endif
+/* Force an immediate update, bypassing the rate limit (used for RT). */
+#define SCHED_POWER_FORCE_UPDATE_RT	0x01
#ifdef CONFIG_THERMAL
struct update_sched_power {
-	void (*func)(struct update_sched_power *, int, unsigned int, int, int);
+	void (*func)(struct update_sched_power *, int, unsigned int, u64, int);
int flags)
{
struct update_sched_power *update;
- int time = 0;
+ u64 time = sched_clock();
update = rcu_dereference_sched(*per_cpu_ptr(&update_cpu_power, cpu));
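/*
 * The remainder of this inline helper is cut off in the excerpt. A minimal
 * sketch of the likely continuation, assuming the parameter order
 * (update, cpu, weight, time, flags), which is inferred rather than shown:
 *
 *	if (update)
 *		update->func(update, cpu, weight, time, flags);
 */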