#include <linux/sched.h>
#include <linux/thermal.h>
+#include <linux/idle_inject.h>
#include "power.h"
#define THERMAL_REQUEST_KFIFO_SIZE (64 * sizeof(struct power_request))
#define DEFAULT_CPU_WEIGHT 1024
#define MINIMUM_UPDATE_TIME 10000000 /* 10 ms */
+#define MAX_CAPACITY_REQUEST_PERIOD 50 /* ms */
static DEFINE_PER_CPU(struct cpu_power, cpu_power);
DEFINE_PER_CPU(struct update_sched_power *, update_cpu_power);
}
EXPORT_SYMBOL_GPL(sched_power_cpu_reinit_weight);
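+
+/* Stub: configure virtual idle (rate/period) for @cpu; not implemented yet. */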
+static int vidle_setup(int cpu, int rate, int period)
+{
+ return 0;
+}
+
//////////////////////////////////////////////////////////////
return 0;
}
+static int play_idle_setup;
+
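+/* Stop idle injection on this CPU and clear the stored run/idle durations. */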
+static void sched_power_idle_stop(struct cpu_power *cpower)
+{
+
+ raw_spin_lock(&cpower->update_lock);
+ cpower->vidle = 0;
+ cpower->vrun = 0;
+ raw_spin_unlock(&cpower->update_lock);
+
+ idle_inject_stop(cpower->ii_dev);
+ idle_inject_set_duration(cpower->ii_dev, 0, 0);
+}
+
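+/*
+ * Start idle injection on this CPU: out of every @period units, spend
+ * @idle units idle and the remainder running. Returns 0 on success or a
+ * negative error code.
+ */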
+static int sched_power_idle_play(struct cpu_power *cpower, unsigned int period,
+ unsigned int idle)
+{
+ if (period <= idle)
+ return -EINVAL;
+
+ raw_spin_lock(&cpower->update_lock);
+ cpower->vidle = idle;
+ cpower->vrun = period - idle;
+ raw_spin_unlock(&cpower->update_lock);
+
+ idle_inject_set_duration(cpower->ii_dev, cpower->vrun, cpower->vidle);
+
+ return idle_inject_start(cpower->ii_dev);
+}
+
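+/*
+ * Power budget available to the cluster. This is a fixed placeholder value
+ * for now; a real implementation would presumably derive it from the
+ * thermal framework.
+ */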
+static u64 cluster_power_budget(struct cpumask *cpus)
+{
+ return 100;
+}
+
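+/*
+ * Redistribute the cluster power budget across @cpus according to each
+ * CPU's weight. The commented-out block below sketches the intended
+ * algorithm; for now the function only reports success.
+ */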
+static int
+sched_power_reweight_cluster(int cpu, struct cpumask *cpus, unsigned int capacity,
+ unsigned int period, int flags)
+{
+ /* opp_next_state = get_opp_for_capacity(cpu, capacity); */
+ /* opp_next_cost = get_opp_cost(cpu, opp_next_state); */
+ /* */
+ /* cluster_budget = cluster_power_budget(cpus); */
+ /* */
+ /* for_each_cpu(i, cpus) { */
+ /* cpower = (&per_cpu(cpu_power, i)); */
+ /* raw_spin_lock(&cpower->update_lock); */
+ /* total_weight += cpower->weight; */
+ /* raw_spin_unlock(&cpower->update_lock); */
+ /* } */
+ /* */
+ /* for_each_cpu(i, cpus) { */
+ /* cpower = (&per_cpu(cpu_power, i)); */
+ /* raw_spin_lock(&cpower->update_lock); */
+ /* budget = cluster_budget * cpower->weight << 10; */
+ /* raw_spin_unlock(&cpower->update_lock); */
+ /* budget /= total_weight; */
+ /* budget >>= 10; */
+ /* } */
+
+ return 0;
+}
+
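+/*
+ * Handle a capacity request for @cpu: reject periods above
+ * MAX_CAPACITY_REQUEST_PERIOD and re-weight the CPU's cluster.
+ */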
+static int sched_power_cpu_capacity_request(int cpu, unsigned int capacity,
+ unsigned int period, int flags)
+{
+ int ret;
+ struct cpu_power *cpower;
+
+ if (period > MAX_CAPACITY_REQUEST_PERIOD)
+ return -EINVAL;
+
+ cpower = (&per_cpu(cpu_power, cpu));
+
+ /* Re-weight the CPUs sharing this cluster (or the whole system). */
+ ret = sched_power_reweight_cluster(cpu, cpower->cluster_mask, capacity,
+ period, flags);
+
+ return ret;
+}
+
static void sched_power_work(struct kthread_work *work)
{
struct sched_power *sp = container_of(work, struct sched_power, work);
thermal_all_zones_recalc_power();
sp->work_in_progress = false;
}
static void sched_power_irq_work(struct irq_work *irq_work)
sp->work_in_progress = true;
irq_work_queue(&sp->irq_work);
}
+
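+ /*
+ * One-shot: the first time CPU 4 passes through here, start idle
+ * injection with a fixed run/idle split.
+ */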
+ if (!play_idle_setup && cpu == 4) {
+ play_idle_setup = 1;
+ idle_inject_set_duration(cpower->ii_dev, 10, 4);
+ idle_inject_start(cpower->ii_dev);
+ }
}
return 0;
}
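+
+/*
+ * Register an idle injection device for each possible CPU and store it in
+ * that CPU's cpu_power structure. On failure, unregister the devices that
+ * were already set up.
+ */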
+static int sched_power_idle_init(struct sched_power *sp)
+{
+ struct idle_inject_device *ii_dev;
+ struct cpumask *cpus;
+ int i, last_cpu;
+ struct cpu_power *cpower;
+
+ cpus = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!cpus)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ cpumask_set_cpu(i, cpus);
+
+ ii_dev = idle_inject_register(cpus);
+ if (IS_ERR_OR_NULL(ii_dev)) {
+ last_cpu = i;
+ goto cleanup;
+ }
+
+ cpower = (&per_cpu(cpu_power, i));
+ raw_spin_lock(&cpower->update_lock);
+ cpower->ii_dev = ii_dev;
+ raw_spin_unlock(&cpower->update_lock);
+
+ cpumask_clear_cpu(i, cpus);
+ }
+
+ kfree(cpus);
+ return 0;
+
+cleanup:
+ kfree(cpus);
+
+ for_each_possible_cpu(i) {
+ if (i == last_cpu)
+ break;
+
+ cpower = (&per_cpu(cpu_power, i));
+
+ raw_spin_lock(&cpower->update_lock);
+ ii_dev = cpower->ii_dev;
+ cpower->ii_dev = NULL;
+ raw_spin_unlock(&cpower->update_lock);
+
+ /* idle_inject_unregister() may sleep; call it outside the lock. */
+ idle_inject_unregister(ii_dev);
+ }
+
+ return -ENODEV;
+}
+
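+/* Unregister the per-CPU idle injection devices. */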
+static void sched_power_idle_unregister(struct sched_power *sp)
+{
+ struct idle_inject_device *ii_dev;
+ struct cpu_power *cpower;
+ int i;
+
+ for_each_possible_cpu(i) {
+ cpower = (&per_cpu(cpu_power, i));
+
+ raw_spin_lock(&cpower->update_lock);
+ ii_dev = cpower->ii_dev;
+ cpower->ii_dev = NULL;
+ raw_spin_unlock(&cpower->update_lock);
+
+ /* idle_inject_unregister() may sleep; call it outside the lock. */
+ if (ii_dev)
+ idle_inject_unregister(ii_dev);
+ }
+}
static int __init sched_power_init(void)
{
sched_power_setup(&sched_power);
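+ /* If idle injection cannot be set up, shut the power thread back down. */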
+ ret = sched_power_idle_init(&sched_power);
+ if (ret)
+ sched_power_disable_thread(&sched_power);
+
return ret;
}
fs_initcall(sched_power_init);