--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Scheduler CPU power
+ *
+ * Copyright (C) 2018 Samsung
+ */
+
+#include <linux/irq_work.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/thermal.h>
+
+#include "power.h"
+
+#define THERMAL_REQUEST_KFIFO_SIZE (64 * sizeof(struct power_request))
+#define DEFAULT_CPU_WEIGHT 1024
+
+static DEFINE_PER_CPU(struct cpu_power, cpu_power);
+DEFINE_PER_CPU(struct update_sched_power *, update_cpu_power);
+
+static struct sched_power sched_power;
+
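+/*
+ * Register the scheduler-side callback used to report weight changes for a
+ * CPU. @fn is invoked through the per-CPU update_cpu_power pointer; the
+ * registration is refused (with a warning) if either argument is NULL or a
+ * callback is already installed. sched_power_clean_update_func() removes it.
+ */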
+void sched_power_set_update_func(int cpu, struct update_sched_power *update,
+ void (*fn)(struct update_sched_power *, int, unsigned int, int,
+ int))
+{
+ if (WARN_ON(!update || !fn))
+ return;
+
+ if (WARN_ON(per_cpu(update_cpu_power, cpu)))
+ return;
+
+ update->func = fn;
+ rcu_assign_pointer(per_cpu(update_cpu_power, cpu), update);
+}
+
+void sched_power_clean_update_func(int cpu)
+{
+ rcu_assign_pointer(per_cpu(update_cpu_power, cpu), NULL);
+}
+
+/* Helpers for deriving group weight and capacity from per-CPU weights. */
+
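+/*
+ * Average the weights of all CPUs in @cpu's group. The group span is not
+ * wired up yet, so this currently returns 0.
+ */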
+unsigned int cpu_power_calc_group_weight(int cpu)
+{
+	/*
+	 * Placeholder: the mask of CPUs sharing the power domain is not
+	 * populated yet, so bail out instead of dereferencing NULL.
+	 */
+	cpumask_t *span_cpus = NULL;
+	struct cpu_power *power;
+	unsigned int w = 0;
+	int num_cpus;
+	int i;
+
+	if (!span_cpus)
+		return 0;
+
+	num_cpus = cpumask_weight(span_cpus);
+
+	for_each_cpu(i, span_cpus) {
+		power = &per_cpu(cpu_power, i);
+		w += power->weight;
+	}
+
+	if (num_cpus)
+		w /= num_cpus;
+
+	return w;
+}
+
+int get_state_for_power(int cpu, unsigned long power)
+{
+	/*
+	 * Mapping a power budget to a performance/cooling state is not
+	 * implemented yet; state 0 is always reported.
+	 */
+	return 0;
+}
+
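+/*
+ * Split a group power budget among the CPUs of the group in proportion to
+ * their weights and pick the largest per-CPU share:
+ *
+ *	p_i = gr_power * weight_i / gr_weight
+ *
+ * The maximum share is then mapped to a state via get_state_for_power().
+ */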
+int cpu_power_calc_group_capacity(unsigned long gr_power,
+				  unsigned int gr_weight, int cpu)
+{
+	/* Placeholder: the group span is not populated yet (see above). */
+	cpumask_t *span_cpus = NULL;
+	struct cpu_power *power;
+	unsigned long max_power = 0;
+	unsigned long p;
+	int i;
+
+	if (!span_cpus || !gr_weight)
+		return 0;
+
+	for_each_cpu(i, span_cpus) {
+		power = &per_cpu(cpu_power, i);
+		/* Weighted share, in 10-bit fixed point to limit rounding. */
+		p = gr_power * (power->weight << 10) / gr_weight;
+		p >>= 10;
+
+		if (max_power < p)
+			max_power = p;
+	}
+
+	/* The resulting state is not propagated anywhere yet. */
+	get_state_for_power(cpu, max_power);
+
+	return 0;
+}
+
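+/*
+ * Update the weight of a single CPU under the per-CPU lock. Returns -EAGAIN
+ * while the framework is not operating for that CPU yet, 0 on success.
+ */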
+int sched_power_cpu_reinit_weight(int cpu, int weight)
+{
+ struct cpu_power *cpower = &per_cpu(cpu_power, cpu);
+
+ if (!cpower->operating)
+ return -EAGAIN;
+
+ raw_spin_lock(&cpower->update_lock);
+ cpower->weight = weight;
+ raw_spin_unlock(&cpower->update_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sched_power_cpu_reinit_weight);
+
+/* Deferred update machinery: scheduler hook -> irq_work -> kthread worker. */
+
+/* Rate limiting of weight updates is not implemented yet; always update. */
+static bool should_update_next_weight(int time)
+{
+	return true;
+}
+
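+/*
+ * Worker body: pop the request queued for each online CPU and forward the
+ * new weight to the CPU cooling device. Runs in the kthread created by
+ * sched_power_create_thread().
+ */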
+static void sched_power_work(struct kthread_work *work)
+{
+ struct sched_power *sp = container_of(work, struct sched_power, work);
+ int i;
+ struct cpu_power *cpower = NULL;
+ struct power_request req;
+
+ for_each_online_cpu(i) {
+		cpower = &per_cpu(cpu_power, i);
+ raw_spin_lock(&cpower->update_lock);
+ req = cpower->req;
+ cpower->req.time = 0;
+ raw_spin_unlock(&cpower->update_lock);
+
+ if (should_update_next_weight(req.time)) {
+			pr_debug("cpower req popped\n");
+ thermal_cpu_cdev_set_weight(req.cpu, req.weight);
+ }
+ }
+
+ sp->work_in_progress = false;
+}
+
+static void sched_power_irq_work(struct irq_work *irq_work)
+{
+ struct sched_power *power;
+
+ power = container_of(irq_work, struct sched_power, irq_work);
+
+ kthread_queue_work(&power->worker, &power->work);
+}
+
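+/*
+ * Hook invoked by the scheduler through the pointer registered in
+ * sched_power_set_update_func(). It only records the request under the
+ * per-CPU lock and kicks an irq_work, keeping the scheduler-context path
+ * short; the actual weight change is applied by the kthread worker.
+ */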
+static void sched_power_update(struct update_sched_power *update, int cpu,
+ unsigned int weight, int flags, int time)
+{
+ struct cpu_power *cpower = container_of(update, struct cpu_power,
+ update_power);
+ struct sched_power *sp;
+
+ if (!cpower->operating)
+ return;
+
+ sp = cpower->sched_power;
+
+	/* Filter out too frequent changes */
+ if (!should_update_next_weight(time))
+ return;
+
+ raw_spin_lock(&cpower->update_lock);
+ cpower->req.weight = weight;
+ cpower->req.cpu = cpu;
+ cpower->req.time = time;
+ raw_spin_unlock(&cpower->update_lock);
+
+ if (!sp->work_in_progress) {
+ sp->work_in_progress = true;
+ irq_work_queue(&sp->irq_work);
+ }
+}
+
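+/*
+ * Spawn the worker that applies weight updates. It runs as a SCHED_DEADLINE
+ * task (1 ms runtime every 10 ms period), mirroring the pattern used by the
+ * schedutil cpufreq governor for its kthread.
+ */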
+static int sched_power_create_thread(struct sched_power *power)
+{
+ int ret;
+ struct task_struct *thread;
+ struct sched_attr attr = {
+ .sched_policy = SCHED_DEADLINE,
+ .sched_nice = 0,
+ .sched_priority = 0,
+ .sched_flags = 0,
+ .sched_runtime = 1000000,
+ .sched_deadline = 10000000,
+ .sched_period = 10000000,
+ };
+
+ kthread_init_work(&power->work, sched_power_work);
+ kthread_init_worker(&power->worker);
+ thread = kthread_create(kthread_worker_fn, &power->worker,
+ "sched_power/a");
+
+ if (IS_ERR(thread)) {
+ pr_err("failed to create sched_power thread %ld\n",
+ PTR_ERR(thread));
+ return PTR_ERR(thread);
+ }
+
+ ret = sched_setattr_nocheck(thread, &attr);
+ if (ret) {
+ kthread_stop(thread);
+ pr_warn("failed to set SCHED_DEADLINE for sched_power %d\n",
+ ret);
+ return ret;
+ }
+
+ power->thread = thread;
+ mutex_init(&power->work_lock);
+ init_irq_work(&power->irq_work, sched_power_irq_work);
+ wake_up_process(thread);
+
+ return 0;
+}
+
+/* No caller yet: tears down the worker created by sched_power_create_thread(). */
+static void __maybe_unused sched_power_disable_thread(struct sched_power *sp)
+{
+ kthread_flush_worker(&sp->worker);
+ kthread_stop(sp->thread);
+ mutex_destroy(&sp->work_lock);
+}
+
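+/*
+ * Give every possible CPU the default weight and hook it into the scheduler
+ * update path.
+ */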
+static int sched_power_setup(struct sched_power *sp)
+{
+ int i;
+ struct cpu_power *cpower;
+
+ for_each_possible_cpu(i) {
+		cpower = &per_cpu(cpu_power, i);
+ cpower->weight = DEFAULT_CPU_WEIGHT;
+ cpower->sched_power = sp;
+ sched_power_set_update_func(i, &cpower->update_power,
+ sched_power_update);
+ raw_spin_lock_init(&cpower->update_lock);
+ cpower->operating = true;
+ }
+
+ return 0;
+}
+
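+/*
+ * Create the worker first so that any request queued by the hooks installed
+ * in sched_power_setup() has somewhere to go.
+ */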
+static int __init sched_power_init(void)
+{
+ int ret = 0;
+
+ ret = sched_power_create_thread(&sched_power);
+ if (ret)
+ return ret;
+
+ sched_power_setup(&sched_power);
+
+ return ret;
+}
+fs_initcall(sched_power_init);