/* CPU running */
-static DEFINE_RATIONAL(cpu_running_coef);
+static DEFINE_RATIONAL(cpu0_running_coef); /* boot core uses distinct coeff */
+static DEFINE_RATIONAL(cpuN_running_coef);
-static u64 cpu_system(void)
+static u64 __energy_cpu(enum parameter_energy pe)
{
- u64 time = get_parameter_energy(PE_TIME_SYSTEM);
+ u64 times[NR_CPUS] = { 0 };
+ u64 val = 0;
+ int i;
+
+ if (get_parameter_energy(pe, times, sizeof(times)) == 0) {
+ val = div_u64(times[0] * cpu0_running_coef.num,
+ cpu0_running_coef.denom);
+ for (i = 1; i < NR_CPUS; i++)
+ val += div_u64(times[i] * cpuN_running_coef.num,
+ cpuN_running_coef.denom);
+ }
- return div_u64(time * cpu_running_coef.num, cpu_running_coef.denom);
+ return val;
}
-static u64 cpu_apps(void)
+static u64 cpu_system(void)
{
- u64 time = get_parameter_energy(PE_TIME_APPS);
+ return __energy_cpu(PE_TIME_SYSTEM);
+}
- return div_u64(time * cpu_running_coef.num, cpu_running_coef.denom);
+static u64 cpu_apps(void)
+{
+ return __energy_cpu(PE_TIME_APPS);
}
/* energy consumed while the CPU is idle (CPU0 only — see PE_TIME_IDLE) */
static u64 cpu_idle_system(void)
{
	u64 time = 0;

	/* check the result explicitly (was silently ignored): on failure
	 * report zero energy, consistent with __energy_cpu() */
	if (get_parameter_energy(PE_TIME_IDLE, &time, sizeof(time)) != 0)
		return 0;

	return div_u64(time * cpu_idle_coef.num, cpu_idle_coef.denom);
}
/* energy consumed by system-wide file reads (bytes * coefficient) */
static u64 fr_system(void)
{
	u64 byte = 0;

	/* check the result explicitly (was silently ignored) */
	if (get_parameter_energy(PE_READ_SYSTEM, &byte, sizeof(byte)) != 0)
		return 0;

	return div_u64(byte * fr_coef.num, fr_coef.denom);
}
/* energy consumed by instrumented applications' file reads */
static u64 fr_apps(void)
{
	u64 byte = 0;

	/* check the result explicitly (was silently ignored) */
	if (get_parameter_energy(PE_READ_APPS, &byte, sizeof(byte)) != 0)
		return 0;

	return div_u64(byte * fr_coef.num, fr_coef.denom);
}
/* energy consumed by system-wide file writes (bytes * coefficient) */
static u64 fw_system(void)
{
	u64 byte = 0;

	/* check the result explicitly (was silently ignored) */
	if (get_parameter_energy(PE_WRITE_SYSTEM, &byte, sizeof(byte)) != 0)
		return 0;

	return div_u64(byte * fw_coef.num, fw_coef.denom);
}
/* energy consumed by instrumented applications' file writes */
static u64 fw_apps(void)
{
	u64 byte = 0;

	/* check the result explicitly (was silently ignored) */
	if (get_parameter_energy(PE_WRITE_APPS, &byte, sizeof(byte)) != 0)
		return 0;

	return div_u64(byte * fw_coef.num, fw_coef.denom);
}
struct param_data parameters[] = {
{
.name = "cpu_running",
- .coef = &cpu_running_coef,
+ .coef = &cpu0_running_coef,
+ .system = cpu_system,
+ .apps = cpu_apps
+ },
+ {
+ .name = "cpuN_running",
+ .coef = &cpuN_running_coef,
.system = cpu_system,
.apps = cpu_apps
},
#include <linux/spinlock.h>
#include <linux/magic.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <kprobe/dbi_kprobes.h>
#include <ksyms/ksyms.h>
#include <us_manager/sspt/sspt_proc.h>
* ============================================================================
*/
struct cpus_time {
+ spinlock_t lock; /* for concurrent access */
struct tm_stat tm[NR_CPUS];
};
+#define cpus_time_lock(ct, flags) spin_lock_irqsave(&(ct)->lock, flags)
+#define cpus_time_unlock(ct, flags) spin_unlock_irqrestore(&(ct)->lock, flags)
+
/* reset every per-CPU stat of @ct and stamp each with @time */
static void cpus_time_init(struct cpus_time *ct, u64 time)
{
	int cpu;

	spin_lock_init(&ct->lock);

	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		struct tm_stat *tm = &ct->tm[cpu];

		tm_stat_init(tm);
		tm_stat_set_timestamp(tm, time);
	}
}
-static u64 cpus_time_get_running_all(struct cpus_time *ct)
+static inline u64 cpu_time_get_running(struct cpus_time *ct, int cpu, u64 now)
+{
+ return tm_stat_current_running(&ct->tm[cpu], now);
+}
+
+static void *cpus_time_get_running_all(struct cpus_time *ct, u64 *buf, u64 now)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < NR_CPUS; ++cpu)
+ buf[cpu] = tm_stat_current_running(&ct->tm[cpu], now);
+
+ return buf;
+}
+
+static void *cpus_time_sum_running_all(struct cpus_time *ct, u64 *buf, u64 now)
{
- u64 time = 0;
int cpu;
for (cpu = 0; cpu < NR_CPUS; ++cpu)
- time += tm_stat_running(&ct->tm[cpu]);
+ buf[cpu] += tm_stat_current_running(&ct->tm[cpu], now);
- return time;
+ return buf;
}
/* open a new running interval on @cpu by recording its start @time */
static void cpus_time_save_entry(struct cpus_time *ct, int cpu, u64 time)
{
	struct tm_stat *tm = &ct->tm[cpu];

	/* a non-zero timestamp means the previous interval was never closed */
	if (unlikely(tm_stat_timestamp(tm))) /* should never happen */
		printk("XXX %s[%d/%d]: WARNING tmstamp(%p) set on cpu(%d)\n",
		       current->comm, current->tgid, current->pid, tm, cpu);

	tm_stat_set_timestamp(tm, time);
}
-static void cpus_time_update_running(struct cpus_time *ct, int cpu, u64 time)
+static void cpus_time_update_running(struct cpus_time *ct, int cpu, u64 now,
+ u64 start_time)
{
- tm_stat_update(&ct->tm[cpu], time);
+ struct tm_stat *tm = &ct->tm[cpu];
+
+ if (unlikely(tm_stat_timestamp(tm) == 0)) {
+ /* not initialized. should happen only once per cpu/task */
+ printk("XXX %s[%d/%d]: nnitializing tmstamp(%p) on cpu(%d)\n",
+ current->comm, current->tgid, current->pid, tm, cpu);
+ tm_stat_set_timestamp(tm, start_time);
+ }
+
+ tm_stat_update(tm, now);
+ tm_stat_set_timestamp(tm, 0); /* set timestamp to 0 */
}
/*
 * Reset per-task energy accounting. The CPU time is deliberately
 * initialized to 0 rather than get_ntime(): the timestamp is set lazily
 * when the task's first __switch_to event occurs.
 */
static void init_ed(struct energy_data *ed)
{
	cpus_time_init(&ed->ct, 0);
	atomic64_set(&ed->bytes_read, 0);
	atomic64_set(&ed->bytes_written, 0);
}
static struct cpus_time ct_idle;
static struct energy_data ed_system;
+static u64 start_time;
static void init_data_energy(void)
{
+ start_time = get_ntime();
init_ed(&ed_system);
- cpus_time_init(&ct_idle, get_ntime());
+ cpus_time_init(&ct_idle, 0);
}
/* stop energy accounting and reset all counters */
static void uninit_data_energy(void)
{
	start_time = 0;
	uninit_ed(&ed_system);
	cpus_time_init(&ct_idle, 0);
}
static int entry_handler_switch(struct kretprobe_instance *ri, struct pt_regs *regs)
{
int cpu;
- u64 time;
- struct cpus_time* ct;
+ struct cpus_time *ct;
struct energy_data *ed;
+ unsigned long flags;
cpu = smp_processor_id();
- time = get_ntime();
- ct = current->tgid ? &ed_system.ct : &ct_idle;
- cpus_time_update_running(ct, cpu, time);
+
+ ct = current->tgid ? &ed_system.ct: &ct_idle;
+ cpus_time_lock(ct, flags);
+ cpus_time_update_running(ct, cpu, get_ntime(), start_time);
+ cpus_time_unlock(ct, flags);
ed = get_energy_data(current);
- if (ed)
- cpus_time_update_running(&ed->ct, cpu, time);
+ if (ed) {
+ ct = &ed->ct;
+ cpus_time_lock(ct, flags);
+ cpus_time_update_running(ct, cpu, get_ntime(), start_time);
+ cpus_time_unlock(ct, flags);
+ }
return 0;
}
static int ret_handler_switch(struct kretprobe_instance *ri, struct pt_regs *regs)
{
int cpu;
- u64 time;
- struct cpus_time* ct;
+ struct cpus_time *ct;
struct energy_data *ed;
+ unsigned long flags;
cpu = smp_processor_id();
- time = get_ntime();
- ct = current->tgid ? &ed_system.ct : &ct_idle;
- cpus_time_save_entry(ct, cpu, time);
+
+ ct = current->tgid ? &ed_system.ct: &ct_idle;
+ cpus_time_lock(ct, flags);
+ cpus_time_save_entry(ct, cpu, get_ntime());
+ cpus_time_unlock(ct, flags);
ed = get_energy_data(current);
- if (ed)
- cpus_time_save_entry(&ed->ct, cpu, time);
+ if (ed) {
+ ct = &ed->ct;
+ cpus_time_lock(ct, flags);
+ cpus_time_save_entry(ct, cpu, get_ntime());
+ cpus_time_unlock(ct, flags);
+ }
return 0;
}
/* request passed to the per-process callback via on_each_proc() */
struct cmd_pt {
	enum parameter_type pt;	/* which counter to collect */
	void *buf;		/* output: u64 or u64[NR_CPUS] for PT_CPU */
	int sz;			/* size of buf in bytes; NOTE(review):
				 * not yet checked by the callback */
};
static void callback_for_proc(struct sspt_proc *proc, void *data)
struct energy_data *ed = (struct energy_data *)f_data;
if (ed) {
+ unsigned long flags;
struct cmd_pt *cmdp = (struct cmd_pt *)data;
+ u64 *val = cmdp->buf;
switch (cmdp->pt) {
case PT_CPU:
- cmdp->val += cpus_time_get_running_all(&ed->ct);
+ cpus_time_lock(&ed->ct, flags);
+ cpus_time_sum_running_all(&ed->ct, val, get_ntime());
+ cpus_time_unlock(&ed->ct, flags);
break;
case PT_READ:
- cmdp->val += atomic64_read(&ed->bytes_read);
+ *val += atomic64_read(&ed->bytes_read);
break;
case PT_WRITE:
- cmdp->val += atomic64_read(&ed->bytes_written);
+ *val += atomic64_read(&ed->bytes_written);
break;
default:
break;
}
}
-static u64 current_parameter_apps(enum parameter_type pt)
+static int current_parameter_apps(enum parameter_type pt, void *buf, int sz)
{
struct cmd_pt cmdp;
cmdp.pt = pt;
- cmdp.val = 0;
+ cmdp.buf = buf;
+ cmdp.sz = sz;
on_each_proc(callback_for_proc, (void *)&cmdp);
- return cmdp.val;
+ return 0;
}
-u64 get_parameter_energy(enum parameter_energy pe)
+int get_parameter_energy(enum parameter_energy pe, void *buf, size_t sz)
{
- u64 val = 0;
+ unsigned long flags;
+ u64 *val = buf; /* currently all parameters are u64 vals */
+ int ret = 0;
switch (pe) {
case PE_TIME_IDLE:
- val = cpus_time_get_running_all(&ct_idle);
+ cpus_time_lock(&ct_idle, flags);
+ /* for the moment we consider only CPU[0] idle time */
+ *val = cpu_time_get_running(&ct_idle, 0, get_ntime());
+ cpus_time_unlock(&ct_idle, flags);
break;
case PE_TIME_SYSTEM:
- val = cpus_time_get_running_all(&ed_system.ct);
+ cpus_time_lock(&ed_system.ct, flags);
+ cpus_time_get_running_all(&ed_system.ct, val, get_ntime());
+ cpus_time_unlock(&ed_system.ct, flags);
break;
case PE_TIME_APPS:
- val = current_parameter_apps(PT_CPU);
+ current_parameter_apps(PT_CPU, buf, sz);
break;
case PE_READ_SYSTEM:
- val = atomic64_read(&ed_system.bytes_read);
+ *val = atomic64_read(&ed_system.bytes_read);
break;
case PE_WRITE_SYSTEM:
- val = atomic64_read(&ed_system.bytes_written);
+ *val = atomic64_read(&ed_system.bytes_written);
break;
case PE_READ_APPS:
- val = current_parameter_apps(PT_READ);
+ current_parameter_apps(PT_READ, buf, sz);
break;
case PE_WRITE_APPS:
- val = current_parameter_apps(PT_WRITE);
+ current_parameter_apps(PT_WRITE, buf, sz);
break;
default:
+ ret = -EINVAL;
break;
}
- return val;
+ return ret;
}
int do_set_energy(void)