__entry->comm, __entry->pid,
__entry->ratio)
);
+
+/*
+ * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
+ * dest is the destination CPU; force is 0 for placement decisions
+ * and 1 when the task is actively pushed to another CPU.
+ */
+TRACE_EVENT(sched_hmp_migrate,
+
+ TP_PROTO(struct task_struct *tsk, int dest, int force),
+
+ TP_ARGS(tsk, dest, force),
+
+ TP_STRUCT__entry(
+ __array(char, comm, TASK_COMM_LEN)
+ __field(pid_t, pid)
+ __field(int, dest)
+ __field(int, force)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+ __entry->pid = tsk->pid;
+ __entry->dest = dest;
+ __entry->force = force;
+ ),
+
+ TP_printk("comm=%s pid=%d dest=%d force=%d",
+ __entry->comm, __entry->pid,
+ __entry->dest, __entry->force)
+);
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
rcu_read_unlock();
#ifdef CONFIG_SCHED_HMP
- if (hmp_up_migration(prev_cpu, &p->se))
- return hmp_select_faster_cpu(p, prev_cpu);
- if (hmp_down_migration(prev_cpu, &p->se))
- return hmp_select_slower_cpu(p, prev_cpu);
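+ /* Trace HMP placement decisions made here; these migrations are not forced (force=0). */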
+ if (hmp_up_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_faster_cpu(p, prev_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
+ if (hmp_down_migration(prev_cpu, &p->se)) {
+ new_cpu = hmp_select_slower_cpu(p, prev_cpu);
+ trace_sched_hmp_migrate(p, new_cpu, 0);
+ return new_cpu;
+ }
/* Make sure that the task stays in its previous hmp domain */
if (!cpumask_test_cpu(new_cpu, &hmp_cpu_domain(prev_cpu)->cpus))
return prev_cpu;
target->push_cpu = hmp_select_faster_cpu(p, cpu);
target->migrate_task = p;
force = 1;
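+ /* The task will be actively pushed to push_cpu, so trace it as a forced migration (force=1). */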
+ trace_sched_hmp_migrate(p, target->push_cpu, 1);
}
}
raw_spin_unlock_irqrestore(&target->lock, flags);