	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
+#endif
+#ifdef CONFIG_IPIPE
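+	/*
+	 * Hook for the interrupt pipeline to notify KVM about
+	 * out-of-band events affecting this vCPU; the handler is
+	 * presumably registered by the arch-specific KVM code.
+	 */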
+	struct ipipe_vm_notifier ipipe_notifier;
#endif
	int cpu;
	int vcpu_id;
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
+#include <linux/ipipe.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
	 */
	raw_spin_lock_irq(&tsk->pi_lock);
	raw_spin_unlock_irq(&tsk->pi_lock);
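+	/*
+	 * Report the exit to the interrupt pipeline, presumably so a
+	 * co-kernel can release whatever per-task state it still holds.
+	 */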
+	__ipipe_report_exit(tsk);
	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
+#include <linux/ipipe.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#endif
	setup_thread_stack(tsk, orig);
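+	/*
+	 * Give the child pristine I-pipe thread state instead of a copy
+	 * of the parent's; both helpers are assumed to reset the
+	 * pipeline-specific flags and per-thread data.
+	 */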
+	__ipipe_init_threadflags(task_thread_info(tsk));
+	__ipipe_init_threadinfo(&task_thread_info(tsk)->ipipe_data);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
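+	/*
+	 * Report the cleanup before exit_mmap() tears the mm down,
+	 * presumably so a co-kernel can drop its references while the
+	 * address space is still intact.
+	 */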
+	__ipipe_report_cleanup(mm);
	exit_mmap(mm);
	mm_put_huge_zero_page(mm);
	set_mm_exe_file(mm, NULL);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	cgroup_threadgroup_change_end(current);
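+	/*
+	 * Initialize the I-pipe per-task data of the new child, assumed
+	 * to complement the thread-info reset done in dup_task_struct().
+	 */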
+	__ipipe_init_taskinfo(p);
	perf_event_fork(p);
	trace_task_newtask(p, clone_flags);
}
	/* Can the task run on the task's current CPU? If so, we're done */
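+	/*
+	 * Both paths below report the resulting CPU placement to the
+	 * interrupt pipeline, presumably so a co-kernel can keep its
+	 * own view of the task's affinity in sync.
+	 */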
-	if (cpumask_test_cpu(task_cpu(p), new_mask))
+	if (cpumask_test_cpu(task_cpu(p), new_mask)) {
+		__ipipe_report_setaffinity(p, task_cpu(p));
		goto out;
+	}
	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+	__ipipe_report_setaffinity(p, dest_cpu);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
+#include <linux/ipipe.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
+
+	/* TIF_SIGPENDING must be set prior to reporting. */
+	__ipipe_report_sigwake(t);
+
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
		return 0;
	if (sig == SIGKILL)
		return 1;
-	if (task_is_stopped_or_traced(p))
+	if (task_is_stopped_or_traced(p)) {
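+		/*
+		 * No wakeup happens for a stopped/traced task, but report
+		 * the first pending signal anyway, presumably so a
+		 * co-kernel still learns about it; already-pending signals
+		 * were reported from signal_wake_up_state().
+		 */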
+		if (!signal_pending(p))
+			__ipipe_report_sigwake(p);
		return 0;
+	}
	return task_curr(p) || !signal_pending(p);
}