kernel: ipipe: add domain event notifications
authorPhilippe Gerum <rpm@xenomai.org>
Sun, 3 Dec 2017 11:00:22 +0000 (12:00 +0100)
committerMarek Szyprowski <m.szyprowski@samsung.com>
Fri, 27 Apr 2018 09:21:34 +0000 (11:21 +0200)
include/linux/kvm_host.h
kernel/exit.c
kernel/fork.c
kernel/sched/core.c
kernel/signal.c
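
This patch wires I-pipe domain event notifications into the generic task
management paths: task exit (do_exit()), mm teardown (__mmput()), CPU
affinity updates (__set_cpus_allowed_ptr()) and signal wake-ups
(signal_wake_up_state() and wants_signal()). It also initializes the
per-thread I-pipe state when a task_struct is duplicated, and adds an
ipipe_vm_notifier slot to struct kvm_vcpu so a co-kernel can be notified
about vCPU preemption.

The call sites below assume the usual I-pipe convention that the
notification helpers compile to empty inlines when CONFIG_IPIPE is off.
A minimal sketch of that interface (the exact prototypes are not part of
this diff and are assumed here):

#ifdef CONFIG_IPIPE
void __ipipe_report_exit(struct task_struct *tsk);
void __ipipe_report_cleanup(struct mm_struct *mm);
void __ipipe_report_setaffinity(struct task_struct *p, int dest_cpu);
void __ipipe_report_sigwake(struct task_struct *t);
#else /* !CONFIG_IPIPE */
static inline void __ipipe_report_exit(struct task_struct *tsk) { }
static inline void __ipipe_report_cleanup(struct mm_struct *mm) { }
static inline void __ipipe_report_setaffinity(struct task_struct *p,
					      int dest_cpu) { }
static inline void __ipipe_report_sigwake(struct task_struct *t) { }
#endif /* !CONFIG_IPIPE */

With CONFIG_IPIPE disabled, every hunk below therefore compiles to
nothing on a vanilla build.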

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 5a8019befafdcbe1a2ff23b3b99c270e10cdb884..5efcef6234c9bfa6180cc72857f86c044d14d6b6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -218,6 +218,9 @@ struct kvm_vcpu {
        struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
+#endif
+#ifdef CONFIG_IPIPE
+       struct ipipe_vm_notifier ipipe_notifier;
 #endif
        int cpu;
        int vcpu_id;
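
The new ipipe_notifier field gives an I-pipe co-kernel a per-vCPU hook
for preemption events. A usage sketch, assuming ipipe_vm_notifier
carries a single handler callback (the struct layout and the point at
which KVM fires it are assumptions, not shown in this diff):

struct my_vcpu_ext {
	struct ipipe_vm_notifier nfy;	/* hypothetical embedding */
	/* ... per-vCPU co-kernel state ... */
};

static void my_vm_preemption_handler(struct ipipe_vm_notifier *nfy)
{
	struct my_vcpu_ext *ext = container_of(nfy, struct my_vcpu_ext, nfy);

	/* Park real-time activity bound to this vCPU before the host
	 * schedules away from it. */
}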
diff --git a/kernel/exit.c b/kernel/exit.c
index e3a08761eb4074216ecd29e98a2973023c62db1b..3ca20c9e4385c217690c590e27f30bbeb2c2952e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -56,6 +56,7 @@
 #include <trace/events/sched.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/oom.h>
+#include <linux/ipipe.h>
 #include <linux/writeback.h>
 #include <linux/shm.h>
 #include <linux/kcov.h>
@@ -820,6 +821,7 @@ void __noreturn do_exit(long code)
         */
        raw_spin_lock_irq(&tsk->pi_lock);
        raw_spin_unlock_irq(&tsk->pi_lock);
+       __ipipe_report_exit(tsk);
 
        if (unlikely(in_atomic())) {
                pr_info("note: %s[%d] exited with preempt_count %d\n",
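
The exit notification sits right after the pi_lock acquire/release pair
that synchronizes against concurrent wake-ups, and before do_exit()
starts dismantling the task, so the receiver still sees a fully valid
task_struct. A hypothetical receiving side (the IPIPE_KEVT_EXIT tag and
the hook signature are assumptions for this sketch):

static int my_kevent_hook(unsigned int event, void *data)
{
	if (event == IPIPE_KEVT_EXIT) {		/* assumed event tag */
		struct task_struct *tsk = data;

		/* Drop any shadow context kept for tsk; the task will
		 * never return to user space again. */
	}
	return 0;
}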
diff --git a/kernel/fork.c b/kernel/fork.c
index 500ce64517d93e68ebfa856d244c51f148faa7ba..59dd6b8df0b60c62a486d1bbd5cc917a21d83e57 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -54,6 +54,7 @@
 #include <linux/futex.h>
 #include <linux/compat.h>
 #include <linux/kthread.h>
+#include <linux/ipipe.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/rcupdate.h>
 #include <linux/ptrace.h>
@@ -557,6 +558,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #endif
 
        setup_thread_stack(tsk, orig);
+       __ipipe_init_threadflags(task_thread_info(tsk));
+       __ipipe_init_threadinfo(&task_thread_info(tsk)->ipipe_data);
        clear_user_return_notifier(tsk);
        clear_tsk_need_resched(tsk);
        set_task_stack_end_magic(tsk);
@@ -924,6 +927,7 @@ static inline void __mmput(struct mm_struct *mm)
        exit_aio(mm);
        ksm_exit(mm);
        khugepaged_exit(mm); /* must run before exit_mmap */
+       __ipipe_report_cleanup(mm);
        exit_mmap(mm);
        mm_put_huge_zero_page(mm);
        set_mm_exe_file(mm, NULL);
@@ -1927,6 +1931,7 @@ static __latent_entropy struct task_struct *copy_process(
        proc_fork_connector(p);
        cgroup_post_fork(p);
        cgroup_threadgroup_change_end(current);
+       __ipipe_init_taskinfo(p);
        perf_event_fork(p);
 
        trace_task_newtask(p, clone_flags);
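
Three hooks land in fork.c: a duplicated task starts with clean I-pipe
thread flags and thread info rather than inheriting the parent's
(dup_task_struct()), a child gets its I-pipe task info set up once it is
fully constructed in copy_process(), and a dying mm is reported before
exit_mmap() tears the address space down, while its vmas are still
intact. A hypothetical handler for the cleanup event (IPIPE_KEVT_CLEANUP
is an assumed tag):

static void my_mm_cleanup(struct mm_struct *mm)
{
	/* Release or unpin co-kernel mappings attached to mm while
	 * the vmas can still be looked up, i.e. before exit_mmap(). */
}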
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2119b43b92cc37862050ce885e9301cb69c8c7a2..81c4e580a823d23578aced7c298554bcef765629 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1109,10 +1109,13 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
        }
 
        /* Can the task run on the task's current CPU? If so, we're done */
-       if (cpumask_test_cpu(task_cpu(p), new_mask))
+       if (cpumask_test_cpu(task_cpu(p), new_mask)) {
+               __ipipe_report_setaffinity(p, task_cpu(p));
                goto out;
+       }
 
        dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+       __ipipe_report_setaffinity(p, dest_cpu);
        if (task_running(rq, p) || p->state == TASK_WAKING) {
                struct migration_arg arg = { p, dest_cpu };
                /* Need help from migration thread: drop lock and wait. */
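
Both outcomes of __set_cpus_allowed_ptr() are now reported: if the task
may stay where it currently runs, the event carries task_cpu(p);
otherwise it carries the chosen dest_cpu before the migration is kicked
off, so a co-kernel always learns the CPU the task will end up on. A
sketch of what the receiver might track (the argument block is purely
illustrative, not taken from this diff):

struct my_migration_info {		/* hypothetical layout */
	struct task_struct *task;
	int dest_cpu;
};

static void my_on_setaffinity(struct my_migration_info *info)
{
	/* Keep the co-kernel's notion of info->task's CPU in sync
	 * with where Linux is about to run it. */
}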
diff --git a/kernel/signal.c b/kernel/signal.c
index 1facff1dbbaecb61df2c19bc04477ca02f3630b7..67bff1125724b10ffff15e4931dbca9420bdaa10 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -32,6 +32,7 @@
 #include <linux/tracehook.h>
 #include <linux/capability.h>
 #include <linux/freezer.h>
+#include <linux/ipipe.h>
 #include <linux/pid_namespace.h>
 #include <linux/nsproxy.h>
 #include <linux/user_namespace.h>
@@ -661,6 +662,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
        set_tsk_thread_flag(t, TIF_SIGPENDING);
+
+       /* TIF_SIGPENDING must be set before the wake-up is reported. */
+       __ipipe_report_sigwake(t);
+
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
@@ -884,8 +889,11 @@ static inline int wants_signal(int sig, struct task_struct *p)
                return 0;
        if (sig == SIGKILL)
                return 1;
-       if (task_is_stopped_or_traced(p))
+       if (task_is_stopped_or_traced(p)) {
+               if (!signal_pending(p))
+                       __ipipe_report_sigwake(p);
                return 0;
+       }
        return task_curr(p) || !signal_pending(p);
 }