From 3aab4f50bff89bdea5066a05d4f3c5fa25bc37c7 Mon Sep 17 00:00:00 2001
From: Rik van Riel
Date: Tue, 10 Feb 2015 15:27:50 -0500
Subject: [PATCH] context_tracking: Generalize context tracking APIs to
 support user and guest

Generalize the context tracking APIs to support various types of
context. This is performed by splitting out the mechanism from
context_tracking_user_enter and context_tracking_user_exit into
context_tracking_enter and context_tracking_exit.

The nature of the context we track is now detailed in a ctx_state
parameter pushed to these APIs, allowing the same functions to track
not just kernel <> user space switching, but also kernel <> guest
transitions.

But leave the old functions in place to avoid breaking ARM, which calls
these functions from assembler code and cannot easily pass C enum
parameters.

Reviewed-by: Paul E. McKenney
Signed-off-by: Rik van Riel
Cc: Paul E. McKenney
Cc: Andy Lutomirski
Cc: Will Deacon
Cc: Marcelo Tosatti
Cc: Christian Borntraeger
Cc: Luiz Capitulino
Cc: Paolo Bonzini
Signed-off-by: Frederic Weisbecker
---
 include/linux/context_tracking.h |  9 ++++++---
 kernel/context_tracking.c        | 43 ++++++++++++++++++++++++++--------------
 2 files changed, 34 insertions(+), 18 deletions(-)

diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 427b056..7f1810a 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,6 +10,8 @@
 
 #ifdef CONFIG_CONTEXT_TRACKING
 extern void context_tracking_cpu_set(int cpu);
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
 extern void context_tracking_user_enter(void);
 extern void context_tracking_user_exit(void);
 extern void __context_tracking_task_switch(struct task_struct *prev,
@@ -35,7 +37,8 @@ static inline enum ctx_state exception_enter(void)
 		return 0;
 
 	prev_ctx = this_cpu_read(context_tracking.state);
-	context_tracking_user_exit();
+	if (prev_ctx != CONTEXT_KERNEL)
+		context_tracking_exit(prev_ctx);
 
 	return prev_ctx;
 }
@@ -43,8 +46,8 @@ static inline enum ctx_state exception_enter(void)
 static inline void exception_exit(enum ctx_state prev_ctx)
 {
 	if (context_tracking_is_enabled()) {
-		if (prev_ctx == CONTEXT_USER)
-			context_tracking_user_enter();
+		if (prev_ctx != CONTEXT_KERNEL)
+			context_tracking_enter(prev_ctx);
 	}
 }
 
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 8ad53c9..17715d8 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -39,15 +39,15 @@ void context_tracking_cpu_set(int cpu)
 }
 
 /**
- * context_tracking_user_enter - Inform the context tracking that the CPU is going to
- *                               enter userspace mode.
+ * context_tracking_enter - Inform the context tracking that the CPU is going
+ *                          to enter user or guest space mode.
  *
  * This function must be called right before we switch from the kernel
- * to userspace, when it's guaranteed the remaining kernel instructions
- * to execute won't use any RCU read side critical section because this
- * function sets RCU in extended quiescent state.
+ * to user or guest space, when it's guaranteed the remaining kernel
+ * instructions to execute won't use any RCU read side critical section
+ * because this function sets RCU in extended quiescent state.
  */
-void context_tracking_user_enter(void)
+void context_tracking_enter(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -75,7 +75,7 @@ void context_tracking_user_enter(void)
 	WARN_ON_ONCE(!current->mm);
 
 	local_irq_save(flags);
-	if ( __this_cpu_read(context_tracking.state) != CONTEXT_USER) {
+	if ( __this_cpu_read(context_tracking.state) != state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			trace_user_enter(0);
 			/*
@@ -101,24 +101,31 @@ void context_tracking_user_enter(void)
 		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
 		 * is false because we know that CPU is not tickless.
 		 */
-		__this_cpu_write(context_tracking.state, CONTEXT_USER);
+		__this_cpu_write(context_tracking.state, state);
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_enter);
+
+void context_tracking_user_enter(void)
+{
+	context_tracking_enter(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
 /**
- * context_tracking_user_exit - Inform the context tracking that the CPU is
- *                              exiting userspace mode and entering the kernel.
+ * context_tracking_exit - Inform the context tracking that the CPU is
+ *                         exiting user or guest mode and entering the kernel.
  *
- * This function must be called after we entered the kernel from userspace
- * before any use of RCU read side critical section. This potentially include
- * any high level kernel code like syscalls, exceptions, signal handling, etc...
+ * This function must be called after we entered the kernel from user or
+ * guest space before any use of RCU read side critical section. This
+ * potentially includes any high level kernel code like syscalls, exceptions,
+ * signal handling, etc...
  *
  * This call supports re-entrancy. This way it can be called from any exception
  * handler without needing to know if we came from userspace or not.
  */
-void context_tracking_user_exit(void)
+void context_tracking_exit(enum ctx_state state)
 {
 	unsigned long flags;
 
@@ -129,7 +136,7 @@ void context_tracking_user_exit(void)
 		return;
 
 	local_irq_save(flags);
-	if (__this_cpu_read(context_tracking.state) == CONTEXT_USER) {
+	if (__this_cpu_read(context_tracking.state) == state) {
 		if (__this_cpu_read(context_tracking.active)) {
 			/*
 			 * We are going to run code that may use RCU. Inform
@@ -143,6 +150,12 @@ void context_tracking_user_exit(void)
 	}
 	local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_exit);
+
+void context_tracking_user_exit(void)
+{
+	context_tracking_exit(CONTEXT_USER);
+}
 NOKPROBE_SYMBOL(context_tracking_user_exit);
 
 /**
-- 
2.7.4
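
For illustration only, below is a minimal sketch of how a hypervisor entry/exit
path could use the generalized API. It is not part of the patch: the guest
context value CONTEXT_GUEST arrives later in this series, and struct demo_vcpu
together with demo_vcpu_run() are made-up stand-ins for a real hypervisor's
code. The sketch assumes a kernel built with CONFIG_CONTEXT_TRACKING=y.

#include <linux/context_tracking.h>

/* Made-up vcpu state, standing in for a real hypervisor structure. */
struct demo_vcpu {
	int id;
};

/* Assumed helper that would actually run the guest until it exits. */
static int demo_vcpu_run(struct demo_vcpu *vcpu)
{
	/* A real hypervisor would enter guest mode here. */
	return 0;
}

/*
 * Sketch of a guest run path: context_tracking_enter() marks the switch
 * out of the kernel and puts RCU in an extended quiescent state before
 * guest code runs; context_tracking_exit() announces the return to the
 * kernel before any RCU read side critical section may be used.
 */
static int demo_run_guest(struct demo_vcpu *vcpu)
{
	int ret;

	context_tracking_enter(CONTEXT_GUEST);	/* kernel -> guest */
	ret = demo_vcpu_run(vcpu);
	context_tracking_exit(CONTEXT_GUEST);	/* guest -> kernel */

	return ret;
}

User space transitions keep working unchanged: context_tracking_user_enter()
and context_tracking_user_exit() remain as thin wrappers that pass
CONTEXT_USER, which is what lets the ARM assembly call sites stay untouched.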