sched,livepatch: Use task_call_func()
author	Peter Zijlstra <peterz@infradead.org>
Tue, 21 Sep 2021 19:54:32 +0000 (21:54 +0200)
committer	Peter Zijlstra <peterz@infradead.org>
Thu, 7 Oct 2021 11:51:15 +0000 (13:51 +0200)
Instead of frobbing around with scheduler internals, use the shiny new
task_call_func() interface.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Petr Mladek <pmladek@suse.com>
Acked-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Tested-by: Petr Mladek <pmladek@suse.com>
Tested-by: Vasily Gorbik <gor@linux.ibm.com> # on s390
Link: https://lkml.kernel.org/r/20210929152428.709906138@infradead.org
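
A note for readers unfamiliar with the new interface: task_call_func() invokes
a callback on a task while holding the task's pi_lock (and, when necessary, its
runqueue lock), so the task cannot start running or change scheduling state
while the callback inspects it, and the callback's return value is passed back
to the caller.  Because pr_debug() must not be used while holding the rq lock
(see the comment removed below), the callback only reports a reason code and
the debug printing happens after task_call_func() returns.  The sketch below is
purely illustrative and not part of the patch; inspect_task(), try_inspect()
and the NULL cookie are invented names for the example, while task_call_func()
and task_curr() are the real interfaces the diff relies on.

  #include <linux/sched.h>  /* struct task_struct, task_curr(), current */
  #include <linux/wait.h>   /* task_call_func() was declared here at the time */

  /* Callback: runs with p's scheduling state pinned by task_call_func(). */
  static int inspect_task(struct task_struct *p, void *arg)
  {
          if (task_curr(p) && p != current)
                  return -EBUSY;  /* running on another CPU, give up for now */

          /* safe to inspect p here (its stack, flags, ...) */
          return 0;
  }

  static bool try_inspect(struct task_struct *p)
  {
          /* task_call_func() returns whatever the callback returned. */
          return task_call_func(p, inspect_task, NULL) == 0;
  }
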
kernel/livepatch/transition.c

index 291b857..75251e9 100644
@@ -13,7 +13,6 @@
 #include "core.h"
 #include "patch.h"
 #include "transition.h"
-#include "../sched/sched.h"
 
 #define MAX_STACK_ENTRIES  100
 #define STACK_ERR_BUF_SIZE 128
@@ -240,7 +239,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
  * Determine whether it's safe to transition the task to the target patch state
  * by looking for any to-be-patched or to-be-unpatched functions on its stack.
  */
-static int klp_check_stack(struct task_struct *task, char *err_buf)
+static int klp_check_stack(struct task_struct *task, const char **oldname)
 {
        static unsigned long entries[MAX_STACK_ENTRIES];
        struct klp_object *obj;
@@ -248,12 +247,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
        int ret, nr_entries;
 
        ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
-       if (ret < 0) {
-               snprintf(err_buf, STACK_ERR_BUF_SIZE,
-                        "%s: %s:%d has an unreliable stack\n",
-                        __func__, task->comm, task->pid);
-               return ret;
-       }
+       if (ret < 0)
+               return -EINVAL;
        nr_entries = ret;
 
        klp_for_each_object(klp_transition_patch, obj) {
@@ -262,11 +257,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
                klp_for_each_func(obj, func) {
                        ret = klp_check_stack_func(func, entries, nr_entries);
                        if (ret) {
-                               snprintf(err_buf, STACK_ERR_BUF_SIZE,
-                                        "%s: %s:%d is sleeping on function %s\n",
-                                        __func__, task->comm, task->pid,
-                                        func->old_name);
-                               return ret;
+                               *oldname = func->old_name;
+                               return -EADDRINUSE;
                        }
                }
        }
@@ -274,6 +266,22 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
        return 0;
 }
 
+static int klp_check_and_switch_task(struct task_struct *task, void *arg)
+{
+       int ret;
+
+       if (task_curr(task) && task != current)
+               return -EBUSY;
+
+       ret = klp_check_stack(task, arg);
+       if (ret)
+               return ret;
+
+       clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
+       task->patch_state = klp_target_state;
+       return 0;
+}
+
 /*
  * Try to safely switch a task to the target patch state.  If it's currently
  * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
@@ -281,13 +289,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
  */
 static bool klp_try_switch_task(struct task_struct *task)
 {
-       static char err_buf[STACK_ERR_BUF_SIZE];
-       struct rq *rq;
-       struct rq_flags flags;
+       const char *old_name;
        int ret;
-       bool success = false;
-
-       err_buf[0] = '\0';
 
        /* check if this task has already switched over */
        if (task->patch_state == klp_target_state)
@@ -305,36 +308,31 @@ static bool klp_try_switch_task(struct task_struct *task)
         * functions.  If all goes well, switch the task to the target patch
         * state.
         */
-       rq = task_rq_lock(task, &flags);
+       ret = task_call_func(task, klp_check_and_switch_task, &old_name);
+       switch (ret) {
+       case 0:         /* success */
+               break;
 
-       if (task_running(rq, task) && task != current) {
-               snprintf(err_buf, STACK_ERR_BUF_SIZE,
-                        "%s: %s:%d is running\n", __func__, task->comm,
-                        task->pid);
-               goto done;
+       case -EBUSY:    /* klp_check_and_switch_task() */
+               pr_debug("%s: %s:%d is running\n",
+                        __func__, task->comm, task->pid);
+               break;
+       case -EINVAL:   /* klp_check_and_switch_task() */
+               pr_debug("%s: %s:%d has an unreliable stack\n",
+                        __func__, task->comm, task->pid);
+               break;
+       case -EADDRINUSE: /* klp_check_and_switch_task() */
+               pr_debug("%s: %s:%d is sleeping on function %s\n",
+                        __func__, task->comm, task->pid, old_name);
+               break;
+
+       default:
+               pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
+                        __func__, ret, task->comm, task->pid);
+               break;
        }
 
-       ret = klp_check_stack(task, err_buf);
-       if (ret)
-               goto done;
-
-       success = true;
-
-       clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
-       task->patch_state = klp_target_state;
-
-done:
-       task_rq_unlock(rq, task, &flags);
-
-       /*
-        * Due to console deadlock issues, pr_debug() can't be used while
-        * holding the task rq lock.  Instead we have to use a temporary buffer
-        * and print the debug message after releasing the lock.
-        */
-       if (err_buf[0] != '\0')
-               pr_debug("%s", err_buf);
-
-       return success;
+       return !ret;
 }
 
 /*