ktd: Fix synchronization when module is unloaded 87/177387/1
authorVyacheslav Cherkashin <v.cherkashin@samsung.com>
Mon, 16 Apr 2018 11:24:57 +0000 (14:24 +0300)
committerVyacheslav Cherkashin <v.cherkashin@samsung.com>
Fri, 27 Apr 2018 11:57:12 +0000 (14:57 +0300)
Problem:
  When unloading the module there was a race condition between
  the __put_task_struct() handler call and the cleanup of the td struct
  for all tasks (after the swap_ktd_uninit_top() and
  before the swap_ktd_uninit_bottom() calls).

Solution:
  Add a get_task_flag flag to the td struct. Set it if the task was caught
  (the task usage is incremented in swap_ktd_uninit_top() or task_prepare()).
  Add a check for this flag, and its handling, to the do_put_task() handler.

Change-Id: I627acad69819630c02f0eaac1632b11825d856f5
Signed-off-by: Vyacheslav Cherkashin <v.cherkashin@samsung.com>
modules/kprobe/swap_ktd.c

index 3095346cadfa8b04d89c072d8d03ff22129df864..976ada64619f396d1ce024511012c9e04f09c5bb 100644 (file)
@@ -45,6 +45,7 @@ struct td {
 
        spinlock_t flags_lock;
        unsigned long init_flags;
+       unsigned get_task_flag:1;
 };
 
 
@@ -147,6 +148,7 @@ static struct td *td_by_task(struct task_struct *task)
        return (struct td *)swap_td_raw(&td_raw, task);
 }
 
+bool g_shutdown_flag;
 
 static void task_prepare(struct task_struct *task, struct td *td,
                         struct ktask_data *ktd)
@@ -166,6 +168,19 @@ static void task_prepare(struct task_struct *task, struct td *td,
        spin_lock_init(&td->flags_lock);
        td->init_flags = 0;
 
+       /*
+        * Set get_task_flag to true and increment the task usage
+        * if task_prepare() is called between swap_ktd_uninit_top()
+        * and swap_ktd_uninit_bottom(). This is necessary so that the
+        * td struct is cleared in do_put_task() or swap_ktd_uninit_bottom().
+        */
+       if (g_shutdown_flag) {
+               get_task_struct(task);
+               td->get_task_flag = true;
+       } else {
+               td->get_task_flag = false;
+       }
+
        /* add to prepare_list */
        list_add(&td->list, &prepare_list);
 
@@ -175,6 +190,7 @@ unlock:
 
 static void ktd_exit_all(struct td *td, struct task_struct *task);
 
+/* Called with prepare_lock held */
 static void td_prepare_clear_no_lock(struct td *td, struct task_struct *task)
 {
        if (task_prepare_is(task)) {
@@ -187,15 +203,6 @@ static void td_prepare_clear_no_lock(struct td *td, struct task_struct *task)
        }
 }
 
-static void td_prepare_clear(struct td *td, struct task_struct *task)
-{
-       unsigned long flags;
-
-       write_lock_irqsave(&prepare_lock, flags);
-       td_prepare_clear_no_lock(td, task);
-       write_unlock_irqrestore(&prepare_lock, flags);
-}
-
 void *swap_ktd(struct ktask_data *ktd, struct task_struct *task)
 {
        struct td *td = td_by_task(task);
@@ -271,11 +278,33 @@ void swap_ktd_unreg(struct ktask_data *ktd)
 }
 EXPORT_SYMBOL_GPL(swap_ktd_unreg);
 
+struct task_struct *g_uninit_task;
 
 static void do_put_task(struct task_struct *task)
 {
-       if (task_prepare_is(task))
-               td_prepare_clear(td_by_task(task), task);
+       unsigned long flags;
+
+       if (current == g_uninit_task) {
+               /* The 'task' is already cleaned */
+               return;
+       }
+
+       write_lock_irqsave(&prepare_lock, flags);
+       if (task_prepare_is(task)) {
+               struct td *td = td_by_task(task);
+
+               if (td->get_task_flag) {
+                       /*
+                        * Decrement the task usage directly because a
+                        * put_task_struct() call would lead to an infinite loop
+                        */
+                       atomic_dec(&task->usage);
+                       WARN_ON(atomic_read(&task->usage));
+               }
+
+               td_prepare_clear_no_lock(td, task);
+       }
+       write_unlock_irqrestore(&prepare_lock, flags);
 }
 
 #ifdef CONFIG_SWAP_HOOK_TASKDATA
@@ -344,14 +373,40 @@ int swap_ktd_init(void)
 
 void swap_ktd_uninit_top(void)
 {
-       struct td *td;
+       struct td *td, *n;
        unsigned long flags;
 
        /* get injected tasks */
        write_lock_irqsave(&prepare_lock, flags);
-       list_for_each_entry(td, &prepare_list, list) {
-               get_task_struct(task_by_td(td));
+       list_for_each_entry_safe(td, n, &prepare_list, list) {
+               int usage;
+               struct task_struct *task = task_by_td(td);
+               BUG_ON(td->get_task_flag);
+
+               get_task_struct(task);
+               usage = atomic_read(&task->usage);
+               if (usage <= 0) {
+                       WARN(1, "Incorrect usage=%d value, in task[%u %u %s]\n",
+                            usage, task->tgid, task->pid, task->comm);
+                       td_prepare_clear_no_lock(td, task);
+                       atomic_dec(&task->usage);
+               } else if (usage == 1) {
+                       /*
+                        * Handle the window between the
+                        * atomic_dec(&task->usage) [in put_task_struct()]
+                        * and the [__put_task_struct()] handler call
+                        */
+                       td_prepare_clear_no_lock(td, task);
+                       atomic_dec(&task->usage);
+               } else {
+                       /* if (usage > 1) */
+                       td->get_task_flag = true;
+               }
        }
+
+       BUG_ON(g_shutdown_flag);
+       g_shutdown_flag = true;
+
        write_unlock_irqrestore(&prepare_lock, flags);
 }
 
@@ -362,12 +417,20 @@ void swap_ktd_uninit_bottom(void)
 
        /* remove td injection from tasks and put tasks */
        write_lock_irqsave(&prepare_lock, flags);
+       g_shutdown_flag = false;
+       g_uninit_task = current;
        list_for_each_entry_safe(td, n, &prepare_list, list) {
                struct task_struct *task = task_by_td(td);
+               if (!td->get_task_flag) {
+                       pr_err("Incorrect TD for task[%u %u %s]\n",
+                              task->tgid, task->pid, task->comm);
+                       BUG();
+               }
 
                td_prepare_clear_no_lock(td, task);
                put_task_struct(task);
        }
+       g_uninit_task = NULL;
        write_unlock_irqrestore(&prepare_lock, flags);
 
        taskdata_uninit();