spinlock_t flags_lock;
unsigned long init_flags;
+ unsigned get_task_flag:1;
};
return (struct td *)swap_td_raw(&td_raw, task);
}
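+/*
+ * Set between swap_ktd_uninit_top() and swap_ktd_uninit_bottom(),
+ * i.e. while uninit is in progress; checked in task_prepare().
+ */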
+bool g_shutdown_flag;
static void task_prepare(struct task_struct *task, struct td *td,
struct ktask_data *ktd)
spin_lock_init(&td->flags_lock);
td->init_flags = 0;
+ /*
+ * Set get_task_flag to true and increment the task usage count
+ * if task_prepare() is called between swap_ktd_uninit_top()
+ * and swap_ktd_uninit_bottom(). This is necessary so that the
+ * td struct can be cleared in do_put_task() or swap_ktd_uninit_bottom().
+ */
+ if (g_shutdown_flag) {
+ get_task_struct(task);
+ td->get_task_flag = true;
+ } else {
+ td->get_task_flag = false;
+ }
+
/* add to prepare_list */
list_add(&td->list, &prepare_list);
static void ktd_exit_all(struct td *td, struct task_struct *task);
+/* Called with prepare_lock held */
static void td_prepare_clear_no_lock(struct td *td, struct task_struct *task)
{
if (task_prepare_is(task)) {
}
}
-static void td_prepare_clear(struct td *td, struct task_struct *task)
-{
- unsigned long flags;
-
- write_lock_irqsave(&prepare_lock, flags);
- td_prepare_clear_no_lock(td, task);
- write_unlock_irqrestore(&prepare_lock, flags);
-}
-
void *swap_ktd(struct ktask_data *ktd, struct task_struct *task)
{
struct td *td = td_by_task(task);
}
EXPORT_SYMBOL_GPL(swap_ktd_unreg);
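+/*
+ * Task that is running swap_ktd_uninit_bottom(): do_put_task() calls
+ * made from its context are skipped because the td structs have
+ * already been cleared there.
+ */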
+struct task_struct *g_uninit_task;
static void do_put_task(struct task_struct *task)
{
- if (task_prepare_is(task))
- td_prepare_clear(td_by_task(task), task);
+ unsigned long flags;
+
+ if (current == g_uninit_task) {
+ /* The td for 'task' has already been cleaned up */
+ return;
+ }
+
+ write_lock_irqsave(&prepare_lock, flags);
+ if (task_prepare_is(task)) {
+ struct td *td = td_by_task(task);
+
+ if (td->get_task_flag) {
+ /*
+ * Decrement the task usage count directly because calling
+ * put_task_struct() here would lead to an infinite loop
+ */
+ atomic_dec(&task->usage);
+ WARN_ON(atomic_read(&task->usage));
+ }
+
+ td_prepare_clear_no_lock(td, task);
+ }
+ write_unlock_irqrestore(&prepare_lock, flags);
}
#ifdef CONFIG_SWAP_HOOK_TASKDATA
void swap_ktd_uninit_top(void)
{
- struct td *td;
+ struct td *td, *n;
unsigned long flags;
/* get injected tasks */
write_lock_irqsave(&prepare_lock, flags);
- list_for_each_entry(td, &prepare_list, list) {
- get_task_struct(task_by_td(td));
+ list_for_each_entry_safe(td, n, &prepare_list, list) {
+ int usage;
+ struct task_struct *task = task_by_td(td);
+
+ BUG_ON(td->get_task_flag);
+
+ get_task_struct(task);
+ usage = atomic_read(&task->usage);
+ if (usage <= 0) {
+ WARN(1, "Incorrect usage=%d value in task[%u %u %s]\n",
+ usage, task->tgid, task->pid, task->comm);
+ td_prepare_clear_no_lock(td, task);
+ atomic_dec(&task->usage);
+ } else if (usage == 1) {
+ /*
+ * The task is between the atomic_dec(&task->usage)
+ * in put_task_struct() and the call to the
+ * __put_task_struct() handler
+ */
+ td_prepare_clear_no_lock(td, task);
+ atomic_dec(&task->usage);
+ } else {
+ /* if (usage > 1) */
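+ /*
+ * The reference taken by get_task_struct() above is kept;
+ * it is dropped later in do_put_task() or
+ * swap_ktd_uninit_bottom().
+ */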
+ td->get_task_flag = true;
+ }
}
+
+ BUG_ON(g_shutdown_flag);
+ g_shutdown_flag = true;
+
write_unlock_irqrestore(&prepare_lock, flags);
}
/* remove td injection from tasks and put tasks */
write_lock_irqsave(&prepare_lock, flags);
+ g_shutdown_flag = false;
+ g_uninit_task = current;
list_for_each_entry_safe(td, n, &prepare_list, list) {
struct task_struct *task = task_by_td(td);
+ if (!td->get_task_flag) {
+ pr_err("Incorrect TD for task[%u %u %s]\n",
+ task->tgid, task->pid, task->comm);
+ BUG();
+ }
td_prepare_clear_no_lock(td, task);
put_task_struct(task);
}
+ g_uninit_task = NULL;
write_unlock_irqrestore(&prepare_lock, flags);
taskdata_uninit();