struct rcu_head rcu;
struct list_head list;
+
+ struct task_group *parent;
+ struct list_head siblings;
+ struct list_head children;
};
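The three new fields place each task group in a tree: parent points one level up, siblings is the node that sits on the parent's children list, and children heads the group's own list of child groups. As a rough sketch of that linkage (illustrative only, not part of the patch; it assumes the kernel's <linux/list.h> helpers, and my_group/attach_group are hypothetical names):

	/* Sketch, not patch code: how the three fields form a tree. */
	struct my_group {
		struct my_group		*parent;	/* one level up */
		struct list_head	siblings;	/* node on parent->children */
		struct list_head	children;	/* head of this group's child list */
	};

	static void attach_group(struct my_group *child, struct my_group *parent)
	{
		child->parent = parent;
		INIT_LIST_HEAD(&child->children);
		list_add(&child->siblings, &parent->children);
	}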
#ifdef CONFIG_USER_SCHED
#ifdef CONFIG_GROUP_SCHED
list_add(&init_task_group.list, &task_groups);
+ INIT_LIST_HEAD(&init_task_group.children);
+
+#ifdef CONFIG_USER_SCHED
+ INIT_LIST_HEAD(&root_task_group.children);
+ init_task_group.parent = &root_task_group;
+ list_add(&init_task_group.siblings, &root_task_group.children);
+#endif
#endif
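With CONFIG_USER_SCHED the boot-time wiring above hangs init_task_group off root_task_group; without it, init_task_group stays at the top of the hierarchy with an initially empty children list.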
for_each_possible_cpu(i) {
register_rt_sched_group(tg, i);
}
list_add_rcu(&tg->list, &task_groups);
+
+ WARN_ON(!parent); /* root should already exist */
+
+ tg->parent = parent;
+ list_add_rcu(&tg->siblings, &parent->children);
+ INIT_LIST_HEAD(&tg->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
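On the create path the new group is published on the global task_groups list and hooked under its parent with the _rcu list helpers while task_group_lock is held, so readers can traverse the hierarchy under rcu_read_lock() alone. A hypothetical walker (illustrative only, not taken from the patch) might look like:

	/* Sketch, not patch code: RCU-safe top-down walk of the group tree. */
	static void visit_group_tree(struct task_group *parent)
	{
		struct task_group *child;

		rcu_read_lock();
		list_for_each_entry_rcu(child, &parent->children, siblings)
			visit_group_tree(child);	/* recurse into this child's subtree */
		rcu_read_unlock();
	}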
unregister_rt_sched_group(tg, i);
}
list_del_rcu(&tg->list);
+ list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for possible concurrent references to cfs_rqs to complete */
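On the destroy side the group drops off both the global task_groups list and its parent's children list under the same lock, and the actual teardown is deferred until concurrent references have completed, presumably past an RCU grace period, so walkers holding rcu_read_lock() never touch a vanishing group.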
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
unregister_fair_sched_group(tg, i);
+ list_del_rcu(&tg->siblings);
spin_unlock_irqrestore(&task_group_lock, flags);
/* wait for any ongoing reference to this group to finish */
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
register_fair_sched_group(tg, i);
+ list_add_rcu(&tg->siblings, &tg->parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
done:
mutex_unlock(&shares_mutex);
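The set_shares path mirrors create/destroy: the group is unhooked from its parent's children list (together with the per-cpu fair groups) under task_group_lock, the code waits for any ongoing references to finish, and only after the shares have been updated is the group re-linked with list_add_rcu(), so a concurrent tree walk never observes the group mid-update.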