From: Tejun Heo
Date: Fri, 29 Nov 2013 15:42:59 +0000 (-0500)
Subject: cgroup: remove cgroup_pidlist->rwsem
X-Git-Tag: upstream/snapshot3+hdmi~3614^2~28
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=069df3b7aeb3f4e926c4da9630c92010909af512;p=platform%2Fadaptation%2Frenesas_rcar%2Frenesas_kernel.git

cgroup: remove cgroup_pidlist->rwsem

cgroup_pidlist locking is needlessly complicated.  It has outer
cgroup->pidlist_mutex to protect the list of pidlists associated with
a cgroup and then each pidlist has rwsem to synchronize updates and
reads.

Given that the only read access is from seq_file operations which are
always invoked back-to-back, the rwsem is a giant overkill.  All it
does is adding unnecessary complexity.

This patch removes cgroup_pidlist->rwsem and protects all accesses to
pidlists belonging to a cgroup with cgroup->pidlist_mutex.
pidlist->rwsem locking is removed if it's nested inside
cgroup->pidlist_mutex; otherwise, it's replaced with
cgroup->pidlist_mutex locking.

Signed-off-by: Tejun Heo
Acked-by: Li Zefan
---

diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d58c30d..dc39e17 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3462,8 +3462,6 @@ struct cgroup_pidlist {
 	struct list_head links;
 	/* pointer to the cgroup we belong to, for list removal purposes */
 	struct cgroup *owner;
-	/* protects the other fields */
-	struct rw_semaphore rwsem;
 	/* for delayed destruction */
 	struct delayed_work destroy_dwork;
 };
@@ -3522,7 +3520,6 @@ static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
 	struct cgroup_pidlist *tofree = NULL;
 
 	mutex_lock(&l->owner->pidlist_mutex);
-	down_write(&l->rwsem);
 
 	/*
 	 * Destroy iff we didn't race with a new user or get queued again.
@@ -3535,7 +3532,6 @@
 		tofree = l;
 	}
 
-	up_write(&l->rwsem);
 	mutex_unlock(&l->owner->pidlist_mutex);
 	kfree(tofree);
 }
@@ -3612,7 +3608,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
 	if (!l)
 		return l;
 
-	init_rwsem(&l->rwsem);
 	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
 	l->key.type = type;
 	/* don't need task_nsproxy() if we're looking at ourself */
@@ -3675,12 +3670,10 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
 	}
 
 	/* store array, freeing old if necessary */
-	down_write(&l->rwsem);
 	pidlist_free(l->list);
 	l->list = array;
 	l->length = length;
 	l->use_count++;
-	up_write(&l->rwsem);
 
 	mutex_unlock(&cgrp->pidlist_mutex);
 
@@ -3762,7 +3755,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
 	int index = 0, pid = *pos;
 	int *iter;
 
-	down_read(&l->rwsem);
+	mutex_lock(&of->cgrp->pidlist_mutex);
 	if (pid) {
 		int end = l->length;
 
@@ -3790,7 +3783,7 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v)
 {
 	struct cgroup_pidlist_open_file *of = s->private;
 
-	up_read(&of->pidlist->rwsem);
+	mutex_unlock(&of->cgrp->pidlist_mutex);
 }
 
 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
@@ -3830,13 +3823,13 @@ static const struct seq_operations cgroup_pidlist_seq_operations = {
 
 static void cgroup_release_pid_array(struct cgroup_pidlist *l)
 {
-	down_write(&l->rwsem);
+	mutex_lock(&l->owner->pidlist_mutex);
 	BUG_ON(!l->use_count);
 	/* if the last user, arm the destroy work */
 	if (!--l->use_count)
 		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
 				 CGROUP_PIDLIST_DESTROY_DELAY);
-	up_write(&l->rwsem);
+	mutex_unlock(&l->owner->pidlist_mutex);
 }
 
 static int cgroup_pidlist_release(struct inode *inode, struct file *file)
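
For readers following the locking change, here is a rough userspace sketch of the pattern the patch ends up with: one outer mutex (standing in for cgroup->pidlist_mutex) protects both the stored pid array and its replacement, and a seq_file-style reader simply holds that mutex from its start() to its stop() callback.  This is not kernel code; the names (pidlist_owner, pidlist_store, read_start/read_next/read_stop) and the pthread mutex are illustrative stand-ins, not APIs from the patch.

/* build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pidlist {
	int *pids;
	size_t length;
};

struct pidlist_owner {
	pthread_mutex_t pidlist_mutex;	/* plays the role of cgroup->pidlist_mutex */
	struct pidlist list;
};

/* update path: swap in a new array while holding the one outer mutex */
static void pidlist_store(struct pidlist_owner *o, const int *pids, size_t n)
{
	int *copy = malloc(n * sizeof(*copy));

	if (!copy)
		return;
	memcpy(copy, pids, n * sizeof(*copy));

	pthread_mutex_lock(&o->pidlist_mutex);
	free(o->list.pids);
	o->list.pids = copy;
	o->list.length = n;
	pthread_mutex_unlock(&o->pidlist_mutex);
}

/* read path, mirroring seq_file ->start()/->next()/->stop() */
static const int *read_start(struct pidlist_owner *o, size_t *pos)
{
	pthread_mutex_lock(&o->pidlist_mutex);	/* was down_read(&l->rwsem) */
	return *pos < o->list.length ? &o->list.pids[*pos] : NULL;
}

static const int *read_next(struct pidlist_owner *o, size_t *pos)
{
	return ++*pos < o->list.length ? &o->list.pids[*pos] : NULL;
}

static void read_stop(struct pidlist_owner *o)
{
	pthread_mutex_unlock(&o->pidlist_mutex);	/* was up_read(&l->rwsem) */
}

int main(void)
{
	static struct pidlist_owner o = {
		.pidlist_mutex = PTHREAD_MUTEX_INITIALIZER,
	};
	int pids[] = { 1, 42, 4711 };
	size_t pos = 0;

	pidlist_store(&o, pids, 3);
	for (const int *p = read_start(&o, &pos); p; p = read_next(&o, &pos))
		printf("%d\n", *p);
	read_stop(&o);	/* stop() runs even if start() found nothing */
	free(o.list.pids);
	return 0;
}

Because the read side only ever iterates back-to-back between start() and stop(), serializing it against updates with the same outer mutex costs nothing extra, which is exactly the argument the commit message makes for dropping the per-pidlist rwsem.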