 	touch_mnt_namespace(n);
 }
 
-static struct mount *next_mnt(struct mount *p, struct vfsmount *root)
+static struct mount *next_mnt(struct mount *p, struct mount *root)
 {
 	struct list_head *next = p->mnt_mounts.next;
 	if (next == &p->mnt_mounts) {
 		while (1) {
-			if (&p->mnt == root)
+			if (p == root)
 				return NULL;
 			next = p->mnt_child.next;
 			if (next != &p->mnt_parent->mnt_mounts)
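With the new signature, every walker passes the subtree root as a struct mount directly instead of taking the address of the vfsmount embedded in it. A hypothetical caller built on the converted iterator (count_subtree is an invented name, not part of the patch):

/* hypothetical: count the mounts in the subtree rooted at @root,
 * the root itself included, using the converted iterator */
static unsigned int count_subtree(struct mount *root)
{
	struct mount *p;
	unsigned int n = 0;

	for (p = root; p; p = next_mnt(p, root))
		n++;
	return n;
}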
  * open files, pwds, chroots or sub mounts that are
  * busy.
  */
-int may_umount_tree(struct vfsmount *mnt)
+int may_umount_tree(struct vfsmount *m)
 {
+	struct mount *mnt = real_mount(m);
 	int actual_refs = 0;
 	int minimum_refs = 0;
 	struct mount *p;
-	BUG_ON(!mnt);
+	BUG_ON(!m);
 	/* write lock needed for mnt_get_count */
 	br_write_lock(vfsmount_lock);
-	for (p = real_mount(mnt); p; p = next_mnt(p, mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
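The conversion leans on the real_mount() helper from fs/mount.h, which recovers the containing struct mount from the struct vfsmount embedded inside it; essentially:

/* fs/mount.h (abridged): map the public handle back to its container */
static inline struct mount *real_mount(struct vfsmount *mnt)
{
	return container_of(mnt, struct mount, mnt);
}

This keeps exported interfaces such as may_umount_tree() speaking struct vfsmount while the internals move to struct mount.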
 	LIST_HEAD(tmp_list);
 	struct mount *p;
 
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt))
+	for (p = mnt; p; p = next_mnt(p, mnt))
 		list_move(&p->mnt_hash, &tmp_list);
 
 	if (propagate)
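The umount_tree() hunk above splices the whole subtree onto a private tmp_list through each mount's mnt_hash linkage; the branch truncated here hands that list to the propagation code, whose declaration in fs/pnode.h is roughly:

/* fs/pnode.h (from memory): add the propagation peers of the mounts
 * already collected on @list so they are torn down together */
int propagate_umount(struct list_head *list);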
 		if (!is_subdir(r->mnt_mountpoint, dentry))
 			continue;
 
-		for (s = r; s; s = next_mnt(s, &r->mnt)) {
+		for (s = r; s; s = next_mnt(s, r)) {
 			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
 				s = skip_mnt_tree(s);
 				continue;
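skip_mnt_tree() jumps to the last descendant of the unbindable mount, so the loop's next next_mnt() call steps past the entire subtree instead of copying it. From memory, its shape is:

/* fs/namespace.c (sketch from memory): descend along the last child
 * until a leaf is reached, i.e. the subtree's depth-first tail */
static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *node = p->mnt_mounts.prev;
	while (node != &p->mnt_mounts) {
		p = list_entry(node, struct mount, mnt_child);
		node = p->mnt_mounts.prev;
	}
	return p;
}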
 {
 	struct mount *p;
 
-	for (p = mnt; p != end; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
 			mnt_release_group_id(p);
 	}
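mnt_release_group_id() returns a peer-group identifier to the allocator; at this vintage it is a thin wrapper around an IDA, roughly:

/* fs/namespace.c (sketch from memory): give the peer-group id back */
void mnt_release_group_id(struct mount *mnt)
{
	ida_remove(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}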
 {
 	struct mount *p;
 
-	for (p = mnt; p; p = recurse ? next_mnt(p, &mnt->mnt) : NULL) {
+	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
 			int err = mnt_alloc_group_id(p);
 			if (err) {
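The error leg truncated above pairs with cleanup_group_ids() from the previous hunk; from memory, the unwind releases every id granted earlier in the walk before failing:

/* expected unwind on allocation failure (from memory) */
if (err) {
	cleanup_group_ids(mnt, p);	/* return ids granted so far */
	return err;
}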
 	br_write_lock(vfsmount_lock);
 
 	if (IS_MNT_SHARED(dest_mnt)) {
-		for (p = source_mnt; p; p = next_mnt(p, &source_mnt->mnt))
+		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
 			set_mnt_shared(p);
 	}
 	if (parent_path) {
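set_mnt_shared(), already converted to take a struct mount at this point in the series, just manipulates flags through the embedded vfsmount; approximately:

/* fs/pnode.h (sketch from memory) */
static inline void set_mnt_shared(struct mount *mnt)
{
	mnt->mnt.mnt_flags &= ~MNT_SHARED_MASK;
	mnt->mnt.mnt_flags |= MNT_SHARED;
}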
 	}
 
 	br_write_lock(vfsmount_lock);
-	for (m = mnt; m; m = (recurse ? next_mnt(m, &mnt->mnt) : NULL))
+	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
 	br_write_unlock(vfsmount_lock);
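For orientation, do_change_type() is the service routine for propagation-changing mount(2) calls, with recurse derived from MS_REC. The hunk above is the path a userspace call like this ends up exercising:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* recursively mark the tree under /mnt shared; the kernel walks
	 * the subtree with next_mnt() as in the hunk above */
	if (mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL) != 0)
		perror("mount");
	return 0;
}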
 static inline int tree_contains_unbindable(struct mount *mnt)
 {
 	struct mount *p;
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		if (IS_MNT_UNBINDABLE(p))
 			return 1;
 	}
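IS_MNT_UNBINDABLE() reads the flag through the embedded vfsmount; after the earlier pnode.h conversion it is approximately:

/* fs/pnode.h (sketch from memory) */
#define IS_MNT_UNBINDABLE(m)	((m)->mnt.mnt_flags & MNT_UNBINDABLE)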
 	struct mnt_namespace *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
 	struct mount *p, *q;
+	struct mount *old = real_mount(mnt_ns->root);
 	struct mount *new;
 
 	new_ns = alloc_mnt_ns();
 	down_write(&namespace_sem);
 	/* First pass: copy the tree topology */
-	new = copy_tree(real_mount(mnt_ns->root), mnt_ns->root->mnt_root,
-			CL_COPY_ALL | CL_EXPIRE);
+	new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
 	if (!new) {
 		up_write(&namespace_sem);
 		kfree(new_ns);
 	 * as belonging to new namespace. We have already acquired a private
 	 * fs_struct, so tsk->fs->lock is not needed.
 	 */
-	p = real_mount(mnt_ns->root);
+	p = old;
 	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
 				pwdmnt = &p->mnt;
 			}
 		}
-		p = next_mnt(p, mnt_ns->root);
-		q = next_mnt(q, new_ns->root);
+		p = next_mnt(p, old);
+		q = next_mnt(q, new);
 	}
 	up_write(&namespace_sem);
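Because copy_tree() produces a structurally identical tree, the two next_mnt() walks advance in lock-step: p and q always name corresponding mounts in the old and new trees, which is what lets the loop transfer mnt_ns and patch up the caller's root and pwd pairwise. Condensed (details elided, not a literal quote):

/* condensed view of the lock-step fix-up loop */
p = old;			/* root of the original tree */
q = new;			/* its copy; same shape by construction */
while (p) {
	q->mnt_ns = new_ns;	/* the copy joins the new namespace */
	/* ... retarget fs->root / fs->pwd from &p->mnt to &q->mnt ... */
	p = next_mnt(p, old);
	q = next_mnt(q, new);
}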