1 // SPDX-License-Identifier: GPL-2.0-only
5 * (C) Copyright Al Viro 2000, 2001
7 * Based on code from fs/super.c, copyright Linus Torvalds and others.
11 #include <linux/syscalls.h>
12 #include <linux/export.h>
13 #include <linux/capability.h>
14 #include <linux/mnt_namespace.h>
15 #include <linux/user_namespace.h>
16 #include <linux/namei.h>
17 #include <linux/security.h>
18 #include <linux/cred.h>
19 #include <linux/idr.h>
20 #include <linux/init.h> /* init_rootfs */
21 #include <linux/fs_struct.h> /* get_fs_root et al. */
22 #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
23 #include <linux/file.h>
24 #include <linux/uaccess.h>
25 #include <linux/proc_ns.h>
26 #include <linux/magic.h>
27 #include <linux/memblock.h>
28 #include <linux/proc_fs.h>
29 #include <linux/task_work.h>
30 #include <linux/sched/task.h>
31 #include <uapi/linux/mount.h>
32 #include <linux/fs_context.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/mnt_idmapping.h>
39 /* Maximum number of mounts in a mount namespace */
40 unsigned int sysctl_mount_max __read_mostly = 100000;
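/* Tunable at runtime via the "fs.mount-max" sysctl. */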
42 static unsigned int m_hash_mask __read_mostly;
43 static unsigned int m_hash_shift __read_mostly;
44 static unsigned int mp_hash_mask __read_mostly;
45 static unsigned int mp_hash_shift __read_mostly;
47 static __initdata unsigned long mhash_entries;
48 static int __init set_mhash_entries(char *str)
52 mhash_entries = simple_strtoul(str, &str, 0);
55 __setup("mhash_entries=", set_mhash_entries);
57 static __initdata unsigned long mphash_entries;
58 static int __init set_mphash_entries(char *str)
62 mphash_entries = simple_strtoul(str, &str, 0);
65 __setup("mphash_entries=", set_mphash_entries);
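/*
 * Example: booting with "mhash_entries=8192 mphash_entries=2048" sizes
 * both tables explicitly; otherwise mnt_init() lets
 * alloc_large_system_hash() scale them from available memory.
 */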
68 static DEFINE_IDA(mnt_id_ida);
69 static DEFINE_IDA(mnt_group_ida);
71 static struct hlist_head *mount_hashtable __read_mostly;
72 static struct hlist_head *mountpoint_hashtable __read_mostly;
73 static struct kmem_cache *mnt_cache __read_mostly;
74 static DECLARE_RWSEM(namespace_sem);
75 static HLIST_HEAD(unmounted); /* protected by namespace_sem */
76 static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
79 unsigned int attr_set;
80 unsigned int attr_clr;
81 unsigned int propagation;
82 unsigned int lookup_flags;
84 struct user_namespace *mnt_userns;
88 struct kobject *fs_kobj;
89 EXPORT_SYMBOL_GPL(fs_kobj);
92 * vfsmount lock may be taken for read to prevent changes to the
93 * vfsmount hash, i.e. during mountpoint lookups or walking back
96 * It should be taken for write in all cases where the vfsmount
97 * tree or hash is modified or when a vfsmount structure is modified.
99 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
101 static inline void lock_mount_hash(void)
103 write_seqlock(&mount_lock);
106 static inline void unlock_mount_hash(void)
108 write_sequnlock(&mount_lock);
111 static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
113 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
114 tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
115 tmp = tmp + (tmp >> m_hash_shift);
116 return &mount_hashtable[tmp & m_hash_mask];
119 static inline struct hlist_head *mp_hash(struct dentry *dentry)
121 unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
122 tmp = tmp + (tmp >> mp_hash_shift);
123 return &mountpoint_hashtable[tmp & mp_hash_mask];
126 static int mnt_alloc_id(struct mount *mnt)
128 int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);
136 static void mnt_free_id(struct mount *mnt)
138 ida_free(&mnt_id_ida, mnt->mnt_id);
142 * Allocate a new peer group ID
144 static int mnt_alloc_group_id(struct mount *mnt)
146 int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);
150 mnt->mnt_group_id = res;
155 * Release a peer group ID
157 void mnt_release_group_id(struct mount *mnt)
159 ida_free(&mnt_group_ida, mnt->mnt_group_id);
160 mnt->mnt_group_id = 0;
164 * vfsmount lock must be held for read
166 static inline void mnt_add_count(struct mount *mnt, int n)
169 this_cpu_add(mnt->mnt_pcp->mnt_count, n);
178 * vfsmount lock must be held for write
180 int mnt_get_count(struct mount *mnt)
186 for_each_possible_cpu(cpu) {
187 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
192 return mnt->mnt_count;
196 static struct mount *alloc_vfsmnt(const char *name)
198 struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
202 err = mnt_alloc_id(mnt);
207 mnt->mnt_devname = kstrdup_const(name,
209 if (!mnt->mnt_devname)
214 mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
216 goto out_free_devname;
218 this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
221 mnt->mnt_writers = 0;
224 INIT_HLIST_NODE(&mnt->mnt_hash);
225 INIT_LIST_HEAD(&mnt->mnt_child);
226 INIT_LIST_HEAD(&mnt->mnt_mounts);
227 INIT_LIST_HEAD(&mnt->mnt_list);
228 INIT_LIST_HEAD(&mnt->mnt_expire);
229 INIT_LIST_HEAD(&mnt->mnt_share);
230 INIT_LIST_HEAD(&mnt->mnt_slave_list);
231 INIT_LIST_HEAD(&mnt->mnt_slave);
232 INIT_HLIST_NODE(&mnt->mnt_mp_list);
233 INIT_LIST_HEAD(&mnt->mnt_umounting);
234 INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
235 mnt->mnt.mnt_userns = &init_user_ns;
241 kfree_const(mnt->mnt_devname);
246 kmem_cache_free(mnt_cache, mnt);
251 * Most r/o checks on a fs are for operations that take
252 * discrete amounts of time, like a write() or unlink().
253 * We must keep track of when those operations start
254 * (for permission checks) and when they end, so that
255 * we can determine when writes are able to occur to
259 * __mnt_is_readonly: check whether a mount is read-only
260 * @mnt: the mount to check for its write status
262 * This shouldn't be used directly outside of the VFS.
263 * It does not guarantee that the filesystem will stay
264 * r/w, just that it is right *now*. This cannot and
265 * should not be used in place of IS_RDONLY(inode).
266 * mnt_want/drop_write() will _keep_ the filesystem
269 bool __mnt_is_readonly(struct vfsmount *mnt)
271 return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
273 EXPORT_SYMBOL_GPL(__mnt_is_readonly);
275 static inline void mnt_inc_writers(struct mount *mnt)
278 this_cpu_inc(mnt->mnt_pcp->mnt_writers);
284 static inline void mnt_dec_writers(struct mount *mnt)
287 this_cpu_dec(mnt->mnt_pcp->mnt_writers);
293 static unsigned int mnt_get_writers(struct mount *mnt)
296 unsigned int count = 0;
299 for_each_possible_cpu(cpu) {
300 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
305 return mnt->mnt_writers;
309 static int mnt_is_readonly(struct vfsmount *mnt)
311 if (mnt->mnt_sb->s_readonly_remount)
313 /* Order wrt setting s_flags/s_readonly_remount in do_remount() */
315 return __mnt_is_readonly(mnt);
319 * Most r/o & frozen checks on a fs are for operations that take discrete
320 * amounts of time, like a write() or unlink(). We must keep track of when
321 * those operations start (for permission checks) and when they end, so that we
322 * can determine when writes are able to occur to a filesystem.
325 * __mnt_want_write - get write access to a mount without freeze protection
326 * @m: the mount on which to take a write
328 * This tells the low-level filesystem that a write is about to be performed to
329 * it, and makes sure that writes are allowed (mount is read-write) before
330 * returning success. This operation does not protect against filesystem being
331 * frozen. When the write operation is finished, __mnt_drop_write() must be
332 * called. This is effectively a refcount.
334 int __mnt_want_write(struct vfsmount *m)
336 struct mount *mnt = real_mount(m);
340 mnt_inc_writers(mnt);
342 * The store from mnt_inc_writers() must be visible before we pass
343 * the MNT_WRITE_HOLD loop below, so that the slowpath can see our
344 * incremented count after it has set MNT_WRITE_HOLD.
347 might_lock(&mount_lock.lock);
348 while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
349 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
353 * This prevents priority inversion, if the task
354 * setting MNT_WRITE_HOLD got preempted on a remote
355 * CPU, and it prevents livelock if the task setting
356 * MNT_WRITE_HOLD has a lower priority and is bound to
357 * the same CPU as the task that is spinning here.
366 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
367 * be set to match its requirements. So we must not load that until
368 * MNT_WRITE_HOLD is cleared.
371 if (mnt_is_readonly(m)) {
372 mnt_dec_writers(mnt);
381 * mnt_want_write - get write access to a mount
382 * @m: the mount on which to take a write
384 * This tells the low-level filesystem that a write is about to be performed to
385 * it, and makes sure that writes are allowed (mount is read-write, filesystem
386 * is not frozen) before returning success. When the write operation is
387 * finished, mnt_drop_write() must be called. This is effectively a refcount.
389 int mnt_want_write(struct vfsmount *m)
393 sb_start_write(m->mnt_sb);
394 ret = __mnt_want_write(m);
396 sb_end_write(m->mnt_sb);
399 EXPORT_SYMBOL_GPL(mnt_want_write);
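/*
 * Illustrative sketch (not a caller in this file): the canonical pattern
 * around a write to a mount; the helper name is hypothetical.
 */
static inline int example_write_op(struct path *path)
{
	int err = mnt_want_write(path->mnt); /* freeze protection + r/o check */

	if (err)
		return err;
	/* ... perform the write, e.g. via vfs_create()/notify_change() ... */
	mnt_drop_write(path->mnt); /* must pair with mnt_want_write() */
	return 0;
}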
402 * __mnt_want_write_file - get write access to a file's mount
403 * @file: the file whose mount to take a write on
405 * This is like __mnt_want_write, but if the file is already open for writing it
406 * skips incrementing mnt_writers (since the open file already has a reference)
407 * and instead only does the check for emergency r/o remounts. This must be
408 * paired with __mnt_drop_write_file.
410 int __mnt_want_write_file(struct file *file)
412 if (file->f_mode & FMODE_WRITER) {
414 * Superblock may have become readonly while there are still
415 * writable fd's, e.g. due to a fs error with errors=remount-ro
417 if (__mnt_is_readonly(file->f_path.mnt))
421 return __mnt_want_write(file->f_path.mnt);
425 * mnt_want_write_file - get write access to a file's mount
426 * @file: the file whose mount to take a write on
428 * This is like mnt_want_write, but if the file is already open for writing it
429 * skips incrementing mnt_writers (since the open file already has a reference)
430 * and instead only does the freeze protection and the check for emergency r/o
431 * remounts. This must be paired with mnt_drop_write_file.
433 int mnt_want_write_file(struct file *file)
437 sb_start_write(file_inode(file)->i_sb);
438 ret = __mnt_want_write_file(file);
440 sb_end_write(file_inode(file)->i_sb);
443 EXPORT_SYMBOL_GPL(mnt_want_write_file);
446 * __mnt_drop_write - give up write access to a mount
447 * @mnt: the mount on which to give up write access
449 * Tells the low-level filesystem that we are done
450 * performing writes to it. Must be matched with
451 * __mnt_want_write() call above.
453 void __mnt_drop_write(struct vfsmount *mnt)
456 mnt_dec_writers(real_mount(mnt));
461 * mnt_drop_write - give up write access to a mount
462 * @mnt: the mount on which to give up write access
464 * Tells the low-level filesystem that we are done performing writes to it and
465 * also allows filesystem to be frozen again. Must be matched with
466 * mnt_want_write() call above.
468 void mnt_drop_write(struct vfsmount *mnt)
470 __mnt_drop_write(mnt);
471 sb_end_write(mnt->mnt_sb);
473 EXPORT_SYMBOL_GPL(mnt_drop_write);
475 void __mnt_drop_write_file(struct file *file)
477 if (!(file->f_mode & FMODE_WRITER))
478 __mnt_drop_write(file->f_path.mnt);
481 void mnt_drop_write_file(struct file *file)
483 __mnt_drop_write_file(file);
484 sb_end_write(file_inode(file)->i_sb);
486 EXPORT_SYMBOL(mnt_drop_write_file);
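/*
 * Sketch of the file-based variant, roughly how an ioctl that modifies
 * its inode would use it (the handler name is hypothetical).
 */
static inline long example_ioctl_modify(struct file *file)
{
	long err = mnt_want_write_file(file);

	if (err)
		return err;
	/* ... modify the inode under the write reference ... */
	mnt_drop_write_file(file);
	return 0;
}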
488 static inline int mnt_hold_writers(struct mount *mnt)
490 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
492 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
493 * should be visible before we do.
498 * With writers on hold, if this value is zero, then there are
499 * definitely no active writers (although held writers may subsequently
500 * increment the count, they'll have to wait, and decrement it after
501 * seeing MNT_READONLY).
503 * It is OK to have counter incremented on one CPU and decremented on
504 * another: the sum will add up correctly. The danger would be when we
505 * sum up each counter, if we read a counter before it is incremented,
506 * but then read another CPU's count which it has been subsequently
507 * decremented from -- we would see more decrements than we should.
508 * MNT_WRITE_HOLD protects against this scenario, because
509 * mnt_want_write first increments count, then smp_mb, then spins on
510 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
511 * we're counting up here.
513 if (mnt_get_writers(mnt) > 0)
519 static inline void mnt_unhold_writers(struct mount *mnt)
522 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
523 * that become unheld will see MNT_READONLY.
526 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
529 static int mnt_make_readonly(struct mount *mnt)
533 ret = mnt_hold_writers(mnt);
535 mnt->mnt.mnt_flags |= MNT_READONLY;
536 mnt_unhold_writers(mnt);
540 int sb_prepare_remount_readonly(struct super_block *sb)
545 /* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
546 if (atomic_long_read(&sb->s_remove_count))
550 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
551 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
552 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
554 if (mnt_get_writers(mnt) > 0) {
560 if (!err && atomic_long_read(&sb->s_remove_count))
564 sb->s_readonly_remount = 1;
567 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
568 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
569 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
576 static void free_vfsmnt(struct mount *mnt)
578 struct user_namespace *mnt_userns;
580 mnt_userns = mnt_user_ns(&mnt->mnt);
581 if (!initial_idmapping(mnt_userns))
582 put_user_ns(mnt_userns);
583 kfree_const(mnt->mnt_devname);
585 free_percpu(mnt->mnt_pcp);
587 kmem_cache_free(mnt_cache, mnt);
590 static void delayed_free_vfsmnt(struct rcu_head *head)
592 free_vfsmnt(container_of(head, struct mount, mnt_rcu));
595 /* call under rcu_read_lock */
596 int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
599 if (read_seqretry(&mount_lock, seq))
603 mnt = real_mount(bastard);
604 mnt_add_count(mnt, 1);
605 smp_mb(); // see mntput_no_expire()
606 if (likely(!read_seqretry(&mount_lock, seq)))
608 if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
609 mnt_add_count(mnt, -1);
613 if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
614 mnt_add_count(mnt, -1);
619 /* caller will mntput() */
623 /* call under rcu_read_lock */
624 bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
626 int res = __legitimize_mnt(bastard, seq);
629 if (unlikely(res < 0)) {
638 * find the first mount at @dentry on vfsmount @mnt.
639 * call under rcu_read_lock()
641 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
643 struct hlist_head *head = m_hash(mnt, dentry);
646 hlist_for_each_entry_rcu(p, head, mnt_hash)
647 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
653 * lookup_mnt - Return the first child mount mounted at path
655 * "First" means first mounted chronologically. If you create the
658 * mount /dev/sda1 /mnt
659 * mount /dev/sda2 /mnt
660 * mount /dev/sda3 /mnt
662 * Then lookup_mnt() on the base /mnt dentry in the root mount will
663 * return successively the root dentry and vfsmount of /dev/sda1, then
664 * /dev/sda2, then /dev/sda3, then NULL.
666 * lookup_mnt takes a reference to the found vfsmount.
668 struct vfsmount *lookup_mnt(const struct path *path)
670 struct mount *child_mnt;
676 seq = read_seqbegin(&mount_lock);
677 child_mnt = __lookup_mnt(path->mnt, path->dentry);
678 m = child_mnt ? &child_mnt->mnt : NULL;
679 } while (!legitimize_mnt(m, seq));
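/*
 * Sketch: how a path walk conceptually follows a stack of mounts with
 * lookup_mnt(); the real traversal lives in fs/namei.c.
 */
static inline void example_follow_down(struct path *path)
{
	struct vfsmount *m;

	while ((m = lookup_mnt(path))) { /* reference already taken */
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = m;
		path->dentry = dget(m->mnt_root);
	}
}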
684 static inline void lock_ns_list(struct mnt_namespace *ns)
686 spin_lock(&ns->ns_lock);
689 static inline void unlock_ns_list(struct mnt_namespace *ns)
691 spin_unlock(&ns->ns_lock);
694 static inline bool mnt_is_cursor(struct mount *mnt)
696 return mnt->mnt.mnt_flags & MNT_CURSOR;
700 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
701 * current mount namespace.
703 * The common case is dentries are not mountpoints at all and that
704 * test is handled inline. For the slow case when we are actually
705 * dealing with a mountpoint of some kind, walk through all of the
706 * mounts in the current mount namespace and test to see if the dentry
709 * The mount_hashtable is not usable in this context because we
710 * need to identify all mounts that may be in the current mount
711 * namespace not just a mount that happens to have some specified
714 bool __is_local_mountpoint(struct dentry *dentry)
716 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
718 bool is_covered = false;
720 down_read(&namespace_sem);
722 list_for_each_entry(mnt, &ns->list, mnt_list) {
723 if (mnt_is_cursor(mnt))
725 is_covered = (mnt->mnt_mountpoint == dentry);
730 up_read(&namespace_sem);
735 static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
737 struct hlist_head *chain = mp_hash(dentry);
738 struct mountpoint *mp;
740 hlist_for_each_entry(mp, chain, m_hash) {
741 if (mp->m_dentry == dentry) {
749 static struct mountpoint *get_mountpoint(struct dentry *dentry)
751 struct mountpoint *mp, *new = NULL;
754 if (d_mountpoint(dentry)) {
755 /* might be worth a WARN_ON() */
756 if (d_unlinked(dentry))
757 return ERR_PTR(-ENOENT);
759 read_seqlock_excl(&mount_lock);
760 mp = lookup_mountpoint(dentry);
761 read_sequnlock_excl(&mount_lock);
767 new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
769 return ERR_PTR(-ENOMEM);
772 /* Exactly one process may set d_mounted */
773 ret = d_set_mounted(dentry);
775 /* Someone else set d_mounted? */
779 /* The dentry is not available as a mountpoint? */
784 /* Add the new mountpoint to the hash table */
785 read_seqlock_excl(&mount_lock);
786 new->m_dentry = dget(dentry);
788 hlist_add_head(&new->m_hash, mp_hash(dentry));
789 INIT_HLIST_HEAD(&new->m_list);
790 read_sequnlock_excl(&mount_lock);
800 * vfsmount lock must be held. Additionally, the caller is responsible
801 * for serializing calls for a given disposal list.
803 static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
805 if (!--mp->m_count) {
806 struct dentry *dentry = mp->m_dentry;
807 BUG_ON(!hlist_empty(&mp->m_list));
808 spin_lock(&dentry->d_lock);
809 dentry->d_flags &= ~DCACHE_MOUNTED;
810 spin_unlock(&dentry->d_lock);
811 dput_to_list(dentry, list);
812 hlist_del(&mp->m_hash);
817 /* called with namespace_lock and vfsmount lock */
818 static void put_mountpoint(struct mountpoint *mp)
820 __put_mountpoint(mp, &ex_mountpoints);
823 static inline int check_mnt(struct mount *mnt)
825 return mnt->mnt_ns == current->nsproxy->mnt_ns;
829 * vfsmount lock must be held for write
831 static void touch_mnt_namespace(struct mnt_namespace *ns)
835 wake_up_interruptible(&ns->poll);
840 * vfsmount lock must be held for write
842 static void __touch_mnt_namespace(struct mnt_namespace *ns)
844 if (ns && ns->event != event) {
846 wake_up_interruptible(&ns->poll);
851 * vfsmount lock must be held for write
853 static struct mountpoint *unhash_mnt(struct mount *mnt)
855 struct mountpoint *mp;
856 mnt->mnt_parent = mnt;
857 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
858 list_del_init(&mnt->mnt_child);
859 hlist_del_init_rcu(&mnt->mnt_hash);
860 hlist_del_init(&mnt->mnt_mp_list);
867 * vfsmount lock must be held for write
869 static void umount_mnt(struct mount *mnt)
871 put_mountpoint(unhash_mnt(mnt));
875 * vfsmount lock must be held for write
877 void mnt_set_mountpoint(struct mount *mnt,
878 struct mountpoint *mp,
879 struct mount *child_mnt)
882 mnt_add_count(mnt, 1); /* essentially, that's mntget */
883 child_mnt->mnt_mountpoint = mp->m_dentry;
884 child_mnt->mnt_parent = mnt;
885 child_mnt->mnt_mp = mp;
886 hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
889 static void __attach_mnt(struct mount *mnt, struct mount *parent)
891 hlist_add_head_rcu(&mnt->mnt_hash,
892 m_hash(&parent->mnt, mnt->mnt_mountpoint));
893 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
897 * vfsmount lock must be held for write
899 static void attach_mnt(struct mount *mnt,
900 struct mount *parent,
901 struct mountpoint *mp)
903 mnt_set_mountpoint(parent, mp, mnt);
904 __attach_mnt(mnt, parent);
907 void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
909 struct mountpoint *old_mp = mnt->mnt_mp;
910 struct mount *old_parent = mnt->mnt_parent;
912 list_del_init(&mnt->mnt_child);
913 hlist_del_init(&mnt->mnt_mp_list);
914 hlist_del_init_rcu(&mnt->mnt_hash);
916 attach_mnt(mnt, parent, mp);
918 put_mountpoint(old_mp);
919 mnt_add_count(old_parent, -1);
923 * vfsmount lock must be held for write
925 static void commit_tree(struct mount *mnt)
927 struct mount *parent = mnt->mnt_parent;
930 struct mnt_namespace *n = parent->mnt_ns;
932 BUG_ON(parent == mnt);
934 list_add_tail(&head, &mnt->mnt_list);
935 list_for_each_entry(m, &head, mnt_list)
938 list_splice(&head, n->list.prev);
940 n->mounts += n->pending_mounts;
941 n->pending_mounts = 0;
943 __attach_mnt(mnt, parent);
944 touch_mnt_namespace(n);
947 static struct mount *next_mnt(struct mount *p, struct mount *root)
949 struct list_head *next = p->mnt_mounts.next;
950 if (next == &p->mnt_mounts) {
954 next = p->mnt_child.next;
955 if (next != &p->mnt_parent->mnt_mounts)
960 return list_entry(next, struct mount, mnt_child);
963 static struct mount *skip_mnt_tree(struct mount *p)
965 struct list_head *prev = p->mnt_mounts.prev;
966 while (prev != &p->mnt_mounts) {
967 p = list_entry(prev, struct mount, mnt_child);
968 prev = p->mnt_mounts.prev;
974 * vfs_create_mount - Create a mount for a configured superblock
975 * @fc: The configuration context with the superblock attached
977 * Create a mount to an already configured superblock. If necessary, the
978 * caller should invoke vfs_get_tree() before calling this.
980 * Note that this does not attach the mount to anything.
982 struct vfsmount *vfs_create_mount(struct fs_context *fc)
985 struct user_namespace *fs_userns;
988 return ERR_PTR(-EINVAL);
990 mnt = alloc_vfsmnt(fc->source ?: "none");
992 return ERR_PTR(-ENOMEM);
994 if (fc->sb_flags & SB_KERNMOUNT)
995 mnt->mnt.mnt_flags = MNT_INTERNAL;
997 atomic_inc(&fc->root->d_sb->s_active);
998 mnt->mnt.mnt_sb = fc->root->d_sb;
999 mnt->mnt.mnt_root = dget(fc->root);
1000 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
1001 mnt->mnt_parent = mnt;
1003 fs_userns = mnt->mnt.mnt_sb->s_user_ns;
1004 if (!initial_idmapping(fs_userns))
1005 mnt->mnt.mnt_userns = get_user_ns(fs_userns);
1008 list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
1009 unlock_mount_hash();
1012 EXPORT_SYMBOL(vfs_create_mount);
1014 struct vfsmount *fc_mount(struct fs_context *fc)
1016 int err = vfs_get_tree(fc);
1018 up_write(&fc->root->d_sb->s_umount);
1019 return vfs_create_mount(fc);
1021 return ERR_PTR(err);
1023 EXPORT_SYMBOL(fc_mount);
1025 struct vfsmount *vfs_kern_mount(struct file_system_type *type,
1026 int flags, const char *name,
1029 struct fs_context *fc;
1030 struct vfsmount *mnt;
1034 return ERR_PTR(-EINVAL);
1036 fc = fs_context_for_mount(type, flags);
1038 return ERR_CAST(fc);
1041 ret = vfs_parse_fs_string(fc, "source",
1042 name, strlen(name));
1044 ret = parse_monolithic_mount_data(fc, data);
1053 EXPORT_SYMBOL_GPL(vfs_kern_mount);
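/*
 * Sketch (no such caller here): how a subsystem creates a kernel-internal
 * mount, in the style of kern_mount() users.
 */
static inline struct vfsmount *example_internal_mount(void)
{
	struct file_system_type *type = get_fs_type("tmpfs");
	struct vfsmount *mnt;

	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
	put_filesystem(type);
	return mnt;
}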
1056 vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
1057 const char *name, void *data)
1059 /* Until it is worked out how to pass the user namespace
1060 * through from the parent mount to the submount, don't support
1061 * unprivileged mounts with submounts.
1063 if (mountpoint->d_sb->s_user_ns != &init_user_ns)
1064 return ERR_PTR(-EPERM);
1066 return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
1068 EXPORT_SYMBOL_GPL(vfs_submount);
1070 static struct mount *clone_mnt(struct mount *old, struct dentry *root,
1073 struct super_block *sb = old->mnt.mnt_sb;
1077 mnt = alloc_vfsmnt(old->mnt_devname);
1079 return ERR_PTR(-ENOMEM);
1081 if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
1082 mnt->mnt_group_id = 0; /* not a peer of original */
1084 mnt->mnt_group_id = old->mnt_group_id;
1086 if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
1087 err = mnt_alloc_group_id(mnt);
1092 mnt->mnt.mnt_flags = old->mnt.mnt_flags;
1093 mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
1095 atomic_inc(&sb->s_active);
1096 mnt->mnt.mnt_userns = mnt_user_ns(&old->mnt);
1097 if (!initial_idmapping(mnt->mnt.mnt_userns))
1098 mnt->mnt.mnt_userns = get_user_ns(mnt->mnt.mnt_userns);
1099 mnt->mnt.mnt_sb = sb;
1100 mnt->mnt.mnt_root = dget(root);
1101 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
1102 mnt->mnt_parent = mnt;
1104 list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
1105 unlock_mount_hash();
1107 if ((flag & CL_SLAVE) ||
1108 ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
1109 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
1110 mnt->mnt_master = old;
1111 CLEAR_MNT_SHARED(mnt);
1112 } else if (!(flag & CL_PRIVATE)) {
1113 if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
1114 list_add(&mnt->mnt_share, &old->mnt_share);
1115 if (IS_MNT_SLAVE(old))
1116 list_add(&mnt->mnt_slave, &old->mnt_slave);
1117 mnt->mnt_master = old->mnt_master;
1119 CLEAR_MNT_SHARED(mnt);
1121 if (flag & CL_MAKE_SHARED)
1122 set_mnt_shared(mnt);
1124 /* stick the duplicate mount on the same expiry list
1125 * as the original if that was on one */
1126 if (flag & CL_EXPIRE) {
1127 if (!list_empty(&old->mnt_expire))
1128 list_add(&mnt->mnt_expire, &old->mnt_expire);
1136 return ERR_PTR(err);
1139 static void cleanup_mnt(struct mount *mnt)
1141 struct hlist_node *p;
1144 * The warning here probably indicates that somebody messed
1145 * up a mnt_want/drop_write() pair. If this happens, the
1146 * filesystem was probably unable to make r/w->r/o transitions.
1147 * The locking used to deal with mnt_count decrement provides barriers,
1148 * so mnt_get_writers() below is safe.
1150 WARN_ON(mnt_get_writers(mnt));
1151 if (unlikely(mnt->mnt_pins.first))
1153 hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
1154 hlist_del(&m->mnt_umount);
1157 fsnotify_vfsmount_delete(&mnt->mnt);
1158 dput(mnt->mnt.mnt_root);
1159 deactivate_super(mnt->mnt.mnt_sb);
1161 call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
1164 static void __cleanup_mnt(struct rcu_head *head)
1166 cleanup_mnt(container_of(head, struct mount, mnt_rcu));
1169 static LLIST_HEAD(delayed_mntput_list);
1170 static void delayed_mntput(struct work_struct *unused)
1172 struct llist_node *node = llist_del_all(&delayed_mntput_list);
1173 struct mount *m, *t;
1175 llist_for_each_entry_safe(m, t, node, mnt_llist)
1178 static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
1180 static void mntput_no_expire(struct mount *mnt)
1186 if (likely(READ_ONCE(mnt->mnt_ns))) {
1188 * Since we don't do lock_mount_hash() here,
1189 * ->mnt_ns can change under us. However, if it's
1190 * non-NULL, then there's a reference that won't
1191 * be dropped until after an RCU delay done after
1192 * turning ->mnt_ns NULL. So if we observe it
1193 * non-NULL under rcu_read_lock(), the reference
1194 * we are dropping is not the final one.
1196 mnt_add_count(mnt, -1);
1202 * make sure that if __legitimize_mnt() has not seen us grab
1203 * mount_lock, we'll see their refcount increment here.
1206 mnt_add_count(mnt, -1);
1207 count = mnt_get_count(mnt);
1211 unlock_mount_hash();
1214 if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
1216 unlock_mount_hash();
1219 mnt->mnt.mnt_flags |= MNT_DOOMED;
1222 list_del(&mnt->mnt_instance);
1224 if (unlikely(!list_empty(&mnt->mnt_mounts))) {
1225 struct mount *p, *tmp;
1226 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
1227 __put_mountpoint(unhash_mnt(p), &list);
1228 hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
1231 unlock_mount_hash();
1232 shrink_dentry_list(&list);
1234 if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
1235 struct task_struct *task = current;
1236 if (likely(!(task->flags & PF_KTHREAD))) {
1237 init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
1238 if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
1241 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
1242 schedule_delayed_work(&delayed_mntput_work, 1);
1248 void mntput(struct vfsmount *mnt)
1251 struct mount *m = real_mount(mnt);
1252 /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
1253 if (unlikely(m->mnt_expiry_mark))
1254 m->mnt_expiry_mark = 0;
1255 mntput_no_expire(m);
1258 EXPORT_SYMBOL(mntput);
1260 struct vfsmount *mntget(struct vfsmount *mnt)
1263 mnt_add_count(real_mount(mnt), 1);
1266 EXPORT_SYMBOL(mntget);
1269 * path_is_mountpoint() - Check if path is a mount in the current namespace.
1270 * @path: path to check
1272 * d_mountpoint() can only be used reliably to establish if a dentry is
1273 * not mounted in any namespace and that common case is handled inline.
1274 * d_mountpoint() isn't aware of the possibility there may be multiple
1275 * mounts using a given dentry in a different namespace. This function
1276 * checks if the passed in path is a mountpoint rather than the dentry
1279 bool path_is_mountpoint(const struct path *path)
1284 if (!d_mountpoint(path->dentry))
1289 seq = read_seqbegin(&mount_lock);
1290 res = __path_is_mountpoint(path);
1291 } while (read_seqretry(&mount_lock, seq));
1296 EXPORT_SYMBOL(path_is_mountpoint);
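/*
 * Example: if another mount namespace has a mount on the same underlying
 * dentry, d_mountpoint() is true in every namespace, while
 * path_is_mountpoint() reports true only where a mount actually covers
 * the given path.
 */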
1298 struct vfsmount *mnt_clone_internal(const struct path *path)
1301 p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
1304 p->mnt.mnt_flags |= MNT_INTERNAL;
1308 #ifdef CONFIG_PROC_FS
1309 static struct mount *mnt_list_next(struct mnt_namespace *ns,
1310 struct list_head *p)
1312 struct mount *mnt, *ret = NULL;
1315 list_for_each_continue(p, &ns->list) {
1316 mnt = list_entry(p, typeof(*mnt), mnt_list);
1317 if (!mnt_is_cursor(mnt)) {
1327 /* iterator; we want it to have access to namespace_sem, thus here... */
1328 static void *m_start(struct seq_file *m, loff_t *pos)
1330 struct proc_mounts *p = m->private;
1331 struct list_head *prev;
1333 down_read(&namespace_sem);
1335 prev = &p->ns->list;
1337 prev = &p->cursor.mnt_list;
1339 /* Read after we'd reached the end? */
1340 if (list_empty(prev))
1344 return mnt_list_next(p->ns, prev);
1347 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
1349 struct proc_mounts *p = m->private;
1350 struct mount *mnt = v;
1353 return mnt_list_next(p->ns, &mnt->mnt_list);
1356 static void m_stop(struct seq_file *m, void *v)
1358 struct proc_mounts *p = m->private;
1359 struct mount *mnt = v;
1361 lock_ns_list(p->ns);
1363 list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
1365 list_del_init(&p->cursor.mnt_list);
1366 unlock_ns_list(p->ns);
1367 up_read(&namespace_sem);
1370 static int m_show(struct seq_file *m, void *v)
1372 struct proc_mounts *p = m->private;
1373 struct mount *r = v;
1374 return p->show(m, &r->mnt);
1377 const struct seq_operations mounts_op = {
1384 void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
1386 down_read(&namespace_sem);
1388 list_del(&cursor->mnt_list);
1390 up_read(&namespace_sem);
1392 #endif /* CONFIG_PROC_FS */
1395 * may_umount_tree - check if a mount tree is busy
1396 * @m: root of mount tree
1398 * This is called to check if a tree of mounts has any
1399 * open files, pwds, chroots or sub mounts that are
1402 int may_umount_tree(struct vfsmount *m)
1404 struct mount *mnt = real_mount(m);
1405 int actual_refs = 0;
1406 int minimum_refs = 0;
1410 /* write lock needed for mnt_get_count */
1412 for (p = mnt; p; p = next_mnt(p, mnt)) {
1413 actual_refs += mnt_get_count(p);
1416 unlock_mount_hash();
1418 if (actual_refs > minimum_refs)
1424 EXPORT_SYMBOL(may_umount_tree);
1427 * may_umount - check if a mount point is busy
1428 * @mnt: root of mount
1430 * This is called to check if a mount point has any
1431 * open files, pwds, chroots or sub mounts. If the
1432 * mount has sub mounts this will return busy
1433 * regardless of whether the sub mounts are busy.
1435 * Doesn't take quota and stuff into account. IOW, in some cases it will
1436 * give false negatives. The main reason why it's here is that we need
1437 * a non-destructive way to look for easily umountable filesystems.
1439 int may_umount(struct vfsmount *mnt)
1442 down_read(&namespace_sem);
1444 if (propagate_mount_busy(real_mount(mnt), 2))
1446 unlock_mount_hash();
1447 up_read(&namespace_sem);
1451 EXPORT_SYMBOL(may_umount);
1453 static void namespace_unlock(void)
1455 struct hlist_head head;
1456 struct hlist_node *p;
1460 hlist_move_list(&unmounted, &head);
1461 list_splice_init(&ex_mountpoints, &list);
1463 up_write(&namespace_sem);
1465 shrink_dentry_list(&list);
1467 if (likely(hlist_empty(&head)))
1470 synchronize_rcu_expedited();
1472 hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
1473 hlist_del(&m->mnt_umount);
1478 static inline void namespace_lock(void)
1480 down_write(&namespace_sem);
1483 enum umount_tree_flags {
1485 UMOUNT_PROPAGATE = 2,
1486 UMOUNT_CONNECTED = 4,
1489 static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1491 /* Leaving mounts connected is only valid for lazy umounts */
1492 if (how & UMOUNT_SYNC)
1495 /* A mount without a parent has nothing to be connected to */
1496 if (!mnt_has_parent(mnt))
1499 /* Because the reference counting rules change when mounts are
1500 * unmounted and connected, umounted mounts may not be
1501 * connected to mounted mounts.
1503 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1506 /* Has it been requested that the mount remain connected? */
1507 if (how & UMOUNT_CONNECTED)
1510 /* Is the mount locked such that it needs to remain connected? */
1511 if (IS_MNT_LOCKED(mnt))
1514 /* By default disconnect the mount */
1519 * mount_lock must be held
1520 * namespace_sem must be held for write
1522 static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1524 LIST_HEAD(tmp_list);
1527 if (how & UMOUNT_PROPAGATE)
1528 propagate_mount_unlock(mnt);
1530 /* Gather the mounts to umount */
1531 for (p = mnt; p; p = next_mnt(p, mnt)) {
1532 p->mnt.mnt_flags |= MNT_UMOUNT;
1533 list_move(&p->mnt_list, &tmp_list);
1536 /* Hide the mounts from mnt_mounts */
1537 list_for_each_entry(p, &tmp_list, mnt_list) {
1538 list_del_init(&p->mnt_child);
1541 /* Add propagated mounts to the tmp_list */
1542 if (how & UMOUNT_PROPAGATE)
1543 propagate_umount(&tmp_list);
1545 while (!list_empty(&tmp_list)) {
1546 struct mnt_namespace *ns;
1548 p = list_first_entry(&tmp_list, struct mount, mnt_list);
1549 list_del_init(&p->mnt_expire);
1550 list_del_init(&p->mnt_list);
1554 __touch_mnt_namespace(ns);
1557 if (how & UMOUNT_SYNC)
1558 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1560 disconnect = disconnect_mount(p, how);
1561 if (mnt_has_parent(p)) {
1562 mnt_add_count(p->mnt_parent, -1);
1564 /* Don't forget about p */
1565 list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
1570 change_mnt_propagation(p, MS_PRIVATE);
1572 hlist_add_head(&p->mnt_umount, &unmounted);
1576 static void shrink_submounts(struct mount *mnt);
1578 static int do_umount_root(struct super_block *sb)
1582 down_write(&sb->s_umount);
1583 if (!sb_rdonly(sb)) {
1584 struct fs_context *fc;
1586 fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
1591 ret = parse_monolithic_mount_data(fc, NULL);
1593 ret = reconfigure_super(fc);
1597 up_write(&sb->s_umount);
1601 static int do_umount(struct mount *mnt, int flags)
1603 struct super_block *sb = mnt->mnt.mnt_sb;
1606 retval = security_sb_umount(&mnt->mnt, flags);
1611 * Allow userspace to request a mountpoint be expired rather than
1612 * unmounting unconditionally. Unmount only happens if:
1613 * (1) the mark is already set (the mark is cleared by mntput())
1614 * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
1616 if (flags & MNT_EXPIRE) {
1617 if (&mnt->mnt == current->fs->root.mnt ||
1618 flags & (MNT_FORCE | MNT_DETACH))
1622 * probably don't strictly need the lock here if we examined
1623 * all race cases, but it's a slowpath.
1626 if (mnt_get_count(mnt) != 2) {
1627 unlock_mount_hash();
1630 unlock_mount_hash();
1632 if (!xchg(&mnt->mnt_expiry_mark, 1))
1637 * If we may have to abort operations to get out of this
1638 * mount, and they will themselves hold resources, we must
1639 * allow the fs to do things. In the Unix tradition of
1640 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
1641 * might fail to complete on the first run through, as other tasks
1642 * must return, and the like. That's for the mount program to worry
1643 * about for the moment.
1646 if (flags & MNT_FORCE && sb->s_op->umount_begin) {
1647 sb->s_op->umount_begin(sb);
1651 * No sense to grab the lock for this test, but test itself looks
1652 * somewhat bogus. Suggestions for better replacement?
1653 * Ho-hum... In principle, we might treat that as umount + switch
1654 * to rootfs. GC would eventually take care of the old vfsmount.
1655 * Actually it makes sense, especially if rootfs would contain a
1656 * /reboot - static binary that would close all descriptors and
1657 * call reboot(2). Then init(8) could umount root and exec /reboot.
1659 if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
1661 * Special case for "unmounting" root ...
1662 * we just try to remount it readonly.
1664 if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
1666 return do_umount_root(sb);
1672 /* Recheck MNT_LOCKED with the locks held */
1674 if (mnt->mnt.mnt_flags & MNT_LOCKED)
1678 if (flags & MNT_DETACH) {
1679 if (!list_empty(&mnt->mnt_list))
1680 umount_tree(mnt, UMOUNT_PROPAGATE);
1683 shrink_submounts(mnt);
1685 if (!propagate_mount_busy(mnt, 2)) {
1686 if (!list_empty(&mnt->mnt_list))
1687 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
1692 unlock_mount_hash();
1698 * __detach_mounts - lazily unmount all mounts on the specified dentry
1700 * During unlink, rmdir, and d_drop it is possible to lose the path
1701 * to an existing mountpoint, and wind up leaking the mount.
1702 * detach_mounts allows lazily unmounting those mounts instead of
1705 * The caller may hold dentry->d_inode->i_mutex.
1707 void __detach_mounts(struct dentry *dentry)
1709 struct mountpoint *mp;
1714 mp = lookup_mountpoint(dentry);
1719 while (!hlist_empty(&mp->m_list)) {
1720 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1721 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1723 hlist_add_head(&mnt->mnt_umount, &unmounted);
1725 else umount_tree(mnt, UMOUNT_CONNECTED);
1729 unlock_mount_hash();
1734 * Is the caller allowed to modify his namespace?
1736 static inline bool may_mount(void)
1738 return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
1741 static void warn_mandlock(void)
1743 pr_warn_once("=======================================================\n"
1744 "WARNING: The mand mount option has been deprecated and\n"
1745 " and is ignored by this kernel. Remove the mand\n"
1746 " option from the mount to silence this warning.\n"
1747 "=======================================================\n");
1750 static int can_umount(const struct path *path, int flags)
1752 struct mount *mnt = real_mount(path->mnt);
1756 if (path->dentry != path->mnt->mnt_root)
1758 if (!check_mnt(mnt))
1760 if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1762 if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
1767 // caller is responsible for flags being sane
1768 int path_umount(struct path *path, int flags)
1770 struct mount *mnt = real_mount(path->mnt);
1773 ret = can_umount(path, flags);
1775 ret = do_umount(mnt, flags);
1777 /* we mustn't call path_put() as that would clear mnt_expiry_mark */
1779 mntput_no_expire(mnt);
1783 static int ksys_umount(char __user *name, int flags)
1785 int lookup_flags = LOOKUP_MOUNTPOINT;
1789 // basic validity checks done first
1790 if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
1793 if (!(flags & UMOUNT_NOFOLLOW))
1794 lookup_flags |= LOOKUP_FOLLOW;
1795 ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
1798 return path_umount(&path, flags);
1801 SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1803 return ksys_umount(name, flags);
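/*
 * Userspace sketch: umount2("/mnt", MNT_DETACH) takes the lazy path in
 * do_umount() above; MNT_FORCE, MNT_EXPIRE and UMOUNT_NOFOLLOW are the
 * other flags accepted.
 */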
1806 #ifdef __ARCH_WANT_SYS_OLDUMOUNT
1809 * The 2.0 compatible umount. No flags.
1811 SYSCALL_DEFINE1(oldumount, char __user *, name)
1813 return ksys_umount(name, 0);
1818 static bool is_mnt_ns_file(struct dentry *dentry)
1820 /* Is this a proxy for a mount namespace? */
1821 return dentry->d_op == &ns_dentry_operations &&
1822 dentry->d_fsdata == &mntns_operations;
1825 static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
1827 return container_of(ns, struct mnt_namespace, ns);
1830 struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
1835 static bool mnt_ns_loop(struct dentry *dentry)
1837 /* Could bind mounting the mount namespace inode cause a
1838 * mount namespace loop?
1840 struct mnt_namespace *mnt_ns;
1841 if (!is_mnt_ns_file(dentry))
1844 mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
1845 return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
1848 struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1851 struct mount *res, *p, *q, *r, *parent;
1853 if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
1854 return ERR_PTR(-EINVAL);
1856 if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
1857 return ERR_PTR(-EINVAL);
1859 res = q = clone_mnt(mnt, dentry, flag);
1863 q->mnt_mountpoint = mnt->mnt_mountpoint;
1866 list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
1868 if (!is_subdir(r->mnt_mountpoint, dentry))
1871 for (s = r; s; s = next_mnt(s, r)) {
1872 if (!(flag & CL_COPY_UNBINDABLE) &&
1873 IS_MNT_UNBINDABLE(s)) {
1874 if (s->mnt.mnt_flags & MNT_LOCKED) {
1875 /* Both unbindable and locked. */
1876 q = ERR_PTR(-EPERM);
1879 s = skip_mnt_tree(s);
1883 if (!(flag & CL_COPY_MNT_NS_FILE) &&
1884 is_mnt_ns_file(s->mnt.mnt_root)) {
1885 s = skip_mnt_tree(s);
1888 while (p != s->mnt_parent) {
1894 q = clone_mnt(p, p->mnt.mnt_root, flag);
1898 list_add_tail(&q->mnt_list, &res->mnt_list);
1899 attach_mnt(q, parent, p->mnt_mp);
1900 unlock_mount_hash();
1907 umount_tree(res, UMOUNT_SYNC);
1908 unlock_mount_hash();
1913 /* Caller should check returned pointer for errors */
1915 struct vfsmount *collect_mounts(const struct path *path)
1919 if (!check_mnt(real_mount(path->mnt)))
1920 tree = ERR_PTR(-EINVAL);
1922 tree = copy_tree(real_mount(path->mnt), path->dentry,
1923 CL_COPY_ALL | CL_PRIVATE);
1926 return ERR_CAST(tree);
1929 EXPORT_SYMBOL_GPL(collect_mounts);
1931 static void free_mnt_ns(struct mnt_namespace *);
1932 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);
1934 void dissolve_on_fput(struct vfsmount *mnt)
1936 struct mnt_namespace *ns;
1939 ns = real_mount(mnt)->mnt_ns;
1942 umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
1946 unlock_mount_hash();
1952 void drop_collected_mounts(struct vfsmount *mnt)
1956 umount_tree(real_mount(mnt), 0);
1957 unlock_mount_hash();
1960 EXPORT_SYMBOL_GPL(drop_collected_mounts);
1962 static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
1964 struct mount *child;
1966 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
1967 if (!is_subdir(child->mnt_mountpoint, dentry))
1970 if (child->mnt.mnt_flags & MNT_LOCKED)
1977 * clone_private_mount - create a private clone of a path
1978 * @path: path to clone
1980 * This creates a new vfsmount, which will be the clone of @path. The new mount
1981 * will not be attached anywhere in the namespace and will be private (i.e.
1982 * changes to the originating mount won't be propagated into this).
1984 * Release with mntput().
1986 struct vfsmount *clone_private_mount(const struct path *path)
1988 struct mount *old_mnt = real_mount(path->mnt);
1989 struct mount *new_mnt;
1991 down_read(&namespace_sem);
1992 if (IS_MNT_UNBINDABLE(old_mnt))
1995 if (!check_mnt(old_mnt))
1998 if (has_locked_children(old_mnt, path->dentry))
2001 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
2002 up_read(&namespace_sem);
2004 if (IS_ERR(new_mnt))
2005 return ERR_CAST(new_mnt);
2007 /* Longterm mount to be removed by kern_unmount*() */
2008 new_mnt->mnt_ns = MNT_NS_INTERNAL;
2010 return &new_mnt->mnt;
2013 up_read(&namespace_sem);
2014 return ERR_PTR(-EINVAL);
2016 EXPORT_SYMBOL_GPL(clone_private_mount);
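/*
 * Sketch: a filesystem taking a longterm private clone of a lower layer,
 * roughly what overlayfs does for its lower paths (error handling trimmed).
 */
static inline struct vfsmount *example_clone_lower(const struct path *lower)
{
	struct vfsmount *m = clone_private_mount(lower);

	if (!IS_ERR(m))
		m->mnt_flags |= MNT_NOATIME; /* private copy, safe to tweak */
	return m;
}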
2018 int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
2019 struct vfsmount *root)
2022 int res = f(root, arg);
2025 list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
2026 res = f(&mnt->mnt, arg);
2032 EXPORT_SYMBOL_GPL(iterate_mounts);
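/*
 * Sketch: counting the mounts in a collected tree; the callback and
 * counter are hypothetical (audit's tag_mount() is a real analogue).
 */
static int example_count_one(struct vfsmount *mnt, void *arg)
{
	(*(unsigned int *)arg)++;
	return 0; /* a non-zero return would abort the walk */
}

static inline unsigned int example_count_tree(struct vfsmount *tree)
{
	unsigned int n = 0;

	iterate_mounts(example_count_one, &n, tree);
	return n;
}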
2034 static void lock_mnt_tree(struct mount *mnt)
2038 for (p = mnt; p; p = next_mnt(p, mnt)) {
2039 int flags = p->mnt.mnt_flags;
2040 /* Don't allow unprivileged users to change mount flags */
2041 flags |= MNT_LOCK_ATIME;
2043 if (flags & MNT_READONLY)
2044 flags |= MNT_LOCK_READONLY;
2046 if (flags & MNT_NODEV)
2047 flags |= MNT_LOCK_NODEV;
2049 if (flags & MNT_NOSUID)
2050 flags |= MNT_LOCK_NOSUID;
2052 if (flags & MNT_NOEXEC)
2053 flags |= MNT_LOCK_NOEXEC;
2054 /* Don't allow unprivileged users to reveal what is under a mount */
2055 if (list_empty(&p->mnt_expire))
2056 flags |= MNT_LOCKED;
2057 p->mnt.mnt_flags = flags;
2061 static void cleanup_group_ids(struct mount *mnt, struct mount *end)
2065 for (p = mnt; p != end; p = next_mnt(p, mnt)) {
2066 if (p->mnt_group_id && !IS_MNT_SHARED(p))
2067 mnt_release_group_id(p);
2071 static int invent_group_ids(struct mount *mnt, bool recurse)
2075 for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
2076 if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
2077 int err = mnt_alloc_group_id(p);
2079 cleanup_group_ids(mnt, p);
2088 int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
2090 unsigned int max = READ_ONCE(sysctl_mount_max);
2091 unsigned int mounts = 0, old, pending, sum;
2094 for (p = mnt; p; p = next_mnt(p, mnt))
2098 pending = ns->pending_mounts;
2099 sum = old + pending;
2103 (mounts > (max - sum)))
2106 ns->pending_mounts = pending + mounts;
2111 * @source_mnt : mount tree to be attached
2112 * @nd : the place the mount tree @source_mnt is attached
2113 * @parent_nd : if non-null, detach the source_mnt from its parent and
2114 * store the parent mount and mountpoint dentry.
2115 * (done when source_mnt is moved)
2117 * NOTE: the table below explains the semantics when a source mount
2118 * of a given type is attached to a destination mount of a given type.
2119 * ---------------------------------------------------------------------------
2120 * | BIND MOUNT OPERATION |
2121 * |**************************************************************************
2122 * | source-->| shared | private | slave | unbindable |
2126 * |**************************************************************************
2127 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
2129 * |non-shared| shared (+) | private | slave (*) | invalid |
2130 * ***************************************************************************
2131 * A bind operation clones the source mount and mounts the clone on the
2132 * destination mount.
2134 * (++) the cloned mount is propagated to all the mounts in the propagation
2135 * tree of the destination mount and the cloned mount is added to
2136 * the peer group of the source mount.
2137 * (+) the cloned mount is created under the destination mount and is marked
2138 * as shared. The cloned mount is added to the peer group of the source
2140 * (+++) the mount is propagated to all the mounts in the propagation tree
2141 * of the destination mount and the cloned mount is made slave
2142 * of the same master as that of the source mount. The cloned mount
2143 * is marked as 'shared and slave'.
2144 * (*) the cloned mount is made a slave of the same master as that of the
2147 * ---------------------------------------------------------------------------
2148 * | MOVE MOUNT OPERATION |
2149 * |**************************************************************************
2150 * | source-->| shared | private | slave | unbindable |
2154 * |**************************************************************************
2155 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
2157 * |non-shared| shared (+*) | private | slave (*) | unbindable |
2158 * ***************************************************************************
2160 * (+) the mount is moved to the destination. And is then propagated to
2161 * all the mounts in the propagation tree of the destination mount.
2162 * (+*) the mount is moved to the destination.
2163 * (+++) the mount is moved to the destination and is then propagated to
2164 * all the mounts belonging to the destination mount's propagation tree.
2165 * the mount is marked as 'shared and slave'.
2166 * (*) the mount continues to be a slave at the new location.
2168 * If the source mount is a tree, the operations explained above are
2169 * applied to each mount in the tree.
2170 * Must be called without spinlocks held, since this function can sleep
2173 static int attach_recursive_mnt(struct mount *source_mnt,
2174 struct mount *dest_mnt,
2175 struct mountpoint *dest_mp,
2178 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2179 HLIST_HEAD(tree_list);
2180 struct mnt_namespace *ns = dest_mnt->mnt_ns;
2181 struct mountpoint *smp;
2182 struct mount *child, *p;
2183 struct hlist_node *n;
2186 /* Preallocate a mountpoint in case the new mounts need
2187 * to be tucked under other mounts.
2189 smp = get_mountpoint(source_mnt->mnt.mnt_root);
2191 return PTR_ERR(smp);
2193 /* Is there space to add these mounts to the mount namespace? */
2195 err = count_mounts(ns, source_mnt);
2200 if (IS_MNT_SHARED(dest_mnt)) {
2201 err = invent_group_ids(source_mnt, true);
2204 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
2207 goto out_cleanup_ids;
2208 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
2214 unhash_mnt(source_mnt);
2215 attach_mnt(source_mnt, dest_mnt, dest_mp);
2216 touch_mnt_namespace(source_mnt->mnt_ns);
2218 if (source_mnt->mnt_ns) {
2219 /* move from anon - the caller will destroy */
2220 list_del_init(&source_mnt->mnt_ns->list);
2222 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
2223 commit_tree(source_mnt);
2226 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
2228 hlist_del_init(&child->mnt_hash);
2229 q = __lookup_mnt(&child->mnt_parent->mnt,
2230 child->mnt_mountpoint);
2232 mnt_change_mountpoint(child, smp, q);
2233 /* Notice when we are propagating across user namespaces */
2234 if (child->mnt_parent->mnt_ns->user_ns != user_ns)
2235 lock_mnt_tree(child);
2236 child->mnt.mnt_flags &= ~MNT_LOCKED;
2239 put_mountpoint(smp);
2240 unlock_mount_hash();
2245 while (!hlist_empty(&tree_list)) {
2246 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
2247 child->mnt_parent->mnt_ns->pending_mounts = 0;
2248 umount_tree(child, UMOUNT_SYNC);
2250 unlock_mount_hash();
2251 cleanup_group_ids(source_mnt, NULL);
2253 ns->pending_mounts = 0;
2255 read_seqlock_excl(&mount_lock);
2256 put_mountpoint(smp);
2257 read_sequnlock_excl(&mount_lock);
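/*
 * Userspace sketch of the "shared dest" column in the table above
 * (paths illustrative): after
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);
 *	mount("/src", "/mnt/dir", NULL, MS_BIND, NULL);
 *
 * the bound clone appears under every peer of /mnt as well.
 */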
2262 static struct mountpoint *lock_mount(struct path *path)
2264 struct vfsmount *mnt;
2265 struct dentry *dentry = path->dentry;
2267 inode_lock(dentry->d_inode);
2268 if (unlikely(cant_mount(dentry))) {
2269 inode_unlock(dentry->d_inode);
2270 return ERR_PTR(-ENOENT);
2273 mnt = lookup_mnt(path);
2275 struct mountpoint *mp = get_mountpoint(dentry);
2278 inode_unlock(dentry->d_inode);
2284 inode_unlock(path->dentry->d_inode);
2287 dentry = path->dentry = dget(mnt->mnt_root);
2291 static void unlock_mount(struct mountpoint *where)
2293 struct dentry *dentry = where->m_dentry;
2295 read_seqlock_excl(&mount_lock);
2296 put_mountpoint(where);
2297 read_sequnlock_excl(&mount_lock);
2300 inode_unlock(dentry->d_inode);
2303 static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
2305 if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
2308 if (d_is_dir(mp->m_dentry) !=
2309 d_is_dir(mnt->mnt.mnt_root))
2312 return attach_recursive_mnt(mnt, p, mp, false);
2316 * Sanity check the flags to change_mnt_propagation.
2319 static int flags_to_propagation_type(int ms_flags)
2321 int type = ms_flags & ~(MS_REC | MS_SILENT);
2323 /* Fail if any non-propagation flags are set */
2324 if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2326 /* Only one propagation flag should be set */
2327 if (!is_power_of_2(type))
2333 * recursively change the type of the mountpoint.
2335 static int do_change_type(struct path *path, int ms_flags)
2338 struct mount *mnt = real_mount(path->mnt);
2339 int recurse = ms_flags & MS_REC;
2343 if (path->dentry != path->mnt->mnt_root)
2346 type = flags_to_propagation_type(ms_flags);
2351 if (type == MS_SHARED) {
2352 err = invent_group_ids(mnt, recurse);
2358 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
2359 change_mnt_propagation(m, type);
2360 unlock_mount_hash();
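/*
 * Userspace sketch: container runtimes typically reach do_change_type()
 * with mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL), making the whole
 * tree propagation-private first. MS_SHARED | MS_REC is valid (MS_REC is
 * masked off in flags_to_propagation_type()); MS_SHARED | MS_SLAVE fails
 * the power-of-two check there.
 */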
2367 static struct mount *__do_loopback(struct path *old_path, int recurse)
2369 struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);
2371 if (IS_MNT_UNBINDABLE(old))
2374 if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
2377 if (!recurse && has_locked_children(old, old_path->dentry))
2381 mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
2383 mnt = clone_mnt(old, old_path->dentry, 0);
2386 mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2392 * do loopback mount.
2394 static int do_loopback(struct path *path, const char *old_name,
2397 struct path old_path;
2398 struct mount *mnt = NULL, *parent;
2399 struct mountpoint *mp;
2401 if (!old_name || !*old_name)
2403 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2408 if (mnt_ns_loop(old_path.dentry))
2411 mp = lock_mount(path);
2417 parent = real_mount(path->mnt);
2418 if (!check_mnt(parent))
2421 mnt = __do_loopback(&old_path, recurse);
2427 err = graft_tree(mnt, parent, mp);
2430 umount_tree(mnt, UMOUNT_SYNC);
2431 unlock_mount_hash();
2436 path_put(&old_path);
2440 static struct file *open_detached_copy(struct path *path, bool recursive)
2442 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2443 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
2444 struct mount *mnt, *p;
2448 return ERR_CAST(ns);
2451 mnt = __do_loopback(path, recursive);
2455 return ERR_CAST(mnt);
2459 for (p = mnt; p; p = next_mnt(p, mnt)) {
2464 list_add_tail(&ns->list, &mnt->mnt_list);
2466 unlock_mount_hash();
2470 path->mnt = &mnt->mnt;
2471 file = dentry_open(path, O_PATH, current_cred());
2473 dissolve_on_fput(path->mnt);
2475 file->f_mode |= FMODE_NEED_UNMOUNT;
2479 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
2483 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
2484 bool detached = flags & OPEN_TREE_CLONE;
2488 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
2490 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
2491 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
2495 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
2498 if (flags & AT_NO_AUTOMOUNT)
2499 lookup_flags &= ~LOOKUP_AUTOMOUNT;
2500 if (flags & AT_SYMLINK_NOFOLLOW)
2501 lookup_flags &= ~LOOKUP_FOLLOW;
2502 if (flags & AT_EMPTY_PATH)
2503 lookup_flags |= LOOKUP_EMPTY;
2505 if (detached && !may_mount())
2508 fd = get_unused_fd_flags(flags & O_CLOEXEC);
2512 error = user_path_at(dfd, filename, lookup_flags, &path);
2513 if (unlikely(error)) {
2514 file = ERR_PTR(error);
2517 file = open_detached_copy(&path, flags & AT_RECURSIVE);
2519 file = dentry_open(&path, O_PATH, current_cred());
2524 return PTR_ERR(file);
2526 fd_install(fd, file);
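/*
 * Userspace sketch (assumes headers that provide SYS_open_tree and the
 * OPEN_TREE_* flags):
 *
 *	fd = syscall(SYS_open_tree, AT_FDCWD, "/some/subtree",
 *		     OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
 *
 * yields a detached, recursive copy that can later be attached elsewhere
 * with move_mount(2).
 */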
2531 * Don't allow locked mount flags to be cleared.
2533 * No locks need to be held here while testing the various MNT_LOCK
2534 * flags because those flags can never be cleared once they are set.
2536 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2538 unsigned int fl = mnt->mnt.mnt_flags;
2540 if ((fl & MNT_LOCK_READONLY) &&
2541 !(mnt_flags & MNT_READONLY))
2544 if ((fl & MNT_LOCK_NODEV) &&
2545 !(mnt_flags & MNT_NODEV))
2548 if ((fl & MNT_LOCK_NOSUID) &&
2549 !(mnt_flags & MNT_NOSUID))
2552 if ((fl & MNT_LOCK_NOEXEC) &&
2553 !(mnt_flags & MNT_NOEXEC))
2556 if ((fl & MNT_LOCK_ATIME) &&
2557 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
2563 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2565 bool readonly_request = (mnt_flags & MNT_READONLY);
2567 if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2570 if (readonly_request)
2571 return mnt_make_readonly(mnt);
2573 mnt->mnt.mnt_flags &= ~MNT_READONLY;
2577 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2579 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2580 mnt->mnt.mnt_flags = mnt_flags;
2581 touch_mnt_namespace(mnt->mnt_ns);
2584 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2586 struct super_block *sb = mnt->mnt_sb;
2588 if (!__mnt_is_readonly(mnt) &&
2589 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
2590 char *buf = (char *)__get_free_page(GFP_KERNEL);
2591 char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
2594 time64_to_tm(sb->s_time_max, 0, &tm);
2596 pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
2598 is_mounted(mnt) ? "remounted" : "mounted",
2600 tm.tm_year+1900, (unsigned long long)sb->s_time_max);
2602 free_page((unsigned long)buf);
2607 * Handle reconfiguration of the mountpoint only without alteration of the
2608 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND in flags.
2611 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
2613 struct super_block *sb = path->mnt->mnt_sb;
2614 struct mount *mnt = real_mount(path->mnt);
2617 if (!check_mnt(mnt))
2620 if (path->dentry != mnt->mnt.mnt_root)
2623 if (!can_change_locked_flags(mnt, mnt_flags))
2627 * We're only checking whether the superblock is read-only, not
2628 * changing it, so only take down_read(&sb->s_umount).
2630 down_read(&sb->s_umount);
2632 ret = change_mount_ro_state(mnt, mnt_flags);
2634 set_mount_attributes(mnt, mnt_flags);
2635 unlock_mount_hash();
2636 up_read(&sb->s_umount);
2638 mnt_warn_timestamp_expiry(path, &mnt->mnt);
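/*
 * Example (sketch): from userspace, this path is reached by remounting a
 * bind mount read-only without touching the superblock; the path is
 * illustrative and error handling is elided.
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/mnt/bind", NULL,
 *	      MS_REMOUNT | MS_BIND | MS_RDONLY, NULL);
 */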
2644 * change filesystem flags. dir should be the physical root of the filesystem.
2645 * If you've mounted a non-root directory somewhere and want to do a remount
2646 * on it - tough luck.
2648 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2649 int mnt_flags, void *data)
2652 struct super_block *sb = path->mnt->mnt_sb;
2653 struct mount *mnt = real_mount(path->mnt);
2654 struct fs_context *fc;
2656 if (!check_mnt(mnt))
2659 if (path->dentry != path->mnt->mnt_root)
2662 if (!can_change_locked_flags(mnt, mnt_flags))
2665 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
2670 err = parse_monolithic_mount_data(fc, data);
2672 down_write(&sb->s_umount);
2674 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
2675 err = reconfigure_super(fc);
2678 set_mount_attributes(mnt, mnt_flags);
2679 unlock_mount_hash();
2682 up_write(&sb->s_umount);
2685 mnt_warn_timestamp_expiry(path, &mnt->mnt);
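/*
 * Example (sketch): a plain MS_REMOUNT ends up here and may pass
 * filesystem-specific data through to reconfigure_super(); the option
 * string is illustrative (an ext4 option) and error handling is elided.
 *
 *	#include <sys/mount.h>
 *
 *	mount(NULL, "/", NULL, MS_REMOUNT, "errors=remount-ro");
 */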
2691 static inline int tree_contains_unbindable(struct mount *mnt)
2694 for (p = mnt; p; p = next_mnt(p, mnt)) {
2695 if (IS_MNT_UNBINDABLE(p))
2702 * Check that there aren't references to earlier/same mount namespaces in the
2703 * specified subtree. Such references can act as pins for mount namespaces
2704 * that aren't checked by the mount-cycle checking code, thereby allowing
2705 * cycles to be made.
2707 static bool check_for_nsfs_mounts(struct mount *subtree)
2713 for (p = subtree; p; p = next_mnt(p, subtree))
2714 if (mnt_ns_loop(p->mnt.mnt_root))
2719 unlock_mount_hash();
2723 static int do_set_group(struct path *from_path, struct path *to_path)
2725 struct mount *from, *to;
2728 from = real_mount(from_path->mnt);
2729 to = real_mount(to_path->mnt);
2734 /* To and From must be mounted */
2735 if (!is_mounted(&from->mnt))
2737 if (!is_mounted(&to->mnt))
2741 /* We should be allowed to modify mount namespaces of both mounts */
2742 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
2744 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
2748 /* To and From paths should be mount roots */
2749 if (from_path->dentry != from_path->mnt->mnt_root)
2751 if (to_path->dentry != to_path->mnt->mnt_root)
2754 /* Setting sharing groups is only allowed across same superblock */
2755 if (from->mnt.mnt_sb != to->mnt.mnt_sb)
2758 /* From mount root should be wider than To mount root */
2759 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
2762 /* From mount should not have locked children in place of To's root */
2763 if (has_locked_children(from, to->mnt.mnt_root))
2766 /* Setting sharing groups is only allowed on private mounts */
2767 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
2770 /* From should not be private */
2771 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
2774 if (IS_MNT_SLAVE(from)) {
2775 struct mount *m = from->mnt_master;
2777 list_add(&to->mnt_slave, &m->mnt_slave_list);
2781 if (IS_MNT_SHARED(from)) {
2782 to->mnt_group_id = from->mnt_group_id;
2783 list_add(&to->mnt_share, &from->mnt_share);
2786 unlock_mount_hash();
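/*
 * Example (sketch): userspace reaches do_set_group() via move_mount(2)
 * with MOVE_MOUNT_SET_GROUP, copying the propagation state of one mount
 * of a superblock onto another; the paths are illustrative and error
 * handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	syscall(SYS_move_mount, AT_FDCWD, "/mnt/shared_instance",
 *		AT_FDCWD, "/mnt/private_instance", MOVE_MOUNT_SET_GROUP);
 */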
2795 static int do_move_mount(struct path *old_path, struct path *new_path)
2797 struct mnt_namespace *ns;
2800 struct mount *parent;
2801 struct mountpoint *mp, *old_mp;
2805 mp = lock_mount(new_path);
2809 old = real_mount(old_path->mnt);
2810 p = real_mount(new_path->mnt);
2811 parent = old->mnt_parent;
2812 attached = mnt_has_parent(old);
2813 old_mp = old->mnt_mp;
2817 /* The mountpoint must be in our namespace. */
2821 /* The thing moved must be mounted... */
2822 if (!is_mounted(&old->mnt))
2825 /* ... and either ours or the root of anon namespace */
2826 if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
2829 if (old->mnt.mnt_flags & MNT_LOCKED)
2832 if (old_path->dentry != old_path->mnt->mnt_root)
2835 if (d_is_dir(new_path->dentry) !=
2836 d_is_dir(old_path->dentry))
2839 * Don't move a mount residing in a shared parent.
2841 if (attached && IS_MNT_SHARED(parent))
2844 * Don't move a mount tree containing unbindable mounts to a destination
2845 * mount which is shared.
2847 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
2850 if (!check_for_nsfs_mounts(old))
2852 for (; mnt_has_parent(p); p = p->mnt_parent)
2856 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
2861 /* if the mount is moved, it should no longer be expired automatically */
2863 list_del_init(&old->mnt_expire);
2865 put_mountpoint(old_mp);
2870 mntput_no_expire(parent);
2877 static int do_move_mount_old(struct path *path, const char *old_name)
2879 struct path old_path;
2882 if (!old_name || !*old_name)
2885 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
2889 err = do_move_mount(&old_path, path);
2890 path_put(&old_path);
2895 * add a mount into a namespace's mount tree
2897 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
2898 struct path *path, int mnt_flags)
2900 struct mount *parent = real_mount(path->mnt);
2902 mnt_flags &= ~MNT_INTERNAL_FLAGS;
2904 if (unlikely(!check_mnt(parent))) {
2905 /* that's acceptable only for automounts done in private ns */
2906 if (!(mnt_flags & MNT_SHRINKABLE))
2908 /* ... and for those we'd better have mountpoint still alive */
2909 if (!parent->mnt_ns)
2913 /* Refuse the same filesystem on the same mount point */
2914 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
2915 path->mnt->mnt_root == path->dentry)
2918 if (d_is_symlink(newmnt->mnt.mnt_root))
2921 newmnt->mnt.mnt_flags = mnt_flags;
2922 return graft_tree(newmnt, parent, mp);
2925 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
2928 * Create a new mount using a superblock configuration and request it
2929 * be added to the namespace tree.
2931 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
2932 unsigned int mnt_flags)
2934 struct vfsmount *mnt;
2935 struct mountpoint *mp;
2936 struct super_block *sb = fc->root->d_sb;
2939 error = security_sb_kern_mount(sb);
2940 if (!error && mount_too_revealing(sb, &mnt_flags))
2943 if (unlikely(error)) {
2948 up_write(&sb->s_umount);
2950 mnt = vfs_create_mount(fc);
2952 return PTR_ERR(mnt);
2954 mnt_warn_timestamp_expiry(mountpoint, mnt);
2956 mp = lock_mount(mountpoint);
2961 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
2969 * create a new mount for userspace and request it to be added into the namespace's tree
2972 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
2973 int mnt_flags, const char *name, void *data)
2975 struct file_system_type *type;
2976 struct fs_context *fc;
2977 const char *subtype = NULL;
2983 type = get_fs_type(fstype);
2987 if (type->fs_flags & FS_HAS_SUBTYPE) {
2988 subtype = strchr(fstype, '.');
2992 put_filesystem(type);
2998 fc = fs_context_for_mount(type, sb_flags);
2999 put_filesystem(type);
3004 err = vfs_parse_fs_string(fc, "subtype",
3005 subtype, strlen(subtype));
3007 err = vfs_parse_fs_string(fc, "source", name, strlen(name));
3009 err = parse_monolithic_mount_data(fc, data);
3010 if (!err && !mount_capable(fc))
3013 err = vfs_get_tree(fc);
3015 err = do_new_mount_fc(fc, path, mnt_flags);
3021 int finish_automount(struct vfsmount *m, struct path *path)
3023 struct dentry *dentry = path->dentry;
3024 struct mountpoint *mp;
3033 mnt = real_mount(m);
3034 /* The new mount record should have at least 2 refs to prevent it being
3035 * expired before we get a chance to add it
3037 BUG_ON(mnt_get_count(mnt) < 2);
3039 if (m->mnt_sb == path->mnt->mnt_sb &&
3040 m->mnt_root == dentry) {
3046 * we don't want to use lock_mount() - in this case, finding something
3047 * that overmounts our mountpoint means "quietly drop what we've
3048 * got", not "try to mount it on top".
3050 inode_lock(dentry->d_inode);
3052 if (unlikely(cant_mount(dentry))) {
3054 goto discard_locked;
3057 if (unlikely(__lookup_mnt(path->mnt, dentry))) {
3060 goto discard_locked;
3063 mp = get_mountpoint(dentry);
3066 goto discard_locked;
3069 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3078 inode_unlock(dentry->d_inode);
3080 /* remove m from any expiration list it may be on */
3081 if (!list_empty(&mnt->mnt_expire)) {
3083 list_del_init(&mnt->mnt_expire);
3092 * mnt_set_expiry - Put a mount on an expiration list
3093 * @mnt: The mount to list.
3094 * @expiry_list: The list to add the mount to.
3096 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3100 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3104 EXPORT_SYMBOL(mnt_set_expiry);
3107 * process a list of expirable mountpoints with the intent of discarding any
3108 * mountpoints that aren't in use and haven't been touched since last we came here.
3111 void mark_mounts_for_expiry(struct list_head *mounts)
3113 struct mount *mnt, *next;
3114 LIST_HEAD(graveyard);
3116 if (list_empty(mounts))
3122 /* extract from the expiration list every vfsmount that matches the
3123 * following criteria:
3124 * - only referenced by its parent vfsmount
3125 * - still marked for expiry (marked on the last call here; marks are
3126 * cleared by mntput())
3128 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3129 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3130 propagate_mount_busy(mnt, 1))
3132 list_move(&mnt->mnt_expire, &graveyard);
3134 while (!list_empty(&graveyard)) {
3135 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3136 touch_mnt_namespace(mnt->mnt_ns);
3137 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3139 unlock_mount_hash();
3143 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
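/*
 * Example (sketch): the intended pattern for an automounting filesystem,
 * using hypothetical names (my_expiry_list, a periodic worker).  Submounts
 * are queued via mnt_set_expiry() and reaped on a timer; a mount survives
 * one pass (which sets the mark) and is unmounted on the next if still
 * unused.
 *
 *	static LIST_HEAD(my_expiry_list);
 *
 *	// in ->d_automount(), after creating newmnt:
 *	mnt_set_expiry(newmnt, &my_expiry_list);
 *
 *	// from periodic delayed work:
 *	mark_mounts_for_expiry(&my_expiry_list);
 */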
3146 * Ripoff of 'select_parent()'
3148 * search the list of submounts for a given mountpoint, and move any
3149 * shrinkable submounts to the 'graveyard' list.
3151 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3153 struct mount *this_parent = parent;
3154 struct list_head *next;
3158 next = this_parent->mnt_mounts.next;
3160 while (next != &this_parent->mnt_mounts) {
3161 struct list_head *tmp = next;
3162 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3165 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3168 * Descend a level if the mnt_mounts list is non-empty.
3170 if (!list_empty(&mnt->mnt_mounts)) {
3175 if (!propagate_mount_busy(mnt, 1)) {
3176 list_move_tail(&mnt->mnt_expire, graveyard);
3181 * All done at this level ... ascend and resume the search
3183 if (this_parent != parent) {
3184 next = this_parent->mnt_child.next;
3185 this_parent = this_parent->mnt_parent;
3192 * process a list of expirable mountpoints with the intent of discarding any
3193 * submounts of a specific parent mountpoint
3195 * mount_lock must be held for write
3197 static void shrink_submounts(struct mount *mnt)
3199 LIST_HEAD(graveyard);
3202 /* extract submounts of 'mountpoint' from the expiration list */
3203 while (select_submounts(mnt, &graveyard)) {
3204 while (!list_empty(&graveyard)) {
3205 m = list_first_entry(&graveyard, struct mount,
3207 touch_mnt_namespace(m->mnt_ns);
3208 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3213 static void *copy_mount_options(const void __user * data)
3216 unsigned left, offset;
3221 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
3223 return ERR_PTR(-ENOMEM);
3225 left = copy_from_user(copy, data, PAGE_SIZE);
3228 * Not all architectures have an exact copy_from_user(). Resort to copying a byte at a time.
3231 offset = PAGE_SIZE - left;
3234 if (get_user(c, (const char __user *)data + offset))
3241 if (left == PAGE_SIZE) {
3243 return ERR_PTR(-EFAULT);
3249 static char *copy_mount_string(const void __user *data)
3251 return data ? strndup_user(data, PATH_MAX) : NULL;
3255 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3256 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
3258 * data is a (void *) that can point to any structure up to
3259 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
3260 * information (or be NULL).
3262 * Pre-0.97 versions of mount() didn't have a flags word.
3263 * When the flags word was introduced its top half was required
3264 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
3265 * Therefore, if this magic number is present, it carries no information
3266 * and must be discarded.
3268 int path_mount(const char *dev_name, struct path *path,
3269 const char *type_page, unsigned long flags, void *data_page)
3271 unsigned int mnt_flags = 0, sb_flags;
3275 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3276 flags &= ~MS_MGC_MSK;
3278 /* Basic sanity checks */
3280 ((char *)data_page)[PAGE_SIZE - 1] = 0;
3282 if (flags & MS_NOUSER)
3285 ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3290 if (flags & SB_MANDLOCK)
3293 /* Default to relatime unless overridden */
3294 if (!(flags & MS_NOATIME))
3295 mnt_flags |= MNT_RELATIME;
3297 /* Separate the per-mountpoint flags */
3298 if (flags & MS_NOSUID)
3299 mnt_flags |= MNT_NOSUID;
3300 if (flags & MS_NODEV)
3301 mnt_flags |= MNT_NODEV;
3302 if (flags & MS_NOEXEC)
3303 mnt_flags |= MNT_NOEXEC;
3304 if (flags & MS_NOATIME)
3305 mnt_flags |= MNT_NOATIME;
3306 if (flags & MS_NODIRATIME)
3307 mnt_flags |= MNT_NODIRATIME;
3308 if (flags & MS_STRICTATIME)
3309 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3310 if (flags & MS_RDONLY)
3311 mnt_flags |= MNT_READONLY;
3312 if (flags & MS_NOSYMFOLLOW)
3313 mnt_flags |= MNT_NOSYMFOLLOW;
3315 /* The default atime for remount is preservation */
3316 if ((flags & MS_REMOUNT) &&
3317 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3318 MS_STRICTATIME)) == 0)) {
3319 mnt_flags &= ~MNT_ATIME_MASK;
3320 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3323 sb_flags = flags & (SB_RDONLY |
3332 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3333 return do_reconfigure_mnt(path, mnt_flags);
3334 if (flags & MS_REMOUNT)
3335 return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3336 if (flags & MS_BIND)
3337 return do_loopback(path, dev_name, flags & MS_REC);
3338 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3339 return do_change_type(path, flags);
3340 if (flags & MS_MOVE)
3341 return do_move_mount_old(path, dev_name);
3343 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3347 long do_mount(const char *dev_name, const char __user *dir_name,
3348 const char *type_page, unsigned long flags, void *data_page)
3353 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3356 ret = path_mount(dev_name, &path, type_page, flags, data_page);
3361 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3363 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3366 static void dec_mnt_namespaces(struct ucounts *ucounts)
3368 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3371 static void free_mnt_ns(struct mnt_namespace *ns)
3373 if (!is_anon_ns(ns))
3374 ns_free_inum(&ns->ns);
3375 dec_mnt_namespaces(ns->ucounts);
3376 put_user_ns(ns->user_ns);
3381 * Assign a sequence number so we can detect when we attempt to bind
3382 * mount a reference to an older mount namespace into the current
3383 * mount namespace, preventing reference counting loops. A 64-bit
3384 * number incrementing at 38.6MHz will take over 15,000 years to wrap,
3385 * which is effectively never, so we can ignore the possibility.
3387 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3389 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3391 struct mnt_namespace *new_ns;
3392 struct ucounts *ucounts;
3395 ucounts = inc_mnt_namespaces(user_ns);
3397 return ERR_PTR(-ENOSPC);
3399 new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
3401 dec_mnt_namespaces(ucounts);
3402 return ERR_PTR(-ENOMEM);
3405 ret = ns_alloc_inum(&new_ns->ns);
3408 dec_mnt_namespaces(ucounts);
3409 return ERR_PTR(ret);
3412 new_ns->ns.ops = &mntns_operations;
3414 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3415 refcount_set(&new_ns->ns.count, 1);
3416 INIT_LIST_HEAD(&new_ns->list);
3417 init_waitqueue_head(&new_ns->poll);
3418 spin_lock_init(&new_ns->ns_lock);
3419 new_ns->user_ns = get_user_ns(user_ns);
3420 new_ns->ucounts = ucounts;
3425 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3426 struct user_namespace *user_ns, struct fs_struct *new_fs)
3428 struct mnt_namespace *new_ns;
3429 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3430 struct mount *p, *q;
3437 if (likely(!(flags & CLONE_NEWNS))) {
3444 new_ns = alloc_mnt_ns(user_ns, false);
3449 /* First pass: copy the tree topology */
3450 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3451 if (user_ns != ns->user_ns)
3452 copy_flags |= CL_SHARED_TO_SLAVE;
3453 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3456 free_mnt_ns(new_ns);
3457 return ERR_CAST(new);
3459 if (user_ns != ns->user_ns) {
3462 unlock_mount_hash();
3465 list_add_tail(&new_ns->list, &new->mnt_list);
3468 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3469 * as belonging to new namespace. We have already acquired a private
3470 * fs_struct, so tsk->fs->lock is not needed.
3478 if (&p->mnt == new_fs->root.mnt) {
3479 new_fs->root.mnt = mntget(&q->mnt);
3482 if (&p->mnt == new_fs->pwd.mnt) {
3483 new_fs->pwd.mnt = mntget(&q->mnt);
3487 p = next_mnt(p, old);
3488 q = next_mnt(q, new);
3491 while (p->mnt.mnt_root != q->mnt.mnt_root)
3492 p = next_mnt(p, old);
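/*
 * Example (sketch): copy_mnt_ns() runs when a task unshares its mount
 * namespace; a typical userspace sequence also makes the new copy's
 * mounts private so later changes stop propagating back.  Error
 * handling is elided.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <sys/mount.h>
 *
 *	unshare(CLONE_NEWNS);
 *	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 */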
3504 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
3506 struct mount *mnt = real_mount(m);
3507 struct mnt_namespace *ns;
3508 struct super_block *s;
3512 ns = alloc_mnt_ns(&init_user_ns, true);
3515 return ERR_CAST(ns);
3520 list_add(&mnt->mnt_list, &ns->list);
3522 err = vfs_path_lookup(m->mnt_root, m,
3523 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
3528 return ERR_PTR(err);
3530 /* trade a vfsmount reference for active sb one */
3531 s = path.mnt->mnt_sb;
3532 atomic_inc(&s->s_active);
3534 /* lock the sucker */
3535 down_write(&s->s_umount);
3536 /* ... and return the root of (sub)tree on it */
3539 EXPORT_SYMBOL(mount_subtree);
3541 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3542 char __user *, type, unsigned long, flags, void __user *, data)
3549 kernel_type = copy_mount_string(type);
3550 ret = PTR_ERR(kernel_type);
3551 if (IS_ERR(kernel_type))
3554 kernel_dev = copy_mount_string(dev_name);
3555 ret = PTR_ERR(kernel_dev);
3556 if (IS_ERR(kernel_dev))
3559 options = copy_mount_options(data);
3560 ret = PTR_ERR(options);
3561 if (IS_ERR(options))
3564 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
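/*
 * Example (sketch): the classic entry point; mounting a tmpfs with
 * per-mount flags and fs-specific data.  The path and option string are
 * illustrative and error handling is elided.
 *
 *	#include <sys/mount.h>
 *
 *	mount("none", "/mnt/scratch", "tmpfs",
 *	      MS_NOSUID | MS_NODEV, "size=16m");
 */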
3575 #define FSMOUNT_VALID_FLAGS \
3576 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
3577 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
3578 MOUNT_ATTR_NOSYMFOLLOW)
3580 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
3582 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
3583 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
3585 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
3587 unsigned int mnt_flags = 0;
3589 if (attr_flags & MOUNT_ATTR_RDONLY)
3590 mnt_flags |= MNT_READONLY;
3591 if (attr_flags & MOUNT_ATTR_NOSUID)
3592 mnt_flags |= MNT_NOSUID;
3593 if (attr_flags & MOUNT_ATTR_NODEV)
3594 mnt_flags |= MNT_NODEV;
3595 if (attr_flags & MOUNT_ATTR_NOEXEC)
3596 mnt_flags |= MNT_NOEXEC;
3597 if (attr_flags & MOUNT_ATTR_NODIRATIME)
3598 mnt_flags |= MNT_NODIRATIME;
3599 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
3600 mnt_flags |= MNT_NOSYMFOLLOW;
3606 * Create a kernel mount representation for a new, prepared superblock
3607 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
3609 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
3610 unsigned int, attr_flags)
3612 struct mnt_namespace *ns;
3613 struct fs_context *fc;
3615 struct path newmount;
3618 unsigned int mnt_flags = 0;
3624 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
3627 if (attr_flags & ~FSMOUNT_VALID_FLAGS)
3630 mnt_flags = attr_flags_to_mnt_flags(attr_flags);
3632 switch (attr_flags & MOUNT_ATTR__ATIME) {
3633 case MOUNT_ATTR_STRICTATIME:
3635 case MOUNT_ATTR_NOATIME:
3636 mnt_flags |= MNT_NOATIME;
3638 case MOUNT_ATTR_RELATIME:
3639 mnt_flags |= MNT_RELATIME;
3650 if (f.file->f_op != &fscontext_fops)
3653 fc = f.file->private_data;
3655 ret = mutex_lock_interruptible(&fc->uapi_mutex);
3659 /* There must be a valid superblock or we can't mount it */
3665 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
3666 pr_warn("VFS: Mount too revealing\n");
3671 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
3674 if (fc->sb_flags & SB_MANDLOCK)
3677 newmount.mnt = vfs_create_mount(fc);
3678 if (IS_ERR(newmount.mnt)) {
3679 ret = PTR_ERR(newmount.mnt);
3682 newmount.dentry = dget(fc->root);
3683 newmount.mnt->mnt_flags = mnt_flags;
3685 /* We've done the mount bit - now move the file context into more or
3686 * less the same state as if we'd done an fspick(). We don't want to
3687 * do any memory allocation or anything like that at this point as we
3688 * don't want to have to handle any errors incurred.
3690 vfs_clean_context(fc);
3692 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
3697 mnt = real_mount(newmount.mnt);
3701 list_add(&mnt->mnt_list, &ns->list);
3702 mntget(newmount.mnt);
3704 /* Attach to an apparent O_PATH fd with a note that we need to unmount
3705 * it, not just simply put it.
3707 file = dentry_open(&newmount, O_PATH, fc->cred);
3709 dissolve_on_fput(newmount.mnt);
3710 ret = PTR_ERR(file);
3713 file->f_mode |= FMODE_NEED_UNMOUNT;
3715 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
3717 fd_install(ret, file);
3722 path_put(&newmount);
3724 mutex_unlock(&fc->uapi_mutex);
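/*
 * Example (sketch): the new-API sequence that ends up in fsmount().  Raw
 * syscall(2) is assumed since libc wrappers may be absent; the device and
 * mountpoint are illustrative and error handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	int sfd = syscall(SYS_fsopen, "ext4", FSOPEN_CLOEXEC);
 *	syscall(SYS_fsconfig, sfd, FSCONFIG_SET_STRING, "source",
 *		"/dev/sda1", 0);
 *	syscall(SYS_fsconfig, sfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = syscall(SYS_fsmount, sfd, FSMOUNT_CLOEXEC,
 *			  MOUNT_ATTR_NODEV);
 *	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 */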
3731 * Move a mount from one place to another. In combination with
3732 * fsopen()/fsmount() this is used to install a new mount and in combination
3733 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy a mount subtree.
3736 * Note the flags value is a combination of MOVE_MOUNT_* flags.
3738 SYSCALL_DEFINE5(move_mount,
3739 int, from_dfd, const char __user *, from_pathname,
3740 int, to_dfd, const char __user *, to_pathname,
3741 unsigned int, flags)
3743 struct path from_path, to_path;
3744 unsigned int lflags;
3750 if (flags & ~MOVE_MOUNT__MASK)
3753 /* If someone gives a pathname, they aren't permitted to move
3754 * from an fd that requires unmount as we can't get at the flag
3755 * to clear it afterwards.
3758 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
3759 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
3760 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
3762 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
3767 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
3768 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
3769 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
3771 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
3775 ret = security_move_mount(&from_path, &to_path);
3779 if (flags & MOVE_MOUNT_SET_GROUP)
3780 ret = do_set_group(&from_path, &to_path);
3782 ret = do_move_mount(&from_path, &to_path);
3787 path_put(&from_path);
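/*
 * Example (sketch): moving an already-attached mount, the fd-based
 * equivalent of mount(2) with MS_MOVE; the paths are illustrative and
 * error handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_move_mount, AT_FDCWD, "/mnt/old",
 *		AT_FDCWD, "/mnt/new", 0);
 */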
3792 * Return true if path is reachable from root
3794 * namespace_sem or mount_lock is held
3796 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
3797 const struct path *root)
3799 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
3800 dentry = mnt->mnt_mountpoint;
3801 mnt = mnt->mnt_parent;
3803 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
3806 bool path_is_under(const struct path *path1, const struct path *path2)
3809 read_seqlock_excl(&mount_lock);
3810 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
3811 read_sequnlock_excl(&mount_lock);
3814 EXPORT_SYMBOL(path_is_under);
3817 * pivot_root Semantics:
3818 * Moves the root file system of the current process to the directory put_old,
3819 * makes new_root as the new root file system of the current process, and sets
3820 * root/cwd of all processes which had them on the current root to new_root.
3823 * The new_root and put_old must be directories, and must not be on the
3824 * same file system as the current process root. The put_old must be
3825 * underneath new_root, i.e. adding a non-zero number of /.. to the string
3826 * pointed to by put_old must yield the same directory as new_root. No other
3827 * file system may be mounted on put_old. After all, new_root is a mountpoint.
3829 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
3830 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
3831 * in this situation.
3834 * - we don't move root/cwd if they are not at the root (reason: if something
3835 * cared enough to change them, it's probably wrong to force them elsewhere)
3836 * - it's okay to pick a root that isn't the root of a file system, e.g.
3837 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
3838 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
3841 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
3842 const char __user *, put_old)
3844 struct path new, old, root;
3845 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
3846 struct mountpoint *old_mp, *root_mp;
3852 error = user_path_at(AT_FDCWD, new_root,
3853 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
3857 error = user_path_at(AT_FDCWD, put_old,
3858 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
3862 error = security_sb_pivotroot(&old, &new);
3866 get_fs_root(current->fs, &root);
3867 old_mp = lock_mount(&old);
3868 error = PTR_ERR(old_mp);
3873 new_mnt = real_mount(new.mnt);
3874 root_mnt = real_mount(root.mnt);
3875 old_mnt = real_mount(old.mnt);
3876 ex_parent = new_mnt->mnt_parent;
3877 root_parent = root_mnt->mnt_parent;
3878 if (IS_MNT_SHARED(old_mnt) ||
3879 IS_MNT_SHARED(ex_parent) ||
3880 IS_MNT_SHARED(root_parent))
3882 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
3884 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
3887 if (d_unlinked(new.dentry))
3890 if (new_mnt == root_mnt || old_mnt == root_mnt)
3891 goto out4; /* loop, on the same file system */
3893 if (root.mnt->mnt_root != root.dentry)
3894 goto out4; /* not a mountpoint */
3895 if (!mnt_has_parent(root_mnt))
3896 goto out4; /* not attached */
3897 if (new.mnt->mnt_root != new.dentry)
3898 goto out4; /* not a mountpoint */
3899 if (!mnt_has_parent(new_mnt))
3900 goto out4; /* not attached */
3901 /* make sure we can reach put_old from new_root */
3902 if (!is_path_reachable(old_mnt, old.dentry, &new))
3904 /* make certain new is below the root */
3905 if (!is_path_reachable(new_mnt, new.dentry, &root))
3908 umount_mnt(new_mnt);
3909 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
3910 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
3911 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
3912 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
3914 /* mount old root on put_old */
3915 attach_mnt(root_mnt, old_mnt, old_mp);
3916 /* mount new_root on / */
3917 attach_mnt(new_mnt, root_parent, root_mp);
3918 mnt_add_count(root_parent, -1);
3919 touch_mnt_namespace(current->nsproxy->mnt_ns);
3920 /* A moved mount should not expire automatically */
3921 list_del_init(&new_mnt->mnt_expire);
3922 put_mountpoint(root_mp);
3923 unlock_mount_hash();
3924 chroot_fs_refs(&root, &new);
3927 unlock_mount(old_mp);
3929 mntput_no_expire(ex_parent);
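/*
 * Example (sketch): the usual container-setup sequence around
 * pivot_root(2).  Assumes "/newroot" has been prepared and that
 * "/newroot/oldroot" exists; there is no libc wrapper, so raw
 * syscall(2) is used, and error handling is elided.
 *
 *	#include <sys/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 *	mount("/newroot", "/newroot", NULL, MS_BIND, NULL); // must be a mountpoint
 *	chdir("/newroot");
 *	syscall(SYS_pivot_root, ".", "oldroot");
 *	chdir("/");
 *	umount2("/oldroot", MNT_DETACH);
 */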
3940 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
3942 unsigned int flags = mnt->mnt.mnt_flags;
3944 /* flags to clear */
3945 flags &= ~kattr->attr_clr;
3946 /* flags to raise */
3947 flags |= kattr->attr_set;
3952 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
3954 struct vfsmount *m = &mnt->mnt;
3955 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
3957 if (!kattr->mnt_userns)
3961 * Creating an idmapped mount with the filesystem wide idmapping
3962 * doesn't make sense so block that. We don't allow mushy semantics.
3964 if (kattr->mnt_userns == fs_userns)
3968 * Once a mount has been idmapped we don't allow it to change its
3969 * mapping. It makes things simpler and callers can just create
3970 * another bind-mount they can idmap if they want to.
3972 if (is_idmapped_mnt(m))
3975 /* The underlying filesystem doesn't support idmapped mounts yet. */
3976 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
3979 /* We're not controlling the superblock. */
3980 if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
3983 /* Mount has already been visible in the filesystem hierarchy. */
3984 if (!is_anon_ns(mnt->mnt_ns))
3990 static struct mount *mount_setattr_prepare(struct mount_kattr *kattr,
3991 struct mount *mnt, int *err)
3993 struct mount *m = mnt, *last = NULL;
3995 if (!is_mounted(&m->mnt)) {
4000 if (!(mnt_has_parent(m) ? check_mnt(m) : is_anon_ns(m->mnt_ns))) {
4008 flags = recalc_flags(kattr, m);
4009 if (!can_change_locked_flags(m, flags)) {
4014 *err = can_idmap_mount(kattr, m);
4020 if ((kattr->attr_set & MNT_READONLY) &&
4021 !(m->mnt.mnt_flags & MNT_READONLY)) {
4022 *err = mnt_hold_writers(m);
4026 } while (kattr->recurse && (m = next_mnt(m, mnt)));
4032 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4034 struct user_namespace *mnt_userns, *old_mnt_userns;
4036 if (!kattr->mnt_userns)
4040 * We're the only ones able to change the mount's idmapping. So
4041 * mnt->mnt.mnt_userns is stable and we can retrieve it directly.
4043 old_mnt_userns = mnt->mnt.mnt_userns;
4045 mnt_userns = get_user_ns(kattr->mnt_userns);
4046 /* Pairs with smp_load_acquire() in mnt_user_ns(). */
4047 smp_store_release(&mnt->mnt.mnt_userns, mnt_userns);
4050 * If this is an idmapped filesystem drop the reference we've taken
4051 * in vfs_create_mount() before.
4053 if (!initial_idmapping(old_mnt_userns))
4054 put_user_ns(old_mnt_userns);
4057 static void mount_setattr_commit(struct mount_kattr *kattr,
4058 struct mount *mnt, struct mount *last,
4061 struct mount *m = mnt;
4067 do_idmap_mount(kattr, m);
4068 flags = recalc_flags(kattr, m);
4069 WRITE_ONCE(m->mnt.mnt_flags, flags);
4073 * We either set MNT_READONLY above so make it visible
4074 * before ~MNT_WRITE_HOLD or we failed to recursively
4075 * apply mount options.
4077 if ((kattr->attr_set & MNT_READONLY) &&
4078 (m->mnt.mnt_flags & MNT_WRITE_HOLD))
4079 mnt_unhold_writers(m);
4081 if (!err && kattr->propagation)
4082 change_mnt_propagation(m, kattr->propagation);
4085 * On failure, only clean up until we reach the first mount
4086 * we failed to handle.
4088 if (err && m == last)
4090 } while (kattr->recurse && (m = next_mnt(m, mnt)));
4093 touch_mnt_namespace(mnt->mnt_ns);
4096 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
4098 struct mount *mnt = real_mount(path->mnt), *last = NULL;
4101 if (path->dentry != mnt->mnt.mnt_root)
4104 if (kattr->propagation) {
4106 * Only take namespace_lock() if we're actually changing propagation.
4110 if (kattr->propagation == MS_SHARED) {
4111 err = invent_group_ids(mnt, kattr->recurse);
4122 * Get the mount tree in a shape where we can change mount
4123 * properties without failure.
4125 last = mount_setattr_prepare(kattr, mnt, &err);
4126 if (last) /* Commit all changes or revert to the old state. */
4127 mount_setattr_commit(kattr, mnt, last, err);
4129 unlock_mount_hash();
4131 if (kattr->propagation) {
4134 cleanup_group_ids(mnt, NULL);
4140 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4141 struct mount_kattr *kattr, unsigned int flags)
4144 struct ns_common *ns;
4145 struct user_namespace *mnt_userns;
4148 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4152 * We currently do not support clearing an idmapped mount. If this ever
4153 * is a use-case we can revisit this but for now let's keep it simple
4156 if (attr->attr_clr & MOUNT_ATTR_IDMAP)
4159 if (attr->userns_fd > INT_MAX)
4162 file = fget(attr->userns_fd);
4166 if (!proc_ns_file(file)) {
4171 ns = get_proc_ns(file_inode(file));
4172 if (ns->ops->type != CLONE_NEWUSER) {
4178 * The initial idmapping cannot be used to create an idmapped
4179 * mount. We use the initial idmapping as an indicator of a mount
4180 * that is not idmapped. It can simply be passed into helpers that
4181 * are aware of idmapped mounts as a convenient shortcut. A user
4182 * can just create a dedicated identity mapping to achieve the same
4185 mnt_userns = container_of(ns, struct user_namespace, ns);
4186 if (initial_idmapping(mnt_userns)) {
4191 /* We're not controlling the target namespace. */
4192 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4197 kattr->mnt_userns = get_user_ns(mnt_userns);
4204 static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
4205 struct mount_kattr *kattr, unsigned int flags)
4207 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
4209 if (flags & AT_NO_AUTOMOUNT)
4210 lookup_flags &= ~LOOKUP_AUTOMOUNT;
4211 if (flags & AT_SYMLINK_NOFOLLOW)
4212 lookup_flags &= ~LOOKUP_FOLLOW;
4213 if (flags & AT_EMPTY_PATH)
4214 lookup_flags |= LOOKUP_EMPTY;
4216 *kattr = (struct mount_kattr) {
4217 .lookup_flags = lookup_flags,
4218 .recurse = !!(flags & AT_RECURSIVE),
4221 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
4223 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
4225 kattr->propagation = attr->propagation;
4227 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
4230 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
4231 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
4234 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
4235 * users wanting to transition to a different atime setting cannot
4236 * simply specify the atime setting in @attr_set, but must also
4237 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
4238 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
4239 * @attr_clr and that @attr_set can't have any atime bits set if
4240 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
4242 if (attr->attr_clr & MOUNT_ATTR__ATIME) {
4243 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
4247 * Clear all previous time settings, as they are mutually exclusive.
4250 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
4251 switch (attr->attr_set & MOUNT_ATTR__ATIME) {
4252 case MOUNT_ATTR_RELATIME:
4253 kattr->attr_set |= MNT_RELATIME;
4255 case MOUNT_ATTR_NOATIME:
4256 kattr->attr_set |= MNT_NOATIME;
4258 case MOUNT_ATTR_STRICTATIME:
4264 if (attr->attr_set & MOUNT_ATTR__ATIME)
4268 return build_mount_idmapped(attr, usize, kattr, flags);
4271 static void finish_mount_kattr(struct mount_kattr *kattr)
4273 put_user_ns(kattr->mnt_userns);
4274 kattr->mnt_userns = NULL;
4277 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
4278 unsigned int, flags, struct mount_attr __user *, uattr,
4283 struct mount_attr attr;
4284 struct mount_kattr kattr;
4286 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
4288 if (flags & ~(AT_EMPTY_PATH |
4290 AT_SYMLINK_NOFOLLOW |
4294 if (unlikely(usize > PAGE_SIZE))
4296 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
4302 err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
4306 /* Don't bother walking through the mounts if this is a nop. */
4307 if (attr.attr_set == 0 &&
4308 attr.attr_clr == 0 &&
4309 attr.propagation == 0)
4312 err = build_mount_kattr(&attr, usize, &kattr, flags);
4316 err = user_path_at(dfd, path, kattr.lookup_flags, &target);
4318 err = do_mount_setattr(&target, &kattr);
4321 finish_mount_kattr(&kattr);
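/*
 * Example (sketch): creating an idmapped mount with mount_setattr(2).
 * Per can_idmap_mount() above, the mount must still be in an anonymous
 * namespace, so a detached copy from open_tree(OPEN_TREE_CLONE) is used;
 * userns_fd and the paths are illustrative, and error handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mount.h>
 *
 *	struct mount_attr attr = {
 *		.attr_set  = MOUNT_ATTR_IDMAP,
 *		.userns_fd = userns_fd,	// fd of a user namespace
 *	};
 *	int fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt/src",
 *			 OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
 *	syscall(SYS_mount_setattr, fd, "", AT_EMPTY_PATH, &attr,
 *		sizeof(attr));
 *	syscall(SYS_move_mount, fd, "", AT_FDCWD, "/mnt/idmapped",
 *		MOVE_MOUNT_F_EMPTY_PATH);
 */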
4325 static void __init init_mount_tree(void)
4327 struct vfsmount *mnt;
4329 struct mnt_namespace *ns;
4332 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
4334 panic("Can't create rootfs");
4336 ns = alloc_mnt_ns(&init_user_ns, false);
4338 panic("Can't allocate initial namespace");
4339 m = real_mount(mnt);
4343 list_add(&m->mnt_list, &ns->list);
4344 init_task.nsproxy->mnt_ns = ns;
4348 root.dentry = mnt->mnt_root;
4349 mnt->mnt_flags |= MNT_LOCKED;
4351 set_fs_pwd(current->fs, &root);
4352 set_fs_root(current->fs, &root);
4355 void __init mnt_init(void)
4359 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
4360 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
4362 mount_hashtable = alloc_large_system_hash("Mount-cache",
4363 sizeof(struct hlist_head),
4366 &m_hash_shift, &m_hash_mask, 0, 0);
4367 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
4368 sizeof(struct hlist_head),
4371 &mp_hash_shift, &mp_hash_mask, 0, 0);
4373 if (!mount_hashtable || !mountpoint_hashtable)
4374 panic("Failed to allocate mount hash table\n");
4380 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
4382 fs_kobj = kobject_create_and_add("fs", NULL);
4384 printk(KERN_WARNING "%s: kobj create error\n", __func__);
4390 void put_mnt_ns(struct mnt_namespace *ns)
4392 if (!refcount_dec_and_test(&ns->ns.count))
4394 drop_collected_mounts(&ns->root->mnt);
4398 struct vfsmount *kern_mount(struct file_system_type *type)
4400 struct vfsmount *mnt;
4401 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
4404 * it is a long-term mount; don't release mnt until
4405 * we unmount, before the filesystem is unregistered
4407 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
4411 EXPORT_SYMBOL_GPL(kern_mount);
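/*
 * Example (sketch): the usual in-kernel pattern around kern_mount(),
 * with hypothetical names (my_fs_type, my_mnt); the internal mount lives
 * until a matching kern_unmount() just before the type is unregistered.
 *
 *	static struct vfsmount *my_mnt;
 *
 *	my_mnt = kern_mount(&my_fs_type);
 *	if (IS_ERR(my_mnt))
 *		return PTR_ERR(my_mnt);
 *	...
 *	kern_unmount(my_mnt);
 *	unregister_filesystem(&my_fs_type);
 */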
4413 void kern_unmount(struct vfsmount *mnt)
4415 /* release long term mount so mount point can be released */
4416 if (!IS_ERR_OR_NULL(mnt)) {
4417 real_mount(mnt)->mnt_ns = NULL;
4418 synchronize_rcu(); /* yecchhh... */
4422 EXPORT_SYMBOL(kern_unmount);
4424 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
4428 for (i = 0; i < num; i++)
4430 real_mount(mnt[i])->mnt_ns = NULL;
4431 synchronize_rcu_expedited();
4432 for (i = 0; i < num; i++)
4435 EXPORT_SYMBOL(kern_unmount_array);
4437 bool our_mnt(struct vfsmount *mnt)
4439 return check_mnt(real_mount(mnt));
4442 bool current_chrooted(void)
4444 /* Does the current process have a non-standard root */
4445 struct path ns_root;
4446 struct path fs_root;
4449 /* Find the namespace root */
4450 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
4451 ns_root.dentry = ns_root.mnt->mnt_root;
4453 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
4456 get_fs_root(current->fs, &fs_root);
4458 chrooted = !path_equal(&fs_root, &ns_root);
4466 static bool mnt_already_visible(struct mnt_namespace *ns,
4467 const struct super_block *sb,
4470 int new_flags = *new_mnt_flags;
4472 bool visible = false;
4474 down_read(&namespace_sem);
4476 list_for_each_entry(mnt, &ns->list, mnt_list) {
4477 struct mount *child;
4480 if (mnt_is_cursor(mnt))
4483 if (mnt->mnt.mnt_sb->s_type != sb->s_type)
4486 /* This mount is not fully visible if its root directory
4487 * is not the root directory of the filesystem.
4489 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
4492 /* A local view of the mount flags */
4493 mnt_flags = mnt->mnt.mnt_flags;
4495 /* Don't miss readonly hidden in the superblock flags */
4496 if (sb_rdonly(mnt->mnt.mnt_sb))
4497 mnt_flags |= MNT_LOCK_READONLY;
4499 /* Verify the mount flags are equal to or more permissive
4500 * than the proposed new mount.
4502 if ((mnt_flags & MNT_LOCK_READONLY) &&
4503 !(new_flags & MNT_READONLY))
4505 if ((mnt_flags & MNT_LOCK_ATIME) &&
4506 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
4509 /* This mount is not fully visible if there are any
4510 * locked child mounts that cover anything except for
4511 * empty directories.
4513 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4514 struct inode *inode = child->mnt_mountpoint->d_inode;
4515 /* Only worry about locked mounts */
4516 if (!(child->mnt.mnt_flags & MNT_LOCKED))
4518 /* Is the directory permanently empty? */
4519 if (!is_empty_dir_inode(inode))
4522 /* Preserve the locked attributes */
4523 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
4531 up_read(&namespace_sem);
4535 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
4537 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
4538 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
4539 unsigned long s_iflags;
4541 if (ns->user_ns == &init_user_ns)
4544 /* Can this filesystem be too revealing? */
4545 s_iflags = sb->s_iflags;
4546 if (!(s_iflags & SB_I_USERNS_VISIBLE))
4549 if ((s_iflags & required_iflags) != required_iflags) {
4550 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
4555 return !mnt_already_visible(ns, sb, new_mnt_flags);
4558 bool mnt_may_suid(struct vfsmount *mnt)
4561 * Foreign mounts (accessed via fchdir or through /proc
4562 * symlinks) are always treated as if they are nosuid. This
4563 * prevents namespaces from trusting potentially unsafe
4564 * suid/sgid bits, file caps, or security labels that originate
4565 * in other namespaces.
4567 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
4568 current_in_userns(mnt->mnt_sb->s_user_ns);
4571 static struct ns_common *mntns_get(struct task_struct *task)
4573 struct ns_common *ns = NULL;
4574 struct nsproxy *nsproxy;
4577 nsproxy = task->nsproxy;
4579 ns = &nsproxy->mnt_ns->ns;
4580 get_mnt_ns(to_mnt_ns(ns));
4587 static void mntns_put(struct ns_common *ns)
4589 put_mnt_ns(to_mnt_ns(ns));
4592 static int mntns_install(struct nsset *nsset, struct ns_common *ns)
4594 struct nsproxy *nsproxy = nsset->nsproxy;
4595 struct fs_struct *fs = nsset->fs;
4596 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
4597 struct user_namespace *user_ns = nsset->cred->user_ns;
4601 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
4602 !ns_capable(user_ns, CAP_SYS_CHROOT) ||
4603 !ns_capable(user_ns, CAP_SYS_ADMIN))
4606 if (is_anon_ns(mnt_ns))
4613 old_mnt_ns = nsproxy->mnt_ns;
4614 nsproxy->mnt_ns = mnt_ns;
4617 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
4618 "/", LOOKUP_DOWN, &root);
4620 /* revert to old namespace */
4621 nsproxy->mnt_ns = old_mnt_ns;
4626 put_mnt_ns(old_mnt_ns);
4628 /* Update the pwd and root */
4629 set_fs_pwd(fs, &root);
4630 set_fs_root(fs, &root);
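/*
 * Example (sketch): mntns_install() runs when a task joins another mount
 * namespace with setns(2); the /proc path and pid are illustrative and
 * error handling is elided.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sched.h>
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY | O_CLOEXEC);
 *	setns(fd, CLONE_NEWNS);
 */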
4636 static struct user_namespace *mntns_owner(struct ns_common *ns)
4638 return to_mnt_ns(ns)->user_ns;
4641 const struct proc_ns_operations mntns_operations = {
4643 .type = CLONE_NEWNS,
4646 .install = mntns_install,
4647 .owner = mntns_owner,