// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
#include <uapi/linux/mount.h>
#include <linux/fs_context.h>
#include <linux/shmem_fs.h>
#include <linux/mnt_idmapping.h>

#include "pnode.h"
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
static unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
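
/*
 * Illustrative: both knobs are kernel command-line parameters, parsed at
 * boot, e.g.
 *
 *	mhash_entries=8192 mphash_entries=4096
 *
 * which are used when the two mount hash tables are sized at boot.
 */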
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */
static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */
struct mnt_idmap {
	struct user_namespace *owner;
	refcount_t count;
};

/*
 * Carries the initial idmapping of 0:0:4294967295 which is an identity
 * mapping. This means that {g,u}id 0 is mapped to {g,u}id 0, {g,u}id 1 is
 * mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
 */
struct mnt_idmap nop_mnt_idmap = {
	.owner	= &init_user_ns,
	.count	= REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);
struct mount_kattr {
	unsigned int attr_set;
	unsigned int attr_clr;
	unsigned int propagation;
	unsigned int lookup_flags;
	bool recurse;
	struct user_namespace *mnt_userns;
	struct mnt_idmap *mnt_idmap;
};

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static inline void lock_mount_hash(void)
{
	write_seqlock(&mount_lock);
}

static inline void unlock_mount_hash(void)
{
	write_sequnlock(&mount_lock);
}
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
static int mnt_alloc_id(struct mount *mnt)
{
	int res = ida_alloc(&mnt_id_ida, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_id = res;
	return 0;
}

static void mnt_free_id(struct mount *mnt)
{
	ida_free(&mnt_id_ida, mnt->mnt_id);
}

/*
 * Allocate a new peer group ID
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res = ida_alloc_min(&mnt_group_ida, 1, GFP_KERNEL);

	if (res < 0)
		return res;
	mnt->mnt_group_id = res;
	return 0;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	ida_free(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
/**
 * mnt_idmap_owner - retrieve owner of the mount's idmapping
 * @idmap: mount idmapping
 *
 * This helper will go away once the conversion to use struct mnt_idmap
 * everywhere has finished at which point the helper will be unexported.
 *
 * Only code that needs to perform permission checks based on the owner of the
 * idmapping will get access to it. All other code will solely rely on
 * idmappings. This will get us type safety so it's impossible to conflate
 * filesystem idmappings with mount idmappings.
 *
 * Return: The owner of the idmapping.
 */
struct user_namespace *mnt_idmap_owner(const struct mnt_idmap *idmap)
{
	return idmap->owner;
}
EXPORT_SYMBOL_GPL(mnt_idmap_owner);
/**
 * mnt_user_ns - retrieve owner of an idmapped mount
 * @mnt: the relevant vfsmount
 *
 * This helper will go away once the conversion to use struct mnt_idmap
 * everywhere has finished at which point the helper will be unexported.
 *
 * Only code that needs to perform permission checks based on the owner of the
 * idmapping will get access to it. All other code will solely rely on
 * idmappings. This will get us type safety so it's impossible to conflate
 * filesystem idmappings with mount idmappings.
 *
 * Return: The owner of the idmapped mount.
 */
struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
{
	struct mnt_idmap *idmap = mnt_idmap(mnt);

	/* Return the actual owner of the filesystem instead of the nop. */
	if (idmap == &nop_mnt_idmap &&
	    !initial_idmapping(mnt->mnt_sb->s_user_ns))
		return mnt->mnt_sb->s_user_ns;
	return mnt_idmap_owner(idmap);
}
EXPORT_SYMBOL_GPL(mnt_user_ns);
/**
 * alloc_mnt_idmap - allocate a new idmapping for the mount
 * @mnt_userns: owning userns of the idmapping
 *
 * Allocate a new struct mnt_idmap which carries the idmapping of the mount.
 *
 * Return: On success a new idmap, on error an error pointer is returned.
 */
static struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns)
{
	struct mnt_idmap *idmap;

	idmap = kzalloc(sizeof(struct mnt_idmap), GFP_KERNEL_ACCOUNT);
	if (!idmap)
		return ERR_PTR(-ENOMEM);

	idmap->owner = get_user_ns(mnt_userns);
	refcount_set(&idmap->count, 1);
	return idmap;
}
/**
 * mnt_idmap_get - get a reference to an idmapping
 * @idmap: the idmap to bump the reference on
 *
 * If @idmap is not the @nop_mnt_idmap bump the reference count.
 *
 * Return: @idmap with reference count bumped if @nop_mnt_idmap isn't passed.
 */
static inline struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
{
	if (idmap != &nop_mnt_idmap)
		refcount_inc(&idmap->count);

	return idmap;
}
/**
 * mnt_idmap_put - put a reference to an idmapping
 * @idmap: the idmap to put the reference on
 *
 * If this is a non-initial idmapping, put the reference count when a mount is
 * released and free it if we're the last user.
 */
static inline void mnt_idmap_put(struct mnt_idmap *idmap)
{
	if (idmap != &nop_mnt_idmap && refcount_dec_and_test(&idmap->count)) {
		put_user_ns(idmap->owner);
		kfree(idmap);
	}
}
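
/*
 * Illustrative sketch (not used elsewhere in this file): the get/put
 * pairing for mount idmappings. The nop idmap is a static singleton, so
 * mnt_idmap_get()/mnt_idmap_put() deliberately skip refcounting it.
 */
static inline void example_idmap_use(struct mnt_idmap *idmap)
{
	struct mnt_idmap *ref = mnt_idmap_get(idmap);

	/* ... use @ref for ownership/permission checks ... */
	mnt_idmap_put(ref);
}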
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name,
							 GFP_KERNEL_ACCOUNT);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		INIT_HLIST_HEAD(&mnt->mnt_stuck_children);
		mnt->mnt.mnt_idmap = &nop_mnt_idmap;
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
out_free_id:
#endif
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
bool __mnt_is_readonly(struct vfsmount *mnt)
{
	return (mnt->mnt_flags & MNT_READONLY) || sb_rdonly(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	might_lock(&mount_lock.lock);
	while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			cpu_relax();
		} else {
			/*
			 * This prevents priority inversion, if the task
			 * setting MNT_WRITE_HOLD got preempted on a remote
			 * CPU, and it prevents livelock if the task setting
			 * MNT_WRITE_HOLD has a lower priority and is bound to
			 * the same CPU as the task that is spinning here.
			 */
			preempt_enable();
			lock_mount_hash();
			unlock_mount_hash();
			preempt_disable();
		}
	}
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the check for emergency r/o remounts. This must be
 * paired with __mnt_drop_write_file.
 */
int __mnt_want_write_file(struct file *file)
{
	if (file->f_mode & FMODE_WRITER) {
		/*
		 * Superblock may have become readonly while there are still
		 * writable fd's, e.g. due to a fs error with errors=remount-ro
		 */
		if (__mnt_is_readonly(file->f_path.mnt))
			return -EROFS;
		return 0;
	}
	return __mnt_want_write(file->f_path.mnt);
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but if the file is already open for writing it
 * skips incrementing mnt_writers (since the open file already has a reference)
 * and instead only does the freeze protection and the check for emergency r/o
 * remounts. This must be paired with mnt_drop_write_file.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file_inode(file)->i_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file_inode(file)->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows the filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
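
/*
 * Illustrative sketch (not used elsewhere in this file): the canonical
 * pairing for a write-type operation against @path. Everything between
 * the two calls runs with the write count held and freeze protection
 * taken.
 */
static inline int example_write_to_path(struct path *path)
{
	int err = mnt_want_write(path->mnt);

	if (err)
		return err;
	/* ... perform the modification (create, unlink, write, ...) ... */
	mnt_drop_write(path->mnt);
	return 0;
}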
void __mnt_drop_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write_file(file);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
/**
 * mnt_hold_writers - prevent write access to the given mount
 * @mnt: mnt to prevent write access to
 *
 * Prevents write access to @mnt if there are no active writers for @mnt.
 * This function needs to be called and return successfully before changing
 * properties of @mnt that need to remain stable for callers with write access
 * to @mnt.
 *
 * After this function has been called successfully callers must pair it with
 * a call to mnt_unhold_writers() in order to stop preventing write access to
 * @mnt.
 *
 * Context: This function expects lock_mount_hash() to be held serializing
 *          setting MNT_WRITE_HOLD.
 * Return: On success 0 is returned.
 *	   On error, -EBUSY is returned.
 */
static inline int mnt_hold_writers(struct mount *mnt)
{
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		return -EBUSY;

	return 0;
}
/**
 * mnt_unhold_writers - stop preventing write access to the given mount
 * @mnt: mnt to stop preventing write access to
 *
 * Stop preventing write access to @mnt allowing callers to gain write access
 * to @mnt again.
 *
 * This function can only be called after a successful call to
 * mnt_hold_writers().
 *
 * Context: This function expects lock_mount_hash() to be held.
 */
static inline void mnt_unhold_writers(struct mount *mnt)
{
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
}

static int mnt_make_readonly(struct mount *mnt)
{
	int ret;

	ret = mnt_hold_writers(mnt);
	if (!ret)
		mnt->mnt.mnt_flags |= MNT_READONLY;
	mnt_unhold_writers(mnt);
	return ret;
}
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			err = mnt_hold_writers(mnt);
			if (err)
				break;
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err)
		sb->s_readonly_remount = 1;
	smp_wmb();

	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	mnt_idmap_put(mnt_idmap(&mnt->mnt));
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	smp_mb();			// see mntput_no_expire()
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	lock_mount_hash();
	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
		mnt_add_count(mnt, -1);
		unlock_mount_hash();
		return 1;
	}
	unlock_mount_hash();
	/* caller will mntput() */
	return -1;
}
/* call under rcu_read_lock */
static bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
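
/*
 * Illustrative sketch (not used elsewhere in this file): stepping from
 * @path onto whatever is mounted on top of it, repeatedly, ends at the
 * topmost mount of the stack described above. Each successful
 * lookup_mnt() returns a referenced vfsmount, so the references on the
 * mount being left must be dropped.
 */
static inline void example_step_to_top(struct path *path)
{
	struct vfsmount *m;

	while ((m = lookup_mnt(path)) != NULL) {
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = m;
		path->dentry = dget(m->mnt_root);
	}
}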
static inline void lock_ns_list(struct mnt_namespace *ns)
{
	spin_lock(&ns->ns_lock);
}

static inline void unlock_ns_list(struct mnt_namespace *ns)
{
	spin_unlock(&ns->ns_lock);
}

static inline bool mnt_is_cursor(struct mount *mnt)
{
	return mnt->mnt.mnt_flags & MNT_CURSOR;
}
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		if (mnt_is_cursor(mnt))
			continue;
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	unlock_ns_list(ns);
	up_read(&namespace_sem);

	return is_covered;
}
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
		/* might be worth a WARN_ON() */
		if (d_unlinked(dentry))
			return ERR_PTR(-ENOENT);
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dget(dentry);
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
/*
 * vfsmount lock must be held.  Additionally, the caller is responsible
 * for serializing calls for given disposal list.
 */
static void __put_mountpoint(struct mountpoint *mp, struct list_head *list)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		dput_to_list(dentry, list);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* called with namespace_lock and vfsmount lock */
static void put_mountpoint(struct mountpoint *mp)
{
	__put_mountpoint(mp, &ex_mountpoints);
}
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * vfsmount lock must be held for write
 */
static struct mountpoint *unhash_mnt(struct mount *mnt)
{
	struct mountpoint *mp;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	mp = mnt->mnt_mp;
	mnt->mnt_mp = NULL;
	return mp;
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	put_mountpoint(unhash_mnt(mnt));
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = mp->m_dentry;
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}
static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);
	mnt_add_count(old_parent, -1);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
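
/*
 * Illustrative sketch (not used elsewhere in this file): next_mnt()
 * yields a depth-first walk of the tree rooted at @root, so counting
 * every mount in a tree is simply:
 */
static inline unsigned int example_tree_size(struct mount *root)
{
	struct mount *p;
	unsigned int n = 0;

	for (p = root; p; p = next_mnt(p, root))
		n++;
	return n;
}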
static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
/**
 * vfs_create_mount - Create a mount for a configured superblock
 * @fc: The configuration context with the superblock attached
 *
 * Create a mount to an already configured superblock.  If necessary, the
 * caller should invoke vfs_get_tree() before calling this.
 *
 * Note that this does not attach the mount to anything.
 */
struct vfsmount *vfs_create_mount(struct fs_context *fc)
{
	struct mount *mnt;

	if (!fc->root)
		return ERR_PTR(-EINVAL);

	mnt = alloc_vfsmnt(fc->source ?: "none");
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (fc->sb_flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	atomic_inc(&fc->root->d_sb->s_active);
	mnt->mnt.mnt_sb		= fc->root->d_sb;
	mnt->mnt.mnt_root	= dget(fc->root);
	mnt->mnt_mountpoint	= mnt->mnt.mnt_root;
	mnt->mnt_parent		= mnt;

	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &mnt->mnt.mnt_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL(vfs_create_mount);
struct vfsmount *fc_mount(struct fs_context *fc)
{
	int err = vfs_get_tree(fc);
	if (!err) {
		up_write(&fc->root->d_sb->s_umount);
		return vfs_create_mount(fc);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL(fc_mount);
struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				int flags, const char *name,
				void *data)
{
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret = 0;

	if (!type)
		return ERR_PTR(-EINVAL);

	fc = fs_context_for_mount(type, flags);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	if (name)
		ret = vfs_parse_fs_string(fc, "source",
					  name, strlen(name));
	if (!ret)
		ret = parse_monolithic_mount_data(fc, data);
	if (!ret)
		mnt = fc_mount(fc);
	else
		mnt = ERR_PTR(ret);

	put_fs_context(fc);
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
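
/*
 * Illustrative sketch (not used elsewhere in this file): an internal,
 * never-attached mount of a pseudo filesystem, along the lines of what
 * kern_mount() does.
 */
static inline struct vfsmount *example_kern_mount(struct file_system_type *type)
{
	return vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
}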
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_idmap = mnt_idmap_get(mnt_idmap(&old->mnt));

	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	struct hlist_node *p;
	struct mount *m;
	/*
	 * The warning here probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this happens, the
	 * filesystem was probably unable to make r/w->r/o transitions.
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	hlist_for_each_entry_safe(m, p, &mnt->mnt_stuck_children, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}
static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	LIST_HEAD(list);
	int count;

	rcu_read_lock();
	if (likely(READ_ONCE(mnt->mnt_ns))) {
		/*
		 * Since we don't do lock_mount_hash() here,
		 * ->mnt_ns can change under us.  However, if it's
		 * non-NULL, then there's a reference that won't
		 * be dropped until after an RCU delay done after
		 * turning ->mnt_ns NULL.  So if we observe it
		 * non-NULL under rcu_read_lock(), the reference
		 * we are dropping is not the final one.
		 */
		mnt_add_count(mnt, -1);
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	/*
	 * make sure that if __legitimize_mnt() has not seen us grab
	 * mount_lock, we'll see their refcount increment here.
	 */
	smp_mb();
	mnt_add_count(mnt, -1);
	count = mnt_get_count(mnt);
	if (count != 0) {
		WARN_ON(count < 0);
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
			__put_mountpoint(unhash_mnt(p), &list);
			hlist_add_head(&p->mnt_umount, &mnt->mnt_stuck_children);
		}
	}
	unlock_mount_hash();
	shrink_dentry_list(&list);

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);
struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
/**
 * path_is_mountpoint() - Check if path is a mount in the current namespace.
 * @path: path to check
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);
struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
#ifdef CONFIG_PROC_FS
static struct mount *mnt_list_next(struct mnt_namespace *ns,
				   struct list_head *p)
{
	struct mount *mnt, *ret = NULL;

	lock_ns_list(ns);
	list_for_each_continue(p, &ns->list) {
		mnt = list_entry(p, typeof(*mnt), mnt_list);
		if (!mnt_is_cursor(mnt)) {
			ret = mnt;
			break;
		}
	}
	unlock_ns_list(ns);

	return ret;
}
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct list_head *prev;

	down_read(&namespace_sem);
	if (!*pos) {
		prev = &p->ns->list;
	} else {
		prev = &p->cursor.mnt_list;

		/* Read after we'd reached the end? */
		if (list_empty(prev))
			return NULL;
	}

	return mnt_list_next(p->ns, prev);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	++*pos;
	return mnt_list_next(p->ns, &mnt->mnt_list);
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *mnt = v;

	lock_ns_list(p->ns);
	if (mnt)
		list_move_tail(&p->cursor.mnt_list, &mnt->mnt_list);
	else
		list_del_init(&p->cursor.mnt_list);
	unlock_ns_list(p->ns);
	up_read(&namespace_sem);
}
static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = v;
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor)
{
	down_read(&namespace_sem);
	lock_ns_list(ns);
	list_del(&cursor->mnt_list);
	unlock_ns_list(ns);
	up_read(&namespace_sem);
}
#endif /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
static void namespace_unlock(void)
{
	struct hlist_head head;
	struct hlist_node *p;
	struct mount *m;
	LIST_HEAD(list);

	hlist_move_list(&unmounted, &head);
	list_splice_init(&ex_mountpoints, &list);

	up_write(&namespace_sem);

	shrink_dentry_list(&list);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu_expedited();

	hlist_for_each_entry_safe(m, p, &head, mnt_umount) {
		hlist_del(&m->mnt_umount);
		mntput(&m->mnt);
	}
}
static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};
static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
		if (disconnect)
			hlist_add_head(&p->mnt_umount, &unmounted);
	}
}
static void shrink_submounts(struct mount *mnt);

static int do_umount_root(struct super_block *sb)
{
	int ret = 0;

	down_write(&sb->s_umount);
	if (!sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root, SB_RDONLY,
						SB_RDONLY);
		if (IS_ERR(fc)) {
			ret = PTR_ERR(fc);
		} else {
			ret = parse_monolithic_mount_data(fc, NULL);
			if (!ret)
				ret = reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	up_write(&sb->s_umount);
	return ret;
}
static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
			return -EPERM;
		return do_umount_root(sb);
	}

	namespace_lock();
	lock_mount_hash();

	/* Recheck MNT_LOCKED with the locks held */
	retval = -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out;

	event++;
	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
out:
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (!mp)
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			umount_mnt(mnt);
			hlist_add_head(&mnt->mnt_umount, &unmounted);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify their namespace?
 */
bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static void warn_mandlock(void)
{
	pr_warn_once("=======================================================\n"
		     "WARNING: The mand mount option has been deprecated and\n"
		     "         is ignored by this kernel. Remove the mand\n"
		     "         option from the mount to silence this warning.\n"
		     "=======================================================\n");
}
static int can_umount(const struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);

	if (!may_mount())
		return -EPERM;
	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;
	if (!check_mnt(mnt))
		return -EINVAL;
	if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
		return -EINVAL;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
// caller is responsible for flags being sane
int path_umount(struct path *path, int flags)
{
	struct mount *mnt = real_mount(path->mnt);
	int ret;

	ret = can_umount(path, flags);
	if (!ret)
		ret = do_umount(mnt, flags);

	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path->dentry);
	mntput_no_expire(mnt);
	return ret;
}
static int ksys_umount(char __user *name, int flags)
{
	int lookup_flags = LOOKUP_MOUNTPOINT;
	struct path path;
	int ret;

	// basic validity checks done first
	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (ret)
		return ret;
	return path_umount(&path, flags);
}

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	return ksys_umount(name, flags);
}
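
/*
 * Illustrative: userspace enters here through umount2(2). A lazy unmount
 * that detaches the tree now and finishes cleanup once the last user goes
 * away would be
 *
 *	umount2("/mnt", MNT_DETACH);
 */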
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return ksys_umount(name, 0);
}

#endif
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
{
	return &mnt->ns;
}
static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				if (s->mnt.mnt_flags & MNT_LOCKED) {
					/* Both unbindable and locked. */
					q = ERR_PTR(-EPERM);
					goto out;
				} else {
					s = skip_mnt_tree(s);
					continue;
				}
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}
static void free_mnt_ns(struct mnt_namespace *);
static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *, bool);

void dissolve_on_fput(struct vfsmount *mnt)
{
	struct mnt_namespace *ns;
	namespace_lock();
	lock_mount_hash();
	ns = real_mount(mnt)->mnt_ns;
	if (ns) {
		if (is_anon_ns(ns))
			umount_tree(real_mount(mnt), UMOUNT_CONNECTED);
		else
			ns = NULL;
	}
	unlock_mount_hash();
	namespace_unlock();
	if (ns)
		free_mnt_ns(ns);
}
void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;

	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/**
 * clone_private_mount - create a private clone of a path
 * @path: path to clone
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new mount
 * will not be attached anywhere in the namespace and will be private (i.e.
 * changes to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	down_read(&namespace_sem);
	if (IS_MNT_UNBINDABLE(old_mnt))
		goto invalid;

	if (!check_mnt(old_mnt))
		goto invalid;

	if (has_locked_children(old_mnt, path->dentry))
		goto invalid;

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	up_read(&namespace_sem);

	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	/* Longterm mount to be removed by kern_unmount*() */
	new_mnt->mnt_ns = MNT_NS_INTERNAL;

	return &new_mnt->mnt;

invalid:
	up_read(&namespace_sem);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(clone_private_mount);
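
/*
 * Illustrative sketch (not used elsewhere in this file): a typical
 * clone_private_mount() life cycle, as used by stacking filesystems
 * that need a private mount of a layer for their own lookups.
 */
static inline int example_with_private_clone(const struct path *path)
{
	struct vfsmount *m = clone_private_mount(path);

	if (IS_ERR(m))
		return PTR_ERR(m);
	/* ... lookups under m->mnt_root, invisible to the namespace ... */
	mntput(m);
	return 0;
}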
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
static void lock_mnt_tree(struct mount *mnt)
{
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		int flags = p->mnt.mnt_flags;
		/* Don't allow unprivileged users to change mount flags */
		flags |= MNT_LOCK_ATIME;

		if (flags & MNT_READONLY)
			flags |= MNT_LOCK_READONLY;

		if (flags & MNT_NODEV)
			flags |= MNT_LOCK_NODEV;

		if (flags & MNT_NOSUID)
			flags |= MNT_LOCK_NOSUID;

		if (flags & MNT_NOEXEC)
			flags |= MNT_LOCK_NOEXEC;
		/* Don't allow unprivileged users to reveal what is under a mount */
		if (list_empty(&p->mnt_expire))
			flags |= MNT_LOCKED;
		p->mnt.mnt_flags = flags;
	}
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0;
	struct mount *p;

	if (ns->mounts >= max)
		return -ENOSPC;
	max -= ns->mounts;
	if (ns->pending_mounts >= max)
		return -ENOSPC;
	max -= ns->pending_mounts;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	if (mounts > max)
		return -ENOSPC;

	ns->pending_mounts += mounts;
	return 0;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 * 	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 * 	 all the mounts belonging to the destination mount's propagation tree.
 * 	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			bool moving)
{
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Preallocate a mountpoint in case the new mounts need
	 * to be tucked under other mounts.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!moving) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (moving) {
		unhash_mnt(source_mnt);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		if (source_mnt->mnt_ns) {
			/* move from anon - the caller will destroy */
			list_del_init(&source_mnt->mnt_ns->list);
		}
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		/* Notice when we are propagating across user namespaces */
		if (child->mnt_parent->mnt_ns->user_ns != user_ns)
			lock_mnt_tree(child);
		child->mnt.mnt_flags &= ~MNT_LOCKED;
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
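
/*
 * Worked example for the BIND MOUNT table above (illustrative): with
 * the mount at /a shared and the one at /b private,
 *
 *	mount --bind /a /b
 *
 * hits the (non-shared dest, shared source) cell: the clone mounted on
 * /b is itself marked shared and is added to /a's peer group, so
 * subsequent mount events in either subtree propagate to the other.
 */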
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	inode_lock(dentry->d_inode);
	if (unlikely(cant_mount(dentry))) {
		inode_unlock(dentry->d_inode);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = get_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			inode_unlock(dentry->d_inode);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	inode_unlock(path->dentry->d_inode);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}
static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	inode_unlock(dentry->d_inode);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, false);
}
/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
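
/*
 * Illustrative: userspace drives do_change_type() via mount(2) with
 * exactly one propagation flag, optionally combined with MS_REC, e.g.
 * the equivalent of "mount --make-rshared /":
 *
 *	mount(NULL, "/", NULL, MS_SHARED | MS_REC, NULL);
 */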
static struct mount *__do_loopback(struct path *old_path, int recurse)
{
	struct mount *mnt = ERR_PTR(-EINVAL), *old = real_mount(old_path->mnt);

	if (IS_MNT_UNBINDABLE(old))
		return mnt;

	if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations)
		return mnt;

	if (!recurse && has_locked_children(old, old_path->dentry))
		return mnt;

	if (recurse)
		mnt = copy_tree(old, old_path->dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path->dentry, 0);

	if (!IS_ERR(mnt))
		mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	return mnt;
}
2522 * do loopback mount.
2524 static int do_loopback(struct path *path, const char *old_name,
2527 struct path old_path;
2528 struct mount *mnt = NULL, *parent;
2529 struct mountpoint *mp;
2531 if (!old_name || !*old_name)
2533 err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
2538 if (mnt_ns_loop(old_path.dentry))
2541 mp = lock_mount(path);
2547 parent = real_mount(path->mnt);
2548 if (!check_mnt(parent))
2551 mnt = __do_loopback(&old_path, recurse);
2557 err = graft_tree(mnt, parent, mp);
2560 umount_tree(mnt, UMOUNT_SYNC);
2561 unlock_mount_hash();
2566 path_put(&old_path);
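/*
 * Illustrative userspace sketch (not part of this file): do_loopback()
 * implements bind mounts, which attach an existing subtree at a second
 * location without copying any data.
 *
 *	// Bind /srv/data at /mnt; MS_REC takes submounts along too:
 *	mount("/srv/data", "/mnt", NULL, MS_BIND, NULL);
 *	mount("/srv/data", "/mnt", NULL, MS_BIND | MS_REC, NULL);
 */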
2570 static struct file *open_detached_copy(struct path *path, bool recursive)
2572 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2573 struct mnt_namespace *ns = alloc_mnt_ns(user_ns, true);
2574 struct mount *mnt, *p;
2578 return ERR_CAST(ns);
2581 mnt = __do_loopback(path, recursive);
2585 return ERR_CAST(mnt);
2589 for (p = mnt; p; p = next_mnt(p, mnt)) {
2594 list_add_tail(&ns->list, &mnt->mnt_list);
2596 unlock_mount_hash();
2600 path->mnt = &mnt->mnt;
2601 file = dentry_open(path, O_PATH, current_cred());
2603 dissolve_on_fput(path->mnt);
2605 file->f_mode |= FMODE_NEED_UNMOUNT;
2609 SYSCALL_DEFINE3(open_tree, int, dfd, const char __user *, filename, unsigned, flags)
2613 int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
2614 bool detached = flags & OPEN_TREE_CLONE;
2618 BUILD_BUG_ON(OPEN_TREE_CLOEXEC != O_CLOEXEC);
2620 if (flags & ~(AT_EMPTY_PATH | AT_NO_AUTOMOUNT | AT_RECURSIVE |
2621 AT_SYMLINK_NOFOLLOW | OPEN_TREE_CLONE |
2625 if ((flags & (AT_RECURSIVE | OPEN_TREE_CLONE)) == AT_RECURSIVE)
2628 if (flags & AT_NO_AUTOMOUNT)
2629 lookup_flags &= ~LOOKUP_AUTOMOUNT;
2630 if (flags & AT_SYMLINK_NOFOLLOW)
2631 lookup_flags &= ~LOOKUP_FOLLOW;
2632 if (flags & AT_EMPTY_PATH)
2633 lookup_flags |= LOOKUP_EMPTY;
2635 if (detached && !may_mount())
2638 fd = get_unused_fd_flags(flags & O_CLOEXEC);
2642 error = user_path_at(dfd, filename, lookup_flags, &path);
2643 if (unlikely(error)) {
2644 file = ERR_PTR(error);
2647 file = open_detached_copy(&path, flags & AT_RECURSIVE);
2649 file = dentry_open(&path, O_PATH, current_cred());
2654 return PTR_ERR(file);
2656 fd_install(fd, file);
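/*
 * Illustrative userspace sketch (not part of this file; older libcs may
 * need syscall(2) wrappers): OPEN_TREE_CLONE yields a detached copy of a
 * subtree on an O_PATH-like fd, which can later be attached with
 * move_mount(2). Without OPEN_TREE_CLONE, open_tree() is roughly an
 * O_PATH open of the mount itself.
 *
 *	int fd = open_tree(AT_FDCWD, "/srv/data",
 *			   OPEN_TREE_CLONE | AT_RECURSIVE | OPEN_TREE_CLOEXEC);
 *	move_mount(fd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 *	close(fd);
 */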
2661 * Don't allow locked mount flags to be cleared.
2663 * No locks need to be held here while testing the various MNT_LOCK
2664 * flags because those flags can never be cleared once they are set.
2666 static bool can_change_locked_flags(struct mount *mnt, unsigned int mnt_flags)
2668 unsigned int fl = mnt->mnt.mnt_flags;
2670 if ((fl & MNT_LOCK_READONLY) &&
2671 !(mnt_flags & MNT_READONLY))
2674 if ((fl & MNT_LOCK_NODEV) &&
2675 !(mnt_flags & MNT_NODEV))
2678 if ((fl & MNT_LOCK_NOSUID) &&
2679 !(mnt_flags & MNT_NOSUID))
2682 if ((fl & MNT_LOCK_NOEXEC) &&
2683 !(mnt_flags & MNT_NOEXEC))
2686 if ((fl & MNT_LOCK_ATIME) &&
2687 ((fl & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK)))
2693 static int change_mount_ro_state(struct mount *mnt, unsigned int mnt_flags)
2695 bool readonly_request = (mnt_flags & MNT_READONLY);
2697 if (readonly_request == __mnt_is_readonly(&mnt->mnt))
2700 if (readonly_request)
2701 return mnt_make_readonly(mnt);
2703 mnt->mnt.mnt_flags &= ~MNT_READONLY;
2707 static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
2709 mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
2710 mnt->mnt.mnt_flags = mnt_flags;
2711 touch_mnt_namespace(mnt->mnt_ns);
2714 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
2716 struct super_block *sb = mnt->mnt_sb;
2718 if (!__mnt_is_readonly(mnt) &&
2719 (!(sb->s_iflags & SB_I_TS_EXPIRY_WARNED)) &&
2720 (ktime_get_real_seconds() + TIME_UPTIME_SEC_MAX > sb->s_time_max)) {
2721 char *buf = (char *)__get_free_page(GFP_KERNEL);
2722 char *mntpath = buf ? d_path(mountpoint, buf, PAGE_SIZE) : ERR_PTR(-ENOMEM);
2725 time64_to_tm(sb->s_time_max, 0, &tm);
2727 pr_warn("%s filesystem being %s at %s supports timestamps until %04ld (0x%llx)\n",
2729 is_mounted(mnt) ? "remounted" : "mounted",
2731 tm.tm_year+1900, (unsigned long long)sb->s_time_max);
2733 free_page((unsigned long)buf);
2734 sb->s_iflags |= SB_I_TS_EXPIRY_WARNED;
2739 * Handle reconfiguration of the mountpoint only without alteration of the
2740 * superblock it refers to. This is triggered by specifying MS_REMOUNT|MS_BIND in flags.
2743 static int do_reconfigure_mnt(struct path *path, unsigned int mnt_flags)
2745 struct super_block *sb = path->mnt->mnt_sb;
2746 struct mount *mnt = real_mount(path->mnt);
2749 if (!check_mnt(mnt))
2752 if (path->dentry != mnt->mnt.mnt_root)
2755 if (!can_change_locked_flags(mnt, mnt_flags))
2759 * We're only checking whether the superblock is read-only, not
2760 * changing it, so only take down_read(&sb->s_umount).
2762 down_read(&sb->s_umount);
2764 ret = change_mount_ro_state(mnt, mnt_flags);
2766 set_mount_attributes(mnt, mnt_flags);
2767 unlock_mount_hash();
2768 up_read(&sb->s_umount);
2770 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2776 * change filesystem flags. dir should be the physical root of the filesystem.
2777 * If you've mounted a non-root directory somewhere and want to do remount
2778 * on it - tough luck.
2780 static int do_remount(struct path *path, int ms_flags, int sb_flags,
2781 int mnt_flags, void *data)
2784 struct super_block *sb = path->mnt->mnt_sb;
2785 struct mount *mnt = real_mount(path->mnt);
2786 struct fs_context *fc;
2788 if (!check_mnt(mnt))
2791 if (path->dentry != path->mnt->mnt_root)
2794 if (!can_change_locked_flags(mnt, mnt_flags))
2797 fc = fs_context_for_reconfigure(path->dentry, sb_flags, MS_RMT_MASK);
2802 err = parse_monolithic_mount_data(fc, data);
2804 down_write(&sb->s_umount);
2806 if (ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) {
2807 err = reconfigure_super(fc);
2810 set_mount_attributes(mnt, mnt_flags);
2811 unlock_mount_hash();
2814 up_write(&sb->s_umount);
2817 mnt_warn_timestamp_expiry(path, &mnt->mnt);
2823 static inline int tree_contains_unbindable(struct mount *mnt)
2826 for (p = mnt; p; p = next_mnt(p, mnt)) {
2827 if (IS_MNT_UNBINDABLE(p))
2834 * Check that there aren't references to earlier/same mount namespaces in the
2835 * specified subtree. Such references can act as pins for mount namespaces
2836 * that aren't checked by the mount-cycle checking code, thereby allowing
2837 * cycles to be made.
2839 static bool check_for_nsfs_mounts(struct mount *subtree)
2845 for (p = subtree; p; p = next_mnt(p, subtree))
2846 if (mnt_ns_loop(p->mnt.mnt_root))
2851 unlock_mount_hash();
2855 static int do_set_group(struct path *from_path, struct path *to_path)
2857 struct mount *from, *to;
2860 from = real_mount(from_path->mnt);
2861 to = real_mount(to_path->mnt);
2866 /* To and From must be mounted */
2867 if (!is_mounted(&from->mnt))
2869 if (!is_mounted(&to->mnt))
2873 /* We should be allowed to modify mount namespaces of both mounts */
2874 if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
2876 if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
2880 /* To and From paths should be mount roots */
2881 if (from_path->dentry != from_path->mnt->mnt_root)
2883 if (to_path->dentry != to_path->mnt->mnt_root)
2886 /* Setting sharing groups is only allowed across same superblock */
2887 if (from->mnt.mnt_sb != to->mnt.mnt_sb)
2890 /* From mount root should be wider than To mount root */
2891 if (!is_subdir(to->mnt.mnt_root, from->mnt.mnt_root))
2894 /* From mount should not have locked children in place of To's root */
2895 if (has_locked_children(from, to->mnt.mnt_root))
2898 /* Setting sharing groups is only allowed on private mounts */
2899 if (IS_MNT_SHARED(to) || IS_MNT_SLAVE(to))
2902 /* From should not be private */
2903 if (!IS_MNT_SHARED(from) && !IS_MNT_SLAVE(from))
2906 if (IS_MNT_SLAVE(from)) {
2907 struct mount *m = from->mnt_master;
2909 list_add(&to->mnt_slave, &m->mnt_slave_list);
2913 if (IS_MNT_SHARED(from)) {
2914 to->mnt_group_id = from->mnt_group_id;
2915 list_add(&to->mnt_share, &from->mnt_share);
2918 unlock_mount_hash();
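/*
 * Illustrative userspace sketch (not part of this file): do_set_group() is
 * reached through move_mount(2) with MOVE_MOUNT_SET_GROUP and copies the
 * sharing group of one mount onto another instead of moving anything.
 *
 *	move_mount(AT_FDCWD, "/shared/src",
 *		   AT_FDCWD, "/private/dst", MOVE_MOUNT_SET_GROUP);
 */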
2927 static int do_move_mount(struct path *old_path, struct path *new_path)
2929 struct mnt_namespace *ns;
2932 struct mount *parent;
2933 struct mountpoint *mp, *old_mp;
2937 mp = lock_mount(new_path);
2941 old = real_mount(old_path->mnt);
2942 p = real_mount(new_path->mnt);
2943 parent = old->mnt_parent;
2944 attached = mnt_has_parent(old);
2945 old_mp = old->mnt_mp;
2949 /* The mountpoint must be in our namespace. */
2953 /* The thing moved must be mounted... */
2954 if (!is_mounted(&old->mnt))
2957 /* ... and either ours or the root of anon namespace */
2958 if (!(attached ? check_mnt(old) : is_anon_ns(ns)))
2961 if (old->mnt.mnt_flags & MNT_LOCKED)
2964 if (old_path->dentry != old_path->mnt->mnt_root)
2967 if (d_is_dir(new_path->dentry) !=
2968 d_is_dir(old_path->dentry))
2971 * Don't move a mount residing in a shared parent.
2973 if (attached && IS_MNT_SHARED(parent))
2976 * Don't move a mount tree containing unbindable mounts to a destination
2977 * mount which is shared.
2979 if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
2982 if (!check_for_nsfs_mounts(old))
2984 for (; mnt_has_parent(p); p = p->mnt_parent)
2988 err = attach_recursive_mnt(old, real_mount(new_path->mnt), mp,
2993 /* if the mount is moved, it should no longer be expired automatically */
2995 list_del_init(&old->mnt_expire);
2997 put_mountpoint(old_mp);
3002 mntput_no_expire(parent);
3009 static int do_move_mount_old(struct path *path, const char *old_name)
3011 struct path old_path;
3014 if (!old_name || !*old_name)
3017 err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
3021 err = do_move_mount(&old_path, path);
3022 path_put(&old_path);
3027 * add a mount into a namespace's mount tree
3029 static int do_add_mount(struct mount *newmnt, struct mountpoint *mp,
3030 const struct path *path, int mnt_flags)
3032 struct mount *parent = real_mount(path->mnt);
3034 mnt_flags &= ~MNT_INTERNAL_FLAGS;
3036 if (unlikely(!check_mnt(parent))) {
3037 /* that's acceptable only for automounts done in private ns */
3038 if (!(mnt_flags & MNT_SHRINKABLE))
3040 /* ... and for those we'd better have mountpoint still alive */
3041 if (!parent->mnt_ns)
3045 /* Refuse the same filesystem on the same mount point */
3046 if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
3047 path->mnt->mnt_root == path->dentry)
3050 if (d_is_symlink(newmnt->mnt.mnt_root))
3053 newmnt->mnt.mnt_flags = mnt_flags;
3054 return graft_tree(newmnt, parent, mp);
3057 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags);
3060 * Create a new mount using a superblock configuration and request it
3061 * be added to the namespace tree.
3063 static int do_new_mount_fc(struct fs_context *fc, struct path *mountpoint,
3064 unsigned int mnt_flags)
3066 struct vfsmount *mnt;
3067 struct mountpoint *mp;
3068 struct super_block *sb = fc->root->d_sb;
3071 error = security_sb_kern_mount(sb);
3072 if (!error && mount_too_revealing(sb, &mnt_flags))
3075 if (unlikely(error)) {
3080 up_write(&sb->s_umount);
3082 mnt = vfs_create_mount(fc);
3084 return PTR_ERR(mnt);
3086 mnt_warn_timestamp_expiry(mountpoint, mnt);
3088 mp = lock_mount(mountpoint);
3093 error = do_add_mount(real_mount(mnt), mp, mountpoint, mnt_flags);
3101 * create a new mount for userspace and request it to be added into the namespace's tree.
3104 static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
3105 int mnt_flags, const char *name, void *data)
3107 struct file_system_type *type;
3108 struct fs_context *fc;
3109 const char *subtype = NULL;
3115 type = get_fs_type(fstype);
3119 if (type->fs_flags & FS_HAS_SUBTYPE) {
3120 subtype = strchr(fstype, '.');
3124 put_filesystem(type);
3130 fc = fs_context_for_mount(type, sb_flags);
3131 put_filesystem(type);
3136 err = vfs_parse_fs_string(fc, "subtype",
3137 subtype, strlen(subtype));
3139 err = vfs_parse_fs_string(fc, "source", name, strlen(name));
3141 err = parse_monolithic_mount_data(fc, data);
3142 if (!err && !mount_capable(fc))
3145 err = vfs_get_tree(fc);
3147 err = do_new_mount_fc(fc, path, mnt_flags);
3153 int finish_automount(struct vfsmount *m, const struct path *path)
3155 struct dentry *dentry = path->dentry;
3156 struct mountpoint *mp;
3165 mnt = real_mount(m);
3166 /* The new mount record should have at least 2 refs to prevent it being
3167 * expired before we get a chance to add it
3169 BUG_ON(mnt_get_count(mnt) < 2);
3171 if (m->mnt_sb == path->mnt->mnt_sb &&
3172 m->mnt_root == dentry) {
3178 * we don't want to use lock_mount() - in this case finding something
3179 * that overmounts our mountpoint means "quietly drop what we've
3180 * got", not "try to mount it on top".
3182 inode_lock(dentry->d_inode);
3184 if (unlikely(cant_mount(dentry))) {
3186 goto discard_locked;
3189 if (unlikely(__lookup_mnt(path->mnt, dentry))) {
3192 goto discard_locked;
3195 mp = get_mountpoint(dentry);
3198 goto discard_locked;
3201 err = do_add_mount(mnt, mp, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
3210 inode_unlock(dentry->d_inode);
3212 /* remove m from any expiration list it may be on */
3213 if (!list_empty(&mnt->mnt_expire)) {
3215 list_del_init(&mnt->mnt_expire);
3224 * mnt_set_expiry - Put a mount on an expiration list
3225 * @mnt: The mount to list.
3226 * @expiry_list: The list to add the mount to.
3228 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
3232 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
3236 EXPORT_SYMBOL(mnt_set_expiry);
3239 * process a list of expirable mountpoints with the intent of discarding any
3240 * mountpoints that aren't in use and haven't been touched since we last came here.
3243 void mark_mounts_for_expiry(struct list_head *mounts)
3245 struct mount *mnt, *next;
3246 LIST_HEAD(graveyard);
3248 if (list_empty(mounts))
3254 /* extract from the expiration list every vfsmount that matches the
3255 * following criteria:
3256 * - only referenced by its parent vfsmount
3257 * - still marked for expiry (marked on the last call here; marks are
3258 * cleared by mntput())
3260 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
3261 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
3262 propagate_mount_busy(mnt, 1))
3264 list_move(&mnt->mnt_expire, &graveyard);
3266 while (!list_empty(&graveyard)) {
3267 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
3268 touch_mnt_namespace(mnt->mnt_ns);
3269 umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3271 unlock_mount_hash();
3275 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
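/*
 * Hedged in-kernel usage sketch (the list and work item names are
 * illustrative, not from this file): a filesystem that wants its
 * automounted submounts to go away when idle puts them on its own list
 * with mnt_set_expiry() and calls mark_mounts_for_expiry() periodically.
 * A mount is only torn down if it was already marked on the previous pass
 * and nothing but its parent still references it.
 *
 *	static LIST_HEAD(example_expiry_list);	// fs-local expiry list
 *
 *	// when finishing an automount:
 *	mnt_set_expiry(mnt, &example_expiry_list);
 *
 *	// from delayed work, e.g. every few minutes:
 *	mark_mounts_for_expiry(&example_expiry_list);
 */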
3278 * Ripoff of 'select_parent()'
3280 * search the list of submounts for a given mountpoint, and move any
3281 * shrinkable submounts to the 'graveyard' list.
3283 static int select_submounts(struct mount *parent, struct list_head *graveyard)
3285 struct mount *this_parent = parent;
3286 struct list_head *next;
3290 next = this_parent->mnt_mounts.next;
3292 while (next != &this_parent->mnt_mounts) {
3293 struct list_head *tmp = next;
3294 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
3297 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
3300 * Descend a level if the mnt_mounts list is non-empty.
3302 if (!list_empty(&mnt->mnt_mounts)) {
3307 if (!propagate_mount_busy(mnt, 1)) {
3308 list_move_tail(&mnt->mnt_expire, graveyard);
3313 * All done at this level ... ascend and resume the search
3315 if (this_parent != parent) {
3316 next = this_parent->mnt_child.next;
3317 this_parent = this_parent->mnt_parent;
3324 * process a list of expirable mountpoints with the intent of discarding any
3325 * submounts of a specific parent mountpoint
3327 * mount_lock must be held for write
3329 static void shrink_submounts(struct mount *mnt)
3331 LIST_HEAD(graveyard);
3334 /* extract submounts of 'mountpoint' from the expiration list */
3335 while (select_submounts(mnt, &graveyard)) {
3336 while (!list_empty(&graveyard)) {
3337 m = list_first_entry(&graveyard, struct mount,
3339 touch_mnt_namespace(m->mnt_ns);
3340 umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
3345 static void *copy_mount_options(const void __user * data)
3348 unsigned left, offset;
3353 copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
3355 return ERR_PTR(-ENOMEM);
3357 left = copy_from_user(copy, data, PAGE_SIZE);
3360 * Not all architectures have an exact copy_from_user(). Resort to copying the tail a byte at a time.
3363 offset = PAGE_SIZE - left;
3366 if (get_user(c, (const char __user *)data + offset))
3373 if (left == PAGE_SIZE) {
3375 return ERR_PTR(-EFAULT);
3381 static char *copy_mount_string(const void __user *data)
3383 return data ? strndup_user(data, PATH_MAX) : NULL;
3387 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
3388 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
3390 * data is a (void *) that can point to any structure up to
3391 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
3392 * information (or be NULL).
3394 * Pre-0.97 versions of mount() didn't have a flags word.
3395 * When the flags word was introduced its top half was required
3396 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
3397 * Therefore, if this magic number is present, it carries no information
3398 * and must be discarded.
3400 int path_mount(const char *dev_name, struct path *path,
3401 const char *type_page, unsigned long flags, void *data_page)
3403 unsigned int mnt_flags = 0, sb_flags;
3407 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
3408 flags &= ~MS_MGC_MSK;
3410 /* Basic sanity checks */
3412 ((char *)data_page)[PAGE_SIZE - 1] = 0;
3414 if (flags & MS_NOUSER)
3417 ret = security_sb_mount(dev_name, path, type_page, flags, data_page);
3422 if (flags & SB_MANDLOCK)
3425 /* Default to relatime unless overridden */
3426 if (!(flags & MS_NOATIME))
3427 mnt_flags |= MNT_RELATIME;
3429 /* Separate the per-mountpoint flags */
3430 if (flags & MS_NOSUID)
3431 mnt_flags |= MNT_NOSUID;
3432 if (flags & MS_NODEV)
3433 mnt_flags |= MNT_NODEV;
3434 if (flags & MS_NOEXEC)
3435 mnt_flags |= MNT_NOEXEC;
3436 if (flags & MS_NOATIME)
3437 mnt_flags |= MNT_NOATIME;
3438 if (flags & MS_NODIRATIME)
3439 mnt_flags |= MNT_NODIRATIME;
3440 if (flags & MS_STRICTATIME)
3441 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
3442 if (flags & MS_RDONLY)
3443 mnt_flags |= MNT_READONLY;
3444 if (flags & MS_NOSYMFOLLOW)
3445 mnt_flags |= MNT_NOSYMFOLLOW;
3447 /* The default atime for remount is preservation */
3448 if ((flags & MS_REMOUNT) &&
3449 ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
3450 MS_STRICTATIME)) == 0)) {
3451 mnt_flags &= ~MNT_ATIME_MASK;
3452 mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK;
3455 sb_flags = flags & (SB_RDONLY |
3464 if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND))
3465 return do_reconfigure_mnt(path, mnt_flags);
3466 if (flags & MS_REMOUNT)
3467 return do_remount(path, flags, sb_flags, mnt_flags, data_page);
3468 if (flags & MS_BIND)
3469 return do_loopback(path, dev_name, flags & MS_REC);
3470 if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
3471 return do_change_type(path, flags);
3472 if (flags & MS_MOVE)
3473 return do_move_mount_old(path, dev_name);
3475 return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name,
3479 long do_mount(const char *dev_name, const char __user *dir_name,
3480 const char *type_page, unsigned long flags, void *data_page)
3485 ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
3488 ret = path_mount(dev_name, &path, type_page, flags, data_page);
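/*
 * Illustrative userspace sketch (not part of this file): the classic
 * mount(2) entry point combines per-mount flags with an fs-specific data
 * string of at most one page, as documented above path_mount().
 *
 *	mount("/dev/sda1", "/mnt", "ext4",
 *	      MS_NOSUID | MS_NODEV | MS_RELATIME, "errors=remount-ro");
 */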
3493 static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
3495 return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
3498 static void dec_mnt_namespaces(struct ucounts *ucounts)
3500 dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
3503 static void free_mnt_ns(struct mnt_namespace *ns)
3505 if (!is_anon_ns(ns))
3506 ns_free_inum(&ns->ns);
3507 dec_mnt_namespaces(ns->ucounts);
3508 put_user_ns(ns->user_ns);
3513 * Assign a sequence number so we can detect when we attempt to bind
3514 * mount a reference to an older mount namespace into the current
3515 * mount namespace, preventing reference counting loops. A 64bit
3516 * counter incrementing even at 10GHz would take roughly 58 years to
3517 * wrap, and real creation rates are vastly lower, so we can ignore the possibility.
3519 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
3521 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool anon)
3523 struct mnt_namespace *new_ns;
3524 struct ucounts *ucounts;
3527 ucounts = inc_mnt_namespaces(user_ns);
3529 return ERR_PTR(-ENOSPC);
3531 new_ns = kzalloc(sizeof(struct mnt_namespace), GFP_KERNEL_ACCOUNT);
3533 dec_mnt_namespaces(ucounts);
3534 return ERR_PTR(-ENOMEM);
3537 ret = ns_alloc_inum(&new_ns->ns);
3540 dec_mnt_namespaces(ucounts);
3541 return ERR_PTR(ret);
3544 new_ns->ns.ops = &mntns_operations;
3546 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
3547 refcount_set(&new_ns->ns.count, 1);
3548 INIT_LIST_HEAD(&new_ns->list);
3549 init_waitqueue_head(&new_ns->poll);
3550 spin_lock_init(&new_ns->ns_lock);
3551 new_ns->user_ns = get_user_ns(user_ns);
3552 new_ns->ucounts = ucounts;
3557 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
3558 struct user_namespace *user_ns, struct fs_struct *new_fs)
3560 struct mnt_namespace *new_ns;
3561 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
3562 struct mount *p, *q;
3569 if (likely(!(flags & CLONE_NEWNS))) {
3576 new_ns = alloc_mnt_ns(user_ns, false);
3581 /* First pass: copy the tree topology */
3582 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
3583 if (user_ns != ns->user_ns)
3584 copy_flags |= CL_SHARED_TO_SLAVE;
3585 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
3588 free_mnt_ns(new_ns);
3589 return ERR_CAST(new);
3591 if (user_ns != ns->user_ns) {
3594 unlock_mount_hash();
3597 list_add_tail(&new_ns->list, &new->mnt_list);
3600 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
3601 * as belonging to new namespace. We have already acquired a private
3602 * fs_struct, so tsk->fs->lock is not needed.
3610 if (&p->mnt == new_fs->root.mnt) {
3611 new_fs->root.mnt = mntget(&q->mnt);
3614 if (&p->mnt == new_fs->pwd.mnt) {
3615 new_fs->pwd.mnt = mntget(&q->mnt);
3619 p = next_mnt(p, old);
3620 q = next_mnt(q, new);
3623 // an mntns binding we'd skipped?
3624 while (p->mnt.mnt_root != q->mnt.mnt_root)
3625 p = next_mnt(skip_mnt_tree(p), old);
3637 struct dentry *mount_subtree(struct vfsmount *m, const char *name)
3639 struct mount *mnt = real_mount(m);
3640 struct mnt_namespace *ns;
3641 struct super_block *s;
3645 ns = alloc_mnt_ns(&init_user_ns, true);
3648 return ERR_CAST(ns);
3653 list_add(&mnt->mnt_list, &ns->list);
3655 err = vfs_path_lookup(m->mnt_root, m,
3656 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
3661 return ERR_PTR(err);
3663 /* trade a vfsmount reference for active sb one */
3664 s = path.mnt->mnt_sb;
3665 atomic_inc(&s->s_active);
3667 /* lock the sucker */
3668 down_write(&s->s_umount);
3669 /* ... and return the root of (sub)tree on it */
3672 EXPORT_SYMBOL(mount_subtree);
3674 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
3675 char __user *, type, unsigned long, flags, void __user *, data)
3682 kernel_type = copy_mount_string(type);
3683 ret = PTR_ERR(kernel_type);
3684 if (IS_ERR(kernel_type))
3687 kernel_dev = copy_mount_string(dev_name);
3688 ret = PTR_ERR(kernel_dev);
3689 if (IS_ERR(kernel_dev))
3692 options = copy_mount_options(data);
3693 ret = PTR_ERR(options);
3694 if (IS_ERR(options))
3697 ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);
3708 #define FSMOUNT_VALID_FLAGS \
3709 (MOUNT_ATTR_RDONLY | MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | \
3710 MOUNT_ATTR_NOEXEC | MOUNT_ATTR__ATIME | MOUNT_ATTR_NODIRATIME | \
3711 MOUNT_ATTR_NOSYMFOLLOW)
3713 #define MOUNT_SETATTR_VALID_FLAGS (FSMOUNT_VALID_FLAGS | MOUNT_ATTR_IDMAP)
3715 #define MOUNT_SETATTR_PROPAGATION_FLAGS \
3716 (MS_UNBINDABLE | MS_PRIVATE | MS_SLAVE | MS_SHARED)
3718 static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
3720 unsigned int mnt_flags = 0;
3722 if (attr_flags & MOUNT_ATTR_RDONLY)
3723 mnt_flags |= MNT_READONLY;
3724 if (attr_flags & MOUNT_ATTR_NOSUID)
3725 mnt_flags |= MNT_NOSUID;
3726 if (attr_flags & MOUNT_ATTR_NODEV)
3727 mnt_flags |= MNT_NODEV;
3728 if (attr_flags & MOUNT_ATTR_NOEXEC)
3729 mnt_flags |= MNT_NOEXEC;
3730 if (attr_flags & MOUNT_ATTR_NODIRATIME)
3731 mnt_flags |= MNT_NODIRATIME;
3732 if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
3733 mnt_flags |= MNT_NOSYMFOLLOW;
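/*
 * Illustrative userspace sketch (not part of this file; older libcs may
 * need syscall(2) wrappers): fsmount() is one step of the new mount API.
 * A filesystem context is created and configured first, then turned into
 * a detached mount and finally attached with move_mount(2).
 *
 *	int fsfd = fsopen("ext4", FSOPEN_CLOEXEC);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NOSUID);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */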
3739 * Create a kernel mount representation for a new, prepared superblock
3740 * (specified by fs_fd) and attach to an open_tree-like file descriptor.
3742 SYSCALL_DEFINE3(fsmount, int, fs_fd, unsigned int, flags,
3743 unsigned int, attr_flags)
3745 struct mnt_namespace *ns;
3746 struct fs_context *fc;
3748 struct path newmount;
3751 unsigned int mnt_flags = 0;
3757 if ((flags & ~(FSMOUNT_CLOEXEC)) != 0)
3760 if (attr_flags & ~FSMOUNT_VALID_FLAGS)
3763 mnt_flags = attr_flags_to_mnt_flags(attr_flags);
3765 switch (attr_flags & MOUNT_ATTR__ATIME) {
3766 case MOUNT_ATTR_STRICTATIME:
3768 case MOUNT_ATTR_NOATIME:
3769 mnt_flags |= MNT_NOATIME;
3771 case MOUNT_ATTR_RELATIME:
3772 mnt_flags |= MNT_RELATIME;
3783 if (f.file->f_op != &fscontext_fops)
3786 fc = f.file->private_data;
3788 ret = mutex_lock_interruptible(&fc->uapi_mutex);
3792 /* There must be a valid superblock or we can't mount it */
3798 if (mount_too_revealing(fc->root->d_sb, &mnt_flags)) {
3799 pr_warn("VFS: Mount too revealing\n");
3804 if (fc->phase != FS_CONTEXT_AWAITING_MOUNT)
3807 if (fc->sb_flags & SB_MANDLOCK)
3810 newmount.mnt = vfs_create_mount(fc);
3811 if (IS_ERR(newmount.mnt)) {
3812 ret = PTR_ERR(newmount.mnt);
3815 newmount.dentry = dget(fc->root);
3816 newmount.mnt->mnt_flags = mnt_flags;
3818 /* We've done the mount bit - now move the file context into more or
3819 * less the same state as if we'd done an fspick(). We don't want to
3820 * do any memory allocation or anything like that at this point as we
3821 * don't want to have to handle any errors incurred.
3823 vfs_clean_context(fc);
3825 ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true);
3830 mnt = real_mount(newmount.mnt);
3834 list_add(&mnt->mnt_list, &ns->list);
3835 mntget(newmount.mnt);
3837 /* Attach to an apparent O_PATH fd with a note that we need to unmount
3838 * it, not just simply put it.
3840 file = dentry_open(&newmount, O_PATH, fc->cred);
3842 dissolve_on_fput(newmount.mnt);
3843 ret = PTR_ERR(file);
3846 file->f_mode |= FMODE_NEED_UNMOUNT;
3848 ret = get_unused_fd_flags((flags & FSMOUNT_CLOEXEC) ? O_CLOEXEC : 0);
3850 fd_install(ret, file);
3855 path_put(&newmount);
3857 mutex_unlock(&fc->uapi_mutex);
3864 * Move a mount from one place to another. In combination with
3865 * fsopen()/fsmount() this is used to install a new mount and in combination
3866 * with open_tree(OPEN_TREE_CLONE [| AT_RECURSIVE]) it can be used to copy
3869 * Note the flags value is a combination of MOVE_MOUNT_* flags.
3871 SYSCALL_DEFINE5(move_mount,
3872 int, from_dfd, const char __user *, from_pathname,
3873 int, to_dfd, const char __user *, to_pathname,
3874 unsigned int, flags)
3876 struct path from_path, to_path;
3877 unsigned int lflags;
3883 if (flags & ~MOVE_MOUNT__MASK)
3886 /* If someone gives a pathname, they aren't permitted to move
3887 * from an fd that requires unmount as we can't get at the flag
3888 * to clear it afterwards.
3891 if (flags & MOVE_MOUNT_F_SYMLINKS) lflags |= LOOKUP_FOLLOW;
3892 if (flags & MOVE_MOUNT_F_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
3893 if (flags & MOVE_MOUNT_F_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
3895 ret = user_path_at(from_dfd, from_pathname, lflags, &from_path);
3900 if (flags & MOVE_MOUNT_T_SYMLINKS) lflags |= LOOKUP_FOLLOW;
3901 if (flags & MOVE_MOUNT_T_AUTOMOUNTS) lflags |= LOOKUP_AUTOMOUNT;
3902 if (flags & MOVE_MOUNT_T_EMPTY_PATH) lflags |= LOOKUP_EMPTY;
3904 ret = user_path_at(to_dfd, to_pathname, lflags, &to_path);
3908 ret = security_move_mount(&from_path, &to_path);
3912 if (flags & MOVE_MOUNT_SET_GROUP)
3913 ret = do_set_group(&from_path, &to_path);
3915 ret = do_move_mount(&from_path, &to_path);
3920 path_put(&from_path);
3925 * Return true if path is reachable from root
3927 * namespace_sem or mount_lock is held
3929 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
3930 const struct path *root)
3932 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
3933 dentry = mnt->mnt_mountpoint;
3934 mnt = mnt->mnt_parent;
3936 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
3939 bool path_is_under(const struct path *path1, const struct path *path2)
3942 read_seqlock_excl(&mount_lock);
3943 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
3944 read_sequnlock_excl(&mount_lock);
3947 EXPORT_SYMBOL(path_is_under);
3950 * pivot_root Semantics:
3951 * Moves the root file system of the current process to the directory put_old,
3952 * makes new_root the new root file system of the current process, and sets
3953 * root/cwd of all processes which had them on the current root to new_root.
3956 * The new_root and put_old must be directories, and must not be on the
3957 * same file system as the current process root. The put_old must be
3958 * underneath new_root, i.e. adding a non-zero number of /.. to the string
3959 * pointed to by put_old must yield the same directory as new_root. No other
3960 * file system may be mounted on put_old. After all, new_root is a mountpoint.
3962 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
3963 * See Documentation/filesystems/ramfs-rootfs-initramfs.rst for alternatives
3964 * in this situation.
3967 * - we don't move root/cwd if they are not at the root (reason: if something
3968 * cared enough to change them, it's probably wrong to force them elsewhere)
3969 * - it's okay to pick a root that isn't the root of a file system, e.g.
3970 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
3971 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
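/*
 * Illustrative userspace sketch (not part of this file; pivot_root has no
 * dedicated libc wrapper on many systems, so syscall(SYS_pivot_root, ...)
 * may be needed): the common container-setup sequence obeying the rules
 * above, with new_root already a mount point and put_old beneath it.
 *
 *	chdir("/newroot");
 *	pivot_root(".", "./oldroot");
 *	umount2("./oldroot", MNT_DETACH);
 *	chdir("/");
 */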
3974 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
3975 const char __user *, put_old)
3977 struct path new, old, root;
3978 struct mount *new_mnt, *root_mnt, *old_mnt, *root_parent, *ex_parent;
3979 struct mountpoint *old_mp, *root_mp;
3985 error = user_path_at(AT_FDCWD, new_root,
3986 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
3990 error = user_path_at(AT_FDCWD, put_old,
3991 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
3995 error = security_sb_pivotroot(&old, &new);
3999 get_fs_root(current->fs, &root);
4000 old_mp = lock_mount(&old);
4001 error = PTR_ERR(old_mp);
4006 new_mnt = real_mount(new.mnt);
4007 root_mnt = real_mount(root.mnt);
4008 old_mnt = real_mount(old.mnt);
4009 ex_parent = new_mnt->mnt_parent;
4010 root_parent = root_mnt->mnt_parent;
4011 if (IS_MNT_SHARED(old_mnt) ||
4012 IS_MNT_SHARED(ex_parent) ||
4013 IS_MNT_SHARED(root_parent))
4015 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
4017 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
4020 if (d_unlinked(new.dentry))
4023 if (new_mnt == root_mnt || old_mnt == root_mnt)
4024 goto out4; /* loop, on the same file system */
4026 if (root.mnt->mnt_root != root.dentry)
4027 goto out4; /* not a mountpoint */
4028 if (!mnt_has_parent(root_mnt))
4029 goto out4; /* not attached */
4030 if (new.mnt->mnt_root != new.dentry)
4031 goto out4; /* not a mountpoint */
4032 if (!mnt_has_parent(new_mnt))
4033 goto out4; /* not attached */
4034 /* make sure we can reach put_old from new_root */
4035 if (!is_path_reachable(old_mnt, old.dentry, &new))
4037 /* make certain new is below the root */
4038 if (!is_path_reachable(new_mnt, new.dentry, &root))
4041 umount_mnt(new_mnt);
4042 root_mp = unhash_mnt(root_mnt); /* we'll need its mountpoint */
4043 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
4044 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
4045 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
4047 /* mount old root on put_old */
4048 attach_mnt(root_mnt, old_mnt, old_mp);
4049 /* mount new_root on / */
4050 attach_mnt(new_mnt, root_parent, root_mp);
4051 mnt_add_count(root_parent, -1);
4052 touch_mnt_namespace(current->nsproxy->mnt_ns);
4053 /* A moved mount should not expire automatically */
4054 list_del_init(&new_mnt->mnt_expire);
4055 put_mountpoint(root_mp);
4056 unlock_mount_hash();
4057 chroot_fs_refs(&root, &new);
4060 unlock_mount(old_mp);
4062 mntput_no_expire(ex_parent);
4073 static unsigned int recalc_flags(struct mount_kattr *kattr, struct mount *mnt)
4075 unsigned int flags = mnt->mnt.mnt_flags;
4077 /* flags to clear */
4078 flags &= ~kattr->attr_clr;
4079 /* flags to raise */
4080 flags |= kattr->attr_set;
4085 static int can_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4087 struct vfsmount *m = &mnt->mnt;
4088 struct user_namespace *fs_userns = m->mnt_sb->s_user_ns;
4090 if (!kattr->mnt_idmap)
4094 * Creating an idmapped mount with the filesystem wide idmapping
4095 * doesn't make sense, so block that. We don't allow mushy semantics.
4097 if (mnt_idmap_owner(kattr->mnt_idmap) == fs_userns)
4101 * Once a mount has been idmapped we don't allow it to change its
4102 * mapping. It makes things simpler and callers can just create
4103 * another bind-mount they can idmap if they want to.
4105 if (is_idmapped_mnt(m))
4108 /* The underlying filesystem doesn't support idmapped mounts yet. */
4109 if (!(m->mnt_sb->s_type->fs_flags & FS_ALLOW_IDMAP))
4112 /* We're not controlling the superblock. */
4113 if (!ns_capable(fs_userns, CAP_SYS_ADMIN))
4116 /* Mount has already been visible in the filesystem hierarchy. */
4117 if (!is_anon_ns(mnt->mnt_ns))
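/*
 * Illustrative userspace sketch (not part of this file): the checks above
 * mean an idmapped mount is normally created on a freshly detached copy,
 * with a user namespace fd naming the wanted mapping; userns_fd here is an
 * assumed fd on a /proc/<pid>/ns/user file.
 *
 *	int fd = open_tree(AT_FDCWD, "/srv/data",
 *			   OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
 *	struct mount_attr attr = {
 *		.attr_set	= MOUNT_ATTR_IDMAP,
 *		.userns_fd	= userns_fd,
 *	};
 *	mount_setattr(fd, "", AT_EMPTY_PATH, &attr, sizeof(attr));
 *	move_mount(fd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */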
4124 * mnt_allow_writers() - check whether the attribute change allows writers
4125 * @kattr: the new mount attributes
4126 * @mnt: the mount to which @kattr will be applied
4128 * Check whether the new mount attributes in @kattr allow concurrent writers.
4130 * Return: true if concurrent writers are allowed, false if they must be held off
4132 static inline bool mnt_allow_writers(const struct mount_kattr *kattr,
4133 const struct mount *mnt)
4135 return (!(kattr->attr_set & MNT_READONLY) ||
4136 (mnt->mnt.mnt_flags & MNT_READONLY)) &&
4140 static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
4145 for (m = mnt; m; m = next_mnt(m, mnt)) {
4146 if (!can_change_locked_flags(m, recalc_flags(kattr, m))) {
4151 err = can_idmap_mount(kattr, m);
4155 if (!mnt_allow_writers(kattr, m)) {
4156 err = mnt_hold_writers(m);
4161 if (!kattr->recurse)
4169 * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
4170 * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
4171 * mounts and needs to take care to include the first mount.
4173 for (p = mnt; p; p = next_mnt(p, mnt)) {
4174 /* If we had to hold writers unblock them. */
4175 if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
4176 mnt_unhold_writers(p);
4179 * We're done once the first mount we changed got
4180 * MNT_WRITE_HOLD unset.
4189 static void do_idmap_mount(const struct mount_kattr *kattr, struct mount *mnt)
4191 if (!kattr->mnt_idmap)
4195 * Pairs with smp_load_acquire() in mnt_idmap().
4197 * Since we only allow a mount to change the idmapping once and
4198 * verified this in can_idmap_mount() we know that the mount has
4199 * @nop_mnt_idmap attached to it. So there's no need to drop any references.
4202 smp_store_release(&mnt->mnt.mnt_idmap, mnt_idmap_get(kattr->mnt_idmap));
4205 static void mount_setattr_commit(struct mount_kattr *kattr, struct mount *mnt)
4209 for (m = mnt; m; m = next_mnt(m, mnt)) {
4212 do_idmap_mount(kattr, m);
4213 flags = recalc_flags(kattr, m);
4214 WRITE_ONCE(m->mnt.mnt_flags, flags);
4216 /* If we had to hold writers unblock them. */
4217 if (m->mnt.mnt_flags & MNT_WRITE_HOLD)
4218 mnt_unhold_writers(m);
4220 if (kattr->propagation)
4221 change_mnt_propagation(m, kattr->propagation);
4222 if (!kattr->recurse)
4225 touch_mnt_namespace(mnt->mnt_ns);
4228 static int do_mount_setattr(struct path *path, struct mount_kattr *kattr)
4230 struct mount *mnt = real_mount(path->mnt);
4233 if (path->dentry != mnt->mnt.mnt_root)
4236 if (kattr->mnt_userns) {
4237 struct mnt_idmap *mnt_idmap;
4239 mnt_idmap = alloc_mnt_idmap(kattr->mnt_userns);
4240 if (IS_ERR(mnt_idmap))
4241 return PTR_ERR(mnt_idmap);
4242 kattr->mnt_idmap = mnt_idmap;
4245 if (kattr->propagation) {
4247 * Only take namespace_lock() if we're actually changing propagation settings.
4251 if (kattr->propagation == MS_SHARED) {
4252 err = invent_group_ids(mnt, kattr->recurse);
4263 /* Ensure that this isn't anything purely vfs internal. */
4264 if (!is_mounted(&mnt->mnt))
4268 * If this is an attached mount make sure it's located in the caller's
4269 * mount namespace. If it's not don't let the caller interact with it.
4270 * If this is a detached mount make sure it has an anonymous mount
4271 * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE.
4273 if (!(mnt_has_parent(mnt) ? check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
4277 * First, we get the mount tree in a shape where we can change mount
4278 * properties without failure. If we succeed we commit all changes
4279 * and if we fail we clean up.
4281 err = mount_setattr_prepare(kattr, mnt);
4283 mount_setattr_commit(kattr, mnt);
4286 unlock_mount_hash();
4288 if (kattr->propagation) {
4291 cleanup_group_ids(mnt, NULL);
4297 static int build_mount_idmapped(const struct mount_attr *attr, size_t usize,
4298 struct mount_kattr *kattr, unsigned int flags)
4301 struct ns_common *ns;
4302 struct user_namespace *mnt_userns;
4305 if (!((attr->attr_set | attr->attr_clr) & MOUNT_ATTR_IDMAP))
4309 * We currently do not support clearing an idmapped mount. If this ever
4310 * is a use-case we can revisit this but for now let's keep it simple and not allow it.
4313 if (attr->attr_clr & MOUNT_ATTR_IDMAP)
4316 if (attr->userns_fd > INT_MAX)
4319 file = fget(attr->userns_fd);
4323 if (!proc_ns_file(file)) {
4328 ns = get_proc_ns(file_inode(file));
4329 if (ns->ops->type != CLONE_NEWUSER) {
4335 * The initial idmapping cannot be used to create an idmapped
4336 * mount. We use the initial idmapping as an indicator of a mount
4337 * that is not idmapped. It can simply be passed into helpers that
4338 * are aware of idmapped mounts as a convenient shortcut. A user
4339 * can just create a dedicated identity mapping to achieve the same result.
4342 mnt_userns = container_of(ns, struct user_namespace, ns);
4343 if (initial_idmapping(mnt_userns)) {
4348 /* We're not controlling the target namespace. */
4349 if (!ns_capable(mnt_userns, CAP_SYS_ADMIN)) {
4354 kattr->mnt_userns = get_user_ns(mnt_userns);
4361 static int build_mount_kattr(const struct mount_attr *attr, size_t usize,
4362 struct mount_kattr *kattr, unsigned int flags)
4364 unsigned int lookup_flags = LOOKUP_AUTOMOUNT | LOOKUP_FOLLOW;
4366 if (flags & AT_NO_AUTOMOUNT)
4367 lookup_flags &= ~LOOKUP_AUTOMOUNT;
4368 if (flags & AT_SYMLINK_NOFOLLOW)
4369 lookup_flags &= ~LOOKUP_FOLLOW;
4370 if (flags & AT_EMPTY_PATH)
4371 lookup_flags |= LOOKUP_EMPTY;
4373 *kattr = (struct mount_kattr) {
4374 .lookup_flags = lookup_flags,
4375 .recurse = !!(flags & AT_RECURSIVE),
4378 if (attr->propagation & ~MOUNT_SETATTR_PROPAGATION_FLAGS)
4380 if (hweight32(attr->propagation & MOUNT_SETATTR_PROPAGATION_FLAGS) > 1)
4382 kattr->propagation = attr->propagation;
4384 if ((attr->attr_set | attr->attr_clr) & ~MOUNT_SETATTR_VALID_FLAGS)
4387 kattr->attr_set = attr_flags_to_mnt_flags(attr->attr_set);
4388 kattr->attr_clr = attr_flags_to_mnt_flags(attr->attr_clr);
4391 * Since the MOUNT_ATTR_<atime> values are an enum, not a bitmap,
4392 * users wanting to transition to a different atime setting cannot
4393 * simply specify the atime setting in @attr_set, but must also
4394 * specify MOUNT_ATTR__ATIME in the @attr_clr field.
4395 * So ensure that MOUNT_ATTR__ATIME can't be partially set in
4396 * @attr_clr and that @attr_set can't have any atime bits set if
4397 * MOUNT_ATTR__ATIME isn't set in @attr_clr.
4399 if (attr->attr_clr & MOUNT_ATTR__ATIME) {
4400 if ((attr->attr_clr & MOUNT_ATTR__ATIME) != MOUNT_ATTR__ATIME)
4404 * Clear all previous time settings as they are mutually exclusive.
4407 kattr->attr_clr |= MNT_RELATIME | MNT_NOATIME;
4408 switch (attr->attr_set & MOUNT_ATTR__ATIME) {
4409 case MOUNT_ATTR_RELATIME:
4410 kattr->attr_set |= MNT_RELATIME;
4412 case MOUNT_ATTR_NOATIME:
4413 kattr->attr_set |= MNT_NOATIME;
4415 case MOUNT_ATTR_STRICTATIME:
4421 if (attr->attr_set & MOUNT_ATTR__ATIME)
4425 return build_mount_idmapped(attr, usize, kattr, flags);
4428 static void finish_mount_kattr(struct mount_kattr *kattr)
4430 put_user_ns(kattr->mnt_userns);
4431 kattr->mnt_userns = NULL;
4433 if (kattr->mnt_idmap)
4434 mnt_idmap_put(kattr->mnt_idmap);
4437 SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
4438 unsigned int, flags, struct mount_attr __user *, uattr,
4443 struct mount_attr attr;
4444 struct mount_kattr kattr;
4446 BUILD_BUG_ON(sizeof(struct mount_attr) != MOUNT_ATTR_SIZE_VER0);
4448 if (flags & ~(AT_EMPTY_PATH |
4450 AT_SYMLINK_NOFOLLOW |
4454 if (unlikely(usize > PAGE_SIZE))
4456 if (unlikely(usize < MOUNT_ATTR_SIZE_VER0))
4462 err = copy_struct_from_user(&attr, sizeof(attr), uattr, usize);
4466 /* Don't bother walking through the mounts if this is a nop. */
4467 if (attr.attr_set == 0 &&
4468 attr.attr_clr == 0 &&
4469 attr.propagation == 0)
4472 err = build_mount_kattr(&attr, usize, &kattr, flags);
4476 err = user_path_at(dfd, path, kattr.lookup_flags, &target);
4478 err = do_mount_setattr(&target, &kattr);
4481 finish_mount_kattr(&kattr);
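/*
 * Illustrative userspace sketch (not part of this file): because the atime
 * settings are an enum rather than a bitmap, switching them through
 * mount_setattr(2) requires clearing MOUNT_ATTR__ATIME while setting the
 * new value, exactly as build_mount_kattr() enforces above.
 *
 *	struct mount_attr attr = {
 *		.attr_set	= MOUNT_ATTR_NOATIME,
 *		.attr_clr	= MOUNT_ATTR__ATIME,
 *	};
 *	mount_setattr(AT_FDCWD, "/mnt", AT_RECURSIVE, &attr, sizeof(attr));
 */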
4485 static void __init init_mount_tree(void)
4487 struct vfsmount *mnt;
4489 struct mnt_namespace *ns;
4492 mnt = vfs_kern_mount(&rootfs_fs_type, 0, "rootfs", NULL);
4494 panic("Can't create rootfs");
4496 ns = alloc_mnt_ns(&init_user_ns, false);
4498 panic("Can't allocate initial namespace");
4499 m = real_mount(mnt);
4503 list_add(&m->mnt_list, &ns->list);
4504 init_task.nsproxy->mnt_ns = ns;
4508 root.dentry = mnt->mnt_root;
4509 mnt->mnt_flags |= MNT_LOCKED;
4511 set_fs_pwd(current->fs, &root);
4512 set_fs_root(current->fs, &root);
4515 void __init mnt_init(void)
4519 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
4520 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
4522 mount_hashtable = alloc_large_system_hash("Mount-cache",
4523 sizeof(struct hlist_head),
4526 &m_hash_shift, &m_hash_mask, 0, 0);
4527 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
4528 sizeof(struct hlist_head),
4531 &mp_hash_shift, &mp_hash_mask, 0, 0);
4533 if (!mount_hashtable || !mountpoint_hashtable)
4534 panic("Failed to allocate mount hash table\n");
4540 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
4542 fs_kobj = kobject_create_and_add("fs", NULL);
4544 printk(KERN_WARNING "%s: kobj create error\n", __func__);
4550 void put_mnt_ns(struct mnt_namespace *ns)
4552 if (!refcount_dec_and_test(&ns->ns.count))
4554 drop_collected_mounts(&ns->root->mnt);
4558 struct vfsmount *kern_mount(struct file_system_type *type)
4560 struct vfsmount *mnt;
4561 mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
4564 * it is a long-term mount; don't release mnt until
4565 * we unmount it, before the filesystem is unregistered
4567 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
4571 EXPORT_SYMBOL_GPL(kern_mount);
4573 void kern_unmount(struct vfsmount *mnt)
4575 /* release long term mount so mount point can be released */
4576 if (!IS_ERR_OR_NULL(mnt)) {
4577 real_mount(mnt)->mnt_ns = NULL;
4578 synchronize_rcu(); /* yecchhh... */
4582 EXPORT_SYMBOL(kern_unmount);
4584 void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
4588 for (i = 0; i < num; i++)
4590 real_mount(mnt[i])->mnt_ns = NULL;
4591 synchronize_rcu_expedited();
4592 for (i = 0; i < num; i++)
4595 EXPORT_SYMBOL(kern_unmount_array);
4597 bool our_mnt(struct vfsmount *mnt)
4599 return check_mnt(real_mount(mnt));
4602 bool current_chrooted(void)
4604 /* Does the current process have a non-standard root? */
4605 struct path ns_root;
4606 struct path fs_root;
4609 /* Find the namespace root */
4610 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
4611 ns_root.dentry = ns_root.mnt->mnt_root;
4613 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
4616 get_fs_root(current->fs, &fs_root);
4618 chrooted = !path_equal(&fs_root, &ns_root);
4626 static bool mnt_already_visible(struct mnt_namespace *ns,
4627 const struct super_block *sb,
4630 int new_flags = *new_mnt_flags;
4632 bool visible = false;
4634 down_read(&namespace_sem);
4636 list_for_each_entry(mnt, &ns->list, mnt_list) {
4637 struct mount *child;
4640 if (mnt_is_cursor(mnt))
4643 if (mnt->mnt.mnt_sb->s_type != sb->s_type)
4646 /* This mount is not fully visible if its root directory
4647 * is not the root directory of the filesystem.
4649 if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
4652 /* A local view of the mount flags */
4653 mnt_flags = mnt->mnt.mnt_flags;
4655 /* Don't miss readonly hidden in the superblock flags */
4656 if (sb_rdonly(mnt->mnt.mnt_sb))
4657 mnt_flags |= MNT_LOCK_READONLY;
4659 /* Verify the mount flags are equal to or more permissive
4660 * than the proposed new mount.
4662 if ((mnt_flags & MNT_LOCK_READONLY) &&
4663 !(new_flags & MNT_READONLY))
4665 if ((mnt_flags & MNT_LOCK_ATIME) &&
4666 ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
4669 /* This mount is not fully visible if there are any
4670 * locked child mounts that cover anything except for
4671 * empty directories.
4673 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4674 struct inode *inode = child->mnt_mountpoint->d_inode;
4675 /* Only worry about locked mounts */
4676 if (!(child->mnt.mnt_flags & MNT_LOCKED))
4678 /* Is the directory permanently empty? */
4679 if (!is_empty_dir_inode(inode))
4682 /* Preserve the locked attributes */
4683 *new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
4691 up_read(&namespace_sem);
4695 static bool mount_too_revealing(const struct super_block *sb, int *new_mnt_flags)
4697 const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
4698 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
4699 unsigned long s_iflags;
4701 if (ns->user_ns == &init_user_ns)
4704 /* Can this filesystem be too revealing? */
4705 s_iflags = sb->s_iflags;
4706 if (!(s_iflags & SB_I_USERNS_VISIBLE))
4709 if ((s_iflags & required_iflags) != required_iflags) {
4710 WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
4715 return !mnt_already_visible(ns, sb, new_mnt_flags);
4718 bool mnt_may_suid(struct vfsmount *mnt)
4721 * Foreign mounts (accessed via fchdir or through /proc
4722 * symlinks) are always treated as if they are nosuid. This
4723 * prevents namespaces from trusting potentially unsafe
4724 * suid/sgid bits, file caps, or security labels that originate
4725 * in other namespaces.
4727 return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
4728 current_in_userns(mnt->mnt_sb->s_user_ns);
4731 static struct ns_common *mntns_get(struct task_struct *task)
4733 struct ns_common *ns = NULL;
4734 struct nsproxy *nsproxy;
4737 nsproxy = task->nsproxy;
4739 ns = &nsproxy->mnt_ns->ns;
4740 get_mnt_ns(to_mnt_ns(ns));
4747 static void mntns_put(struct ns_common *ns)
4749 put_mnt_ns(to_mnt_ns(ns));
4752 static int mntns_install(struct nsset *nsset, struct ns_common *ns)
4754 struct nsproxy *nsproxy = nsset->nsproxy;
4755 struct fs_struct *fs = nsset->fs;
4756 struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
4757 struct user_namespace *user_ns = nsset->cred->user_ns;
4761 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
4762 !ns_capable(user_ns, CAP_SYS_CHROOT) ||
4763 !ns_capable(user_ns, CAP_SYS_ADMIN))
4766 if (is_anon_ns(mnt_ns))
4773 old_mnt_ns = nsproxy->mnt_ns;
4774 nsproxy->mnt_ns = mnt_ns;
4777 err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
4778 "/", LOOKUP_DOWN, &root);
4780 /* revert to old namespace */
4781 nsproxy->mnt_ns = old_mnt_ns;
4786 put_mnt_ns(old_mnt_ns);
4788 /* Update the pwd and root */
4789 set_fs_pwd(fs, &root);
4790 set_fs_root(fs, &root);
4796 static struct user_namespace *mntns_owner(struct ns_common *ns)
4798 return to_mnt_ns(ns)->user_ns;
4801 const struct proc_ns_operations mntns_operations = {
4803 .type = CLONE_NEWNS,
4806 .install = mntns_install,
4807 .owner = mntns_owner,
4810 #ifdef CONFIG_SYSCTL
4811 static struct ctl_table fs_namespace_sysctls[] = {
4813 .procname = "mount-max",
4814 .data = &sysctl_mount_max,
4815 .maxlen = sizeof(unsigned int),
4817 .proc_handler = proc_dointvec_minmax,
4818 .extra1 = SYSCTL_ONE,
4823 static int __init init_fs_namespace_sysctls(void)
4825 register_sysctl_init("fs", fs_namespace_sysctls);
4828 fs_initcall(init_fs_namespace_sysctls);
4830 #endif /* CONFIG_SYSCTL */