// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include "internal.h"
static int thaw_super_locked(struct super_block *sb);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
        "sb_writers",
        "sb_pagefaults",
        "sb_internal",
};
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        struct super_block *sb;
        long    fs_objects = 0;
        long    total_objects;
        long    freed = 0;
        long    dentries;
        long    inodes;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Deadlock avoidance.  We may hold various FS locks, and we don't want
         * to recurse into the FS that called us in clear_inode() and friends..
         */
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;

        if (!trylock_super(sb))
                return SHRINK_STOP;

        if (sb->s_op->nr_cached_objects)
                fs_objects = sb->s_op->nr_cached_objects(sb, sc);

        inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
        dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
        total_objects = dentries + inodes + fs_objects + 1;
        if (!total_objects)
                total_objects = 1;

        /* proportion the scan between the caches */
        dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
        inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
        fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

        /*
         * prune the dcache first as the icache is pinned by it, then
         * prune the icache, followed by the filesystem specific caches
         *
         * Ensure that we always scan at least one object - memcg kmem
         * accounting uses this to fully empty the caches.
         */
        sc->nr_to_scan = dentries + 1;
        freed = prune_dcache_sb(sb, sc);
        sc->nr_to_scan = inodes + 1;
        freed += prune_icache_sb(sb, sc);

        if (fs_objects) {
                sc->nr_to_scan = fs_objects + 1;
                freed += sb->s_op->free_cached_objects(sb, sc);
        }

        up_read(&sb->s_umount);
        return freed;
}
static unsigned long super_cache_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct super_block *sb;
        long    total_objects = 0;

        sb = container_of(shrink, struct super_block, s_shrink);

        /*
         * Don't call trylock_super as it is a potential
         * scalability bottleneck. The counts could get updated
         * between super_cache_count and super_cache_scan anyway.
         * Call to super_cache_count with shrinker_rwsem held
         * ensures the safety of call to list_lru_shrink_count() and
         * s_op->nr_cached_objects().
         */
        if (sb->s_op && sb->s_op->nr_cached_objects)
                total_objects = sb->s_op->nr_cached_objects(sb, sc);

        total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
        total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

        total_objects = vfs_pressure_ratio(total_objects);
        return total_objects;
}
static void destroy_super_work(struct work_struct *work)
{
        struct super_block *s = container_of(work, struct super_block,
                                             destroy_work);
        int i;

        for (i = 0; i < SB_FREEZE_LEVELS; i++)
                percpu_free_rwsem(&s->s_writers.rw_sem[i]);
        kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
        struct super_block *s = container_of(head, struct super_block, rcu);
        INIT_WORK(&s->destroy_work, destroy_super_work);
        schedule_work(&s->destroy_work);
}
/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
        if (!s)
                return;
        up_write(&s->s_umount);
        list_lru_destroy(&s->s_dentry_lru);
        list_lru_destroy(&s->s_inode_lru);
        security_sb_free(s);
        put_user_ns(s->s_user_ns);
        kfree(s->s_subtype);
        /* no delays needed */
        destroy_super_work(&s->destroy_work);
}
/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 * @user_ns: User namespace for the super_block
 *
 * Allocates and initializes a new &struct super_block.  alloc_super()
 * returns a pointer to a new superblock or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
                                       struct user_namespace *user_ns)
{
        struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
        static const struct super_operations default_op;
        int i;

        if (!s)
                return NULL;

        INIT_LIST_HEAD(&s->s_mounts);
        s->s_user_ns = get_user_ns(user_ns);
        init_rwsem(&s->s_umount);
        lockdep_set_class(&s->s_umount, &type->s_umount_key);
        /*
         * sget() can have s_umount recursion.
         *
         * When it cannot find a suitable sb, it allocates a new
         * one (this one), and tries again to find a suitable old
         * one.
         *
         * In case that succeeds, it will acquire the s_umount
         * lock of the old one. Since these are clearly distinct
         * locks, and this object isn't exposed yet, there's no
         * risk of deadlocks.
         *
         * Annotate this by putting this lock in a different
         * subclass.
         */
        down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

        if (security_sb_alloc(s))
                goto fail;

        for (i = 0; i < SB_FREEZE_LEVELS; i++) {
                if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
                                        sb_writers_name[i],
                                        &type->s_writers_key[i]))
                        goto fail;
        }
        init_waitqueue_head(&s->s_writers.wait_unfrozen);
        s->s_bdi = &noop_backing_dev_info;
        s->s_flags = flags;
        if (s->s_user_ns != &init_user_ns)
                s->s_iflags |= SB_I_NODEV;
        INIT_HLIST_NODE(&s->s_instances);
        INIT_HLIST_BL_HEAD(&s->s_roots);
        mutex_init(&s->s_sync_lock);
        INIT_LIST_HEAD(&s->s_inodes);
        spin_lock_init(&s->s_inode_list_lock);
        INIT_LIST_HEAD(&s->s_inodes_wb);
        spin_lock_init(&s->s_inode_wblist_lock);

        if (list_lru_init_memcg(&s->s_dentry_lru))
                goto fail;
        if (list_lru_init_memcg(&s->s_inode_lru))
                goto fail;
        s->s_count = 1;
        atomic_set(&s->s_active, 1);
        mutex_init(&s->s_vfs_rename_mutex);
        lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
        init_rwsem(&s->s_dquot.dqio_sem);
        s->s_maxbytes = MAX_NON_LFS;
        s->s_op = &default_op;
        s->s_time_gran = 1000000000;
        s->cleancache_poolid = CLEANCACHE_NO_POOL;

        s->s_shrink.seeks = DEFAULT_SEEKS;
        s->s_shrink.scan_objects = super_cache_scan;
        s->s_shrink.count_objects = super_cache_count;
        s->s_shrink.batch = 1024;
        s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
        return s;

fail:
        destroy_unused_super(s);
        return NULL;
}
/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
        if (!--s->s_count) {
                list_del_init(&s->s_list);
                WARN_ON(s->s_dentry_lru.node);
                WARN_ON(s->s_inode_lru.node);
                WARN_ON(!list_empty(&s->s_mounts));
                security_sb_free(s);
                put_user_ns(s->s_user_ns);
                kfree(s->s_subtype);
                call_rcu(&s->rcu, destroy_super_rcu);
        }
}
/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there are no
 * references left.
 */
static void put_super(struct super_block *sb)
{
        spin_lock(&sb_lock);
        __put_super(sb);
        spin_unlock(&sb_lock);
}
/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left.  In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
                cleancache_invalidate_fs(s);
                unregister_shrinker(&s->s_shrink);
                fs->kill_sb(s);

                /*
                 * Since list_lru_destroy() may sleep, we cannot call it from
                 * put_super(), where we hold the sb_lock. Therefore we destroy
                 * the lru lists right now.
                 */
                list_lru_destroy(&s->s_dentry_lru);
                list_lru_destroy(&s->s_inode_lru);

                put_filesystem(fs);
                put_super(s);
        } else {
                up_write(&s->s_umount);
        }
}

EXPORT_SYMBOL(deactivate_locked_super);
/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller.  If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
        if (!atomic_add_unless(&s->s_active, -1, 1)) {
                down_write(&s->s_umount);
                deactivate_locked_super(s);
        }
}

EXPORT_SYMBOL(deactivate_super);
/*
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference.  grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference.  grab_super()
 * is called with sb_lock held and drops it.  Returns 1 in case of
 * success, 0 if we failed (the superblock was already dead or dying
 * when grab_super() was called).  Note that this is only called for
 * superblocks not in rundown mode (== ones still on ->fs_supers of
 * their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
        s->s_count++;
        spin_unlock(&sb_lock);
        down_write(&s->s_umount);
        if ((s->s_flags & SB_BORN) && atomic_inc_not_zero(&s->s_active)) {
                put_super(s);
                return 1;
        }
        up_write(&s->s_umount);
        put_super(s);
        return 0;
}
/*
 * trylock_super - try to grab ->s_umount shared
 * @sb: reference we are trying to grab
 *
 * Try to prevent fs shutdown.  This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * filesystem is not shut down while we are working on it. It returns
 * false if we cannot acquire s_umount or if we lose the race and the
 * filesystem already got into shutdown, and returns true with the s_umount
 * lock held in read mode in case of success. On successful return,
 * the caller must drop the s_umount lock when done.
 *
 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
 * The reason why it's safe is that we are OK with doing trylock instead
 * of down_read().  There are a couple of places that are OK with that, but
 * it's very much not a general-purpose interface.
 */
bool trylock_super(struct super_block *sb)
{
        if (down_read_trylock(&sb->s_umount)) {
                if (!hlist_unhashed(&sb->s_instances) &&
                    sb->s_root && (sb->s_flags & SB_BORN))
                        return true;
                up_read(&sb->s_umount);
        }

        return false;
}
/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects.  Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
        const struct super_operations *sop = sb->s_op;

        if (sb->s_root) {
                shrink_dcache_for_umount(sb);
                sync_filesystem(sb);
                sb->s_flags &= ~SB_ACTIVE;

                fsnotify_unmount_inodes(sb);
                cgroup_writeback_umount();

                evict_inodes(sb);

                if (sb->s_dio_done_wq) {
                        destroy_workqueue(sb->s_dio_done_wq);
                        sb->s_dio_done_wq = NULL;
                }

                if (sop->put_super)
                        sop->put_super(sb);

                if (!list_empty(&sb->s_inodes)) {
                        printk("VFS: Busy inodes after unmount of %s. "
                           "Self-destruct in 5 seconds.  Have a nice day...\n",
                           sb->s_id);
                }
        }
        spin_lock(&sb_lock);
        /* should be initialized for __put_super_and_need_restart() */
        hlist_del_init(&sb->s_instances);
        spin_unlock(&sb_lock);
        up_write(&sb->s_umount);
        if (sb->s_bdi != &noop_backing_dev_info) {
                bdi_put(sb->s_bdi);
                sb->s_bdi = &noop_backing_dev_info;
        }
}

EXPORT_SYMBOL(generic_shutdown_super);
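/*
 * Illustrative sketch, not part of the original file: the typical ->kill_sb()
 * shape described in the comment above -- pick fs-specific state out of the
 * superblock, let generic_shutdown_super() do the common teardown, then free
 * that state. All "examplefs" names are hypothetical.
 */
struct examplefs_sb_info {
        void *options;                  /* hypothetical fs-private state */
};

static void examplefs_kill_sb(struct super_block *sb)
{
        struct examplefs_sb_info *sbi = sb->s_fs_info;

        /* Pick fs-specific objects out of the superblock first... */
        sb->s_fs_info = NULL;
        /* ...let the VFS tear down dentries, inodes and the superblock... */
        generic_shutdown_super(sb);
        /* ...and only then release the aforementioned objects. */
        kfree(sbi);
}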
/**
 * sget_userns - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @user_ns: User namespace for the super_block
 * @data: argument to each of them
 */
struct super_block *sget_userns(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
                        int (*set)(struct super_block *,void *),
                        int flags, struct user_namespace *user_ns,
                        void *data)
{
        struct super_block *s = NULL;
        struct super_block *old;
        int err;

        if (!(flags & (SB_KERNMOUNT|SB_SUBMOUNT)) &&
            !(type->fs_flags & FS_USERNS_MOUNT) &&
            !capable(CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);
retry:
        spin_lock(&sb_lock);
        if (test) {
                hlist_for_each_entry(old, &type->fs_supers, s_instances) {
                        if (!test(old, data))
                                continue;
                        if (user_ns != old->s_user_ns) {
                                spin_unlock(&sb_lock);
                                destroy_unused_super(s);
                                return ERR_PTR(-EBUSY);
                        }
                        if (!grab_super(old))
                                goto retry;
                        destroy_unused_super(s);
                        return old;
                }
        }
        if (!s) {
                spin_unlock(&sb_lock);
                s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
                if (!s)
                        return ERR_PTR(-ENOMEM);
                goto retry;
        }

        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
                destroy_unused_super(s);
                return ERR_PTR(err);
        }
        s->s_type = type;
        strlcpy(s->s_id, type->name, sizeof(s->s_id));
        list_add_tail(&s->s_list, &super_blocks);
        hlist_add_head(&s->s_instances, &type->fs_supers);
        spin_unlock(&sb_lock);
        get_filesystem(type);
        err = register_shrinker(&s->s_shrink);
        if (err) {
                deactivate_locked_super(s);
                s = ERR_PTR(err);
        }

        return s;
}

EXPORT_SYMBOL(sget_userns);
/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
                        int (*test)(struct super_block *,void *),
                        int (*set)(struct super_block *,void *),
                        int flags,
                        void *data)
{
        struct user_namespace *user_ns = current_user_ns();

        /* We don't yet pass the user namespace of the parent
         * mount through to here so always use &init_user_ns
         * until that changes.
         */
        if (flags & SB_SUBMOUNT)
                user_ns = &init_user_ns;

        /* Ensure the requestor has permissions over the target filesystem */
        if (!(flags & (SB_KERNMOUNT|SB_SUBMOUNT)) && !ns_capable(user_ns, CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);

        return sget_userns(type, test, set, flags, user_ns, data);
}

EXPORT_SYMBOL(sget);
void drop_super(struct super_block *sb)
{
        up_read(&sb->s_umount);
        put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
        up_write(&sb->s_umount);
        put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);
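/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * get_super() (defined below) with drop_super(). On success get_super()
 * returns with ->s_umount held shared and a temporary reference taken, so
 * the filesystem cannot be shut down until drop_super() releases both.
 */
static void example_report_super(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);

        if (!sb)
                return;         /* nothing mounted on this device */
        pr_info("%s mounted on %s\n", sb->s_type->name, sb->s_id);
        drop_super(sb);
}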
static void __iterate_supers(void (*f)(struct super_block *))
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                f(sb);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & SB_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}
/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
        void (*f)(struct super_block *, void *), void *arg)
{
        struct super_block *sb, *p = NULL;

        spin_lock(&sb_lock);
        hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
                sb->s_count++;
                spin_unlock(&sb_lock);

                down_read(&sb->s_umount);
                if (sb->s_root && (sb->s_flags & SB_BORN))
                        f(sb, arg);
                up_read(&sb->s_umount);

                spin_lock(&sb_lock);
                if (p)
                        __put_super(p);
                p = sb;
        }
        if (p)
                __put_super(p);
        spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
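/*
 * Illustrative sketch, not part of the original file: a callback suitable
 * for iterate_supers_type(). The iterator calls it with ->s_umount held
 * shared and SB_BORN already checked, so the superblock may be inspected
 * safely. A caller would invoke it as
 * iterate_supers_type(type, example_tally_super, &count);
 */
static void example_tally_super(struct super_block *sb, void *arg)
{
        int *count = arg;

        (*count)++;
}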
static struct super_block *__get_super(struct block_device *bdev, bool excl)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        if (!excl)
                                down_read(&sb->s_umount);
                        else
                                down_write(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & SB_BORN))
                                return sb;
                        if (!excl)
                                up_read(&sb->s_umount);
                        else
                                up_write(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}
/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
        return __get_super(bdev, false);
}
EXPORT_SYMBOL(get_super);
static struct super_block *__get_super_thawed(struct block_device *bdev,
                                              bool excl)
{
        while (1) {
                struct super_block *s = __get_super(bdev, excl);
                if (!s || s->s_writers.frozen == SB_UNFROZEN)
                        return s;
                if (!excl)
                        up_read(&s->s_umount);
                else
                        up_write(&s->s_umount);
                wait_event(s->s_writers.wait_unfrozen,
                           s->s_writers.frozen == SB_UNFROZEN);
                put_super(s);
        }
}
/**
 * get_super_thawed - get thawed superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device. The superblock is returned once it is thawed
 * (or immediately if it was not frozen). %NULL is returned if no match
 * is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
        return __get_super_thawed(bdev, false);
}
EXPORT_SYMBOL(get_super_thawed);

/**
 * get_super_exclusive_thawed - get thawed superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device. The superblock is returned once it is thawed
 * (or immediately if it was not frozen) and s_umount semaphore is held
 * in exclusive mode. %NULL is returned if no match is found.
 */
struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
{
        return __get_super_thawed(bdev, true);
}
EXPORT_SYMBOL(get_super_exclusive_thawed);
/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
        struct super_block *sb;

        if (!bdev)
                return NULL;

restart:
        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_bdev == bdev) {
                        if (!grab_super(sb))
                                goto restart;
                        up_write(&sb->s_umount);
                        return sb;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}
struct super_block *user_get_super(dev_t dev)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
rescan:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (hlist_unhashed(&sb->s_instances))
                        continue;
                if (sb->s_dev == dev) {
                        sb->s_count++;
                        spin_unlock(&sb_lock);
                        down_read(&sb->s_umount);
                        /* still alive? */
                        if (sb->s_root && (sb->s_flags & SB_BORN))
                                return sb;
                        up_read(&sb->s_umount);
                        /* nope, got unmounted */
                        spin_lock(&sb_lock);
                        __put_super(sb);
                        goto rescan;
                }
        }
        spin_unlock(&sb_lock);
        return NULL;
}
/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @sb_flags: revised superblock flags
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int sb_flags, void *data, int force)
{
        int retval;
        int remount_ro;

        if (sb->s_writers.frozen != SB_UNFROZEN)
                return -EBUSY;

#ifdef CONFIG_BLOCK
        if (!(sb_flags & SB_RDONLY) && bdev_read_only(sb->s_bdev))
                return -EACCES;
#endif

        remount_ro = (sb_flags & SB_RDONLY) && !sb_rdonly(sb);

        if (remount_ro) {
                if (!hlist_empty(&sb->s_pins)) {
                        up_write(&sb->s_umount);
                        group_pin_kill(&sb->s_pins);
                        down_write(&sb->s_umount);
                        if (!sb->s_root)
                                return 0;
                        if (sb->s_writers.frozen != SB_UNFROZEN)
                                return -EBUSY;
                        remount_ro = (sb_flags & SB_RDONLY) && !sb_rdonly(sb);
                }
        }
        shrink_dcache_sb(sb);

        /* If we are remounting RDONLY and current sb is read/write,
           make sure there are no rw files opened */
        if (remount_ro) {
                if (force) {
                        sb->s_readonly_remount = 1;
                        smp_wmb();
                } else {
                        retval = sb_prepare_remount_readonly(sb);
                        if (retval)
                                return retval;
                }
        }

        if (sb->s_op->remount_fs) {
                retval = sb->s_op->remount_fs(sb, &sb_flags, data);
                if (retval) {
                        if (!force)
                                goto cancel_readonly;
                        /* If forced remount, go ahead despite any errors */
                        WARN(1, "forced remount of a %s fs returned %i\n",
                             sb->s_type->name, retval);
                }
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (sb_flags & MS_RMT_MASK);
        /* Needs to be ordered wrt mnt_is_readonly() */
        smp_wmb();
        sb->s_readonly_remount = 0;

        /*
         * Some filesystems modify their metadata via some other path than the
         * bdev buffer cache (eg. use a private mapping, or directories in
         * pagecache, etc). Also file data modifications go via their own
         * mappings. So if we try to mount readonly then copy the filesystem
         * from bdev, we could get stale data, so invalidate it to give a best
         * effort at coherency.
         */
        if (remount_ro && sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
        return 0;

cancel_readonly:
        sb->s_readonly_remount = 0;
        return retval;
}
static void do_emergency_remount_callback(struct super_block *sb)
{
        down_write(&sb->s_umount);
        if (sb->s_root && sb->s_bdev && (sb->s_flags & SB_BORN) &&
            !sb_rdonly(sb)) {
                /*
                 * What lock protects sb->s_flags??
                 */
                do_remount_sb(sb, SB_RDONLY, NULL, 1);
        }
        up_write(&sb->s_umount);
}

static void do_emergency_remount(struct work_struct *work)
{
        __iterate_supers(do_emergency_remount_callback);
        kfree(work);
        printk("Emergency Remount complete\n");
}
void emergency_remount(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_emergency_remount);
                schedule_work(work);
        }
}
static void do_thaw_all_callback(struct super_block *sb)
{
        down_write(&sb->s_umount);
        if (sb->s_root && sb->s_flags & SB_BORN) {
                emergency_thaw_bdev(sb);
                thaw_super_locked(sb);
        } else {
                up_write(&sb->s_umount);
        }
}

static void do_thaw_all(struct work_struct *work)
{
        __iterate_supers(do_thaw_all_callback);
        kfree(work);
        printk(KERN_WARNING "Emergency Thaw complete\n");
}
/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
        struct work_struct *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                INIT_WORK(work, do_thaw_all);
                schedule_work(work);
        }
}
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
/* Many userspace utilities consider an FSID of 0 invalid.
 * Always return at least 1 from get_anon_bdev.
 */
static int unnamed_dev_start = 1;

int get_anon_bdev(dev_t *p)
{
        int dev;
        int error;

 retry:
        if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                return -ENOMEM;
        spin_lock(&unnamed_dev_lock);
        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
        if (!error)
                unnamed_dev_start = dev + 1;
        spin_unlock(&unnamed_dev_lock);
        if (error == -EAGAIN)
                /* We raced and lost with another CPU. */
                goto retry;
        else if (error)
                return -EAGAIN;

        if (dev >= (1 << MINORBITS)) {
                spin_lock(&unnamed_dev_lock);
                ida_remove(&unnamed_dev_ida, dev);
                if (unnamed_dev_start > dev)
                        unnamed_dev_start = dev;
                spin_unlock(&unnamed_dev_lock);
                return -EMFILE;
        }
        *p = MKDEV(0, dev & MINORMASK);
        return 0;
}
EXPORT_SYMBOL(get_anon_bdev);
void free_anon_bdev(dev_t dev)
{
        int slot = MINOR(dev);
        spin_lock(&unnamed_dev_lock);
        ida_remove(&unnamed_dev_ida, slot);
        if (slot < unnamed_dev_start)
                unnamed_dev_start = slot;
        spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);
int set_anon_super(struct super_block *s, void *data)
{
        return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
        dev_t dev = sb->s_dev;
        generic_shutdown_super(sb);
        free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
        if (sb->s_root)
                d_genocide(sb->s_root);
        kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);
static int ns_test_super(struct super_block *sb, void *data)
{
        return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
        sb->s_fs_info = data;
        return set_anon_super(sb, NULL);
}
struct dentry *mount_ns(struct file_system_type *fs_type,
        int flags, void *data, void *ns, struct user_namespace *user_ns,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *sb;

        /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
         * over the namespace.
         */
        if (!(flags & SB_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);

        sb = sget_userns(fs_type, ns_test_super, ns_set_super, flags,
                         user_ns, ns);
        if (IS_ERR(sb))
                return ERR_CAST(sb);

        if (!sb->s_root) {
                int err;
                err = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
                if (err) {
                        deactivate_locked_super(sb);
                        return ERR_PTR(err);
                }

                sb->s_flags |= SB_ACTIVE;
        }

        return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);
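/*
 * Illustrative sketch, not part of the original file: a minimal fill_super
 * for the hypothetical "examplefs", reused by the mount helper sketches
 * below. It only creates a root inode and dentry; a real filesystem would
 * also set s_op, read its on-disk super, and so on.
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *root;

        sb->s_magic = 0x45584d50;       /* hypothetical magic number */
        sb->s_time_gran = 1;
        root = new_inode(sb);
        if (!root)
                return -ENOMEM;
        root->i_ino = 1;
        root->i_mode = S_IFDIR | 0755;
        sb->s_root = d_make_root(root);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

/*
 * Sketch of a ->mount() built on mount_ns(), keying superblocks on a
 * namespace object so each namespace sees its own instance. Passing the
 * caller-supplied @data as the namespace key is an assumption made purely
 * for illustration.
 */
static struct dentry *examplefs_mount_ns(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        /* Hypothetical: @data identifies the per-caller namespace object. */
        return mount_ns(fs_type, flags, data, data, current_user_ns(),
                        examplefs_fill_super);
}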
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
        s->s_bdev = data;
        s->s_dev = s->s_bdev->bd_dev;
        s->s_bdi = bdi_get(s->s_bdev->bd_bdi);

        return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
        return (void *)s->s_bdev == data;
}
struct dentry *mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct block_device *bdev;
        struct super_block *s;
        fmode_t mode = FMODE_READ | FMODE_EXCL;
        int error = 0;

        if (!(flags & SB_RDONLY))
                mode |= FMODE_WRITE;

        bdev = blkdev_get_by_path(dev_name, mode, fs_type);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        /*
         * once the super is inserted into the list by sget, s_umount
         * will protect the lockfs code from trying to start a snapshot
         * while we are mounting
         */
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                error = -EBUSY;
                goto error_bdev;
        }
        s = sget(fs_type, test_bdev_super, set_bdev_super, flags | SB_NOSEC,
                 bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        if (IS_ERR(s))
                goto error_s;

        if (s->s_root) {
                if ((flags ^ s->s_flags) & SB_RDONLY) {
                        deactivate_locked_super(s);
                        error = -EBUSY;
                        goto error_bdev;
                }

                /*
                 * s_umount nests inside bd_mutex during
                 * __invalidate_device().  blkdev_put() acquires
                 * bd_mutex and can't be called under s_umount.  Drop
                 * s_umount temporarily.  This is safe as we're
                 * holding an active reference.
                 */
                up_write(&s->s_umount);
                blkdev_put(bdev, mode);
                down_write(&s->s_umount);
        } else {
                s->s_mode = mode;
                snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
                sb_set_blocksize(s, block_size(bdev));
                error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        goto error;
                }

                s->s_flags |= SB_ACTIVE;
                bdev->bd_super = s;
        }

        return dget(s->s_root);

error_s:
        error = PTR_ERR(s);
error_bdev:
        blkdev_put(bdev, mode);
error:
        return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
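/*
 * Illustrative sketch, not part of the original file: a block-device backed
 * ->mount() is usually just a thin wrapper around mount_bdev(), with
 * kill_block_super() below as the matching ->kill_sb(). Reuses the
 * hypothetical examplefs_fill_super() sketched above.
 */
static struct dentry *examplefs_mount_bdev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_bdev(fs_type, flags, dev_name, data,
                          examplefs_fill_super);
}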
void kill_block_super(struct super_block *sb)
{
        struct block_device *bdev = sb->s_bdev;
        fmode_t mode = sb->s_mode;

        bdev->bd_super = NULL;
        generic_shutdown_super(sb);
        sync_blockdev(bdev);
        WARN_ON_ONCE(!(mode & FMODE_EXCL));
        blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif
struct dentry *mount_nodev(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        int error;
        struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

        if (IS_ERR(s))
                return ERR_CAST(s);

        error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
        if (error) {
                deactivate_locked_super(s);
                return ERR_PTR(error);
        }
        s->s_flags |= SB_ACTIVE;
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
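/*
 * Illustrative sketch, not part of the original file: mount_nodev() suits
 * filesystems with no backing device that still want a fresh superblock per
 * mount. Again reuses the hypothetical examplefs_fill_super() above.
 */
static struct dentry *examplefs_mount_nodev(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}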
static int compare_single(struct super_block *s, void *p)
{
        return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
        int flags, void *data,
        int (*fill_super)(struct super_block *, void *, int))
{
        struct super_block *s;
        int error;

        s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
        if (!s->s_root) {
                error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
                if (error) {
                        deactivate_locked_super(s);
                        return ERR_PTR(error);
                }
                s->s_flags |= SB_ACTIVE;
        } else {
                do_remount_sb(s, flags, data, 0);
        }
        return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
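/*
 * Illustrative sketch, not part of the original file: wiring a mount helper
 * into a file_system_type. mount_single() hands every mount the same
 * superblock, which suits pseudo filesystems; kill_litter_super() above is
 * the usual ->kill_sb() companion. All names are hypothetical.
 */
static struct dentry *examplefs_mount_single(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_single(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "examplefs",
        .mount          = examplefs_mount_single,
        .kill_sb        = kill_litter_super,
};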
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
        struct dentry *root;
        struct super_block *sb;
        char *secdata = NULL;
        int error = -ENOMEM;

        if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
                secdata = alloc_secdata();
                if (!secdata)
                        goto out;

                error = security_sb_copy_data(data, secdata);
                if (error)
                        goto out_free_secdata;
        }

        root = type->mount(type, flags, name, data);
        if (IS_ERR(root)) {
                error = PTR_ERR(root);
                goto out_free_secdata;
        }
        sb = root->d_sb;
        BUG_ON(!sb);
        WARN_ON(!sb->s_bdi);
        sb->s_flags |= SB_BORN;

        error = security_sb_kern_mount(sb, flags, secdata);
        if (error)
                goto out_sb;

        /*
         * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
         * but s_maxbytes was an unsigned long long for many releases. Throw
         * this warning for a little while to try and catch filesystems that
         * violate this rule.
         */
        WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
                "negative value (%lld)\n", type->name, sb->s_maxbytes);

        up_write(&sb->s_umount);
        free_secdata(secdata);
        return root;
out_sb:
        dput(root);
        deactivate_locked_super(sb);
out_free_secdata:
        free_secdata(secdata);
out:
        return ERR_PTR(error);
}
/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
        struct backing_dev_info *bdi;
        int err;
        va_list args;

        bdi = bdi_alloc(GFP_KERNEL);
        if (!bdi)
                return -ENOMEM;

        bdi->name = sb->s_type->name;

        va_start(args, fmt);
        err = bdi_register_va(bdi, fmt, args);
        va_end(args);
        if (err) {
                bdi_put(bdi);
                return err;
        }
        WARN_ON(sb->s_bdi != &noop_backing_dev_info);
        sb->s_bdi = bdi;

        return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);
/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
        static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

        return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
                                    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);
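/*
 * Illustrative sketch, not part of the original file: a fill_super fragment
 * giving its superblock a private BDI, as non-block filesystems that do
 * writeback typically do. The helper name and format string are assumptions.
 */
static int example_attach_bdi(struct super_block *sb)
{
        int err;

        err = super_setup_bdi_name(sb, "examplefs-%s", sb->s_id);
        if (err)
                return err;
        /* generic_shutdown_super() will put this private BDI for us later. */
        sb->s_bdi->ra_pages = 0;        /* e.g. opt out of readahead */
        return 0;
}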
/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
        percpu_up_read(sb->s_writers.rw_sem + level-1);
}
EXPORT_SYMBOL(__sb_end_write);
/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
        bool force_trylock = false;
        int ret = 1;

#ifdef CONFIG_LOCKDEP
        /*
         * We want lockdep to tell us about possible deadlocks with freezing
         * but it's a bit tricky to properly instrument it. Getting a freeze
         * protection works as getting a read lock but there are subtle
         * problems. XFS for example gets freeze protection on internal level
         * twice in some cases, which is OK only because we already hold a
         * freeze protection also on higher level. Due to these cases we have
         * to use wait == F (trylock mode) which must not fail.
         */
        if (wait) {
                int i;

                for (i = 0; i < level - 1; i++)
                        if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
                                force_trylock = true;
                                break;
                        }
        }
#endif
        if (wait && !force_trylock)
                percpu_down_read(sb->s_writers.rw_sem + level-1);
        else
                ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);

        WARN_ON(force_trylock && !ret);
        return ret;
}
EXPORT_SYMBOL(__sb_start_write);
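/*
 * Illustrative sketch, not part of the original file: writers normally take
 * freeze protection through the sb_start_write()/sb_end_write() wrappers
 * (which call the helpers above with SB_FREEZE_WRITE) rather than calling
 * __sb_start_write() directly.
 */
static void example_modify_fs(struct super_block *sb)
{
        sb_start_write(sb);     /* sleeps while the fs is frozen */
        /* ... dirty pages, start a transaction, etc. ... */
        sb_end_write(sb);
}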
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
        percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
        int level;

        for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
                percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
        int level;

        for (level = 0; level < SB_FREEZE_LEVELS; ++level)
                percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb)
{
        int level;

        for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
                percpu_up_write(sb->s_writers.rw_sem + level);
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
        int ret;

        atomic_inc(&sb->s_active);
        down_write(&sb->s_umount);
        if (sb->s_writers.frozen != SB_UNFROZEN) {
                deactivate_locked_super(sb);
                return -EBUSY;
        }

        if (!(sb->s_flags & SB_BORN)) {
                up_write(&sb->s_umount);
                return 0;       /* sic - it's "nothing to do" */
        }

        if (sb_rdonly(sb)) {
                /* Nothing to do really... */
                sb->s_writers.frozen = SB_FREEZE_COMPLETE;
                up_write(&sb->s_umount);
                return 0;
        }

        sb->s_writers.frozen = SB_FREEZE_WRITE;
        /* Release s_umount to preserve sb_start_write -> s_umount ordering */
        up_write(&sb->s_umount);
        sb_wait_write(sb, SB_FREEZE_WRITE);
        down_write(&sb->s_umount);

        /* Now we go and block page faults... */
        sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
        sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

        /* All writers are done so after syncing there won't be dirty data */
        sync_filesystem(sb);

        /* Now wait for internal filesystem counter */
        sb->s_writers.frozen = SB_FREEZE_FS;
        sb_wait_write(sb, SB_FREEZE_FS);

        if (sb->s_op->freeze_fs) {
                ret = sb->s_op->freeze_fs(sb);
                if (ret) {
                        printk(KERN_ERR
                                "VFS:Filesystem freeze failed\n");
                        sb->s_writers.frozen = SB_UNFROZEN;
                        sb_freeze_unlock(sb);
                        wake_up(&sb->s_writers.wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
        }
        /*
         * For debugging purposes so that fs can warn if it sees write activity
         * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
         */
        sb->s_writers.frozen = SB_FREEZE_COMPLETE;
        lockdep_sb_freeze_release(sb);
        up_write(&sb->s_umount);
        return 0;
}
EXPORT_SYMBOL(freeze_super);
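/*
 * Illustrative sketch, not part of the original file: freeze/thaw pairing
 * around some external activity, mirroring what the FIFREEZE/FITHAW ioctls
 * do for userspace-driven snapshots.
 */
static int example_snapshot(struct super_block *sb)
{
        int ret;

        ret = freeze_super(sb);
        if (ret)
                return ret;
        /* ... take the snapshot; no writes can reach the fs here ... */
        return thaw_super(sb);
}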
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
static int thaw_super_locked(struct super_block *sb)
{
        int error;

        if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
                up_write(&sb->s_umount);
                return -EINVAL;
        }

        if (sb_rdonly(sb)) {
                sb->s_writers.frozen = SB_UNFROZEN;
                goto out;
        }

        lockdep_sb_freeze_acquire(sb);

        if (sb->s_op->unfreeze_fs) {
                error = sb->s_op->unfreeze_fs(sb);
                if (error) {
                        printk(KERN_ERR
                                "VFS:Filesystem thaw failed\n");
                        lockdep_sb_freeze_release(sb);
                        up_write(&sb->s_umount);
                        return error;
                }
        }

        sb->s_writers.frozen = SB_UNFROZEN;
        sb_freeze_unlock(sb);
out:
        wake_up(&sb->s_writers.wait_unfrozen);
        deactivate_locked_super(sb);
        return 0;
}

int thaw_super(struct super_block *sb)
{
        down_write(&sb->s_umount);
        return thaw_super_locked(sb);
}
EXPORT_SYMBOL(thaw_super);