// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret)
			pr_warn_ratelimited(
	"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
				bdev, ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for a given bdev range. This function bails
 * out with an error if the bdev has another exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold an exclusive handle for the device, upgrade to one
	 * while we discard the buffer cache so that we do not discard buffers
	 * under a live filesystem.
	 */
	if (!(mode & BLK_OPEN_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current size */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
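
/*
 * Example (illustrative sketch, not part of the original file): a
 * filesystem's fill_super path commonly picks its block size this way
 * before reading the on-disk superblock, where 1024 is just a sample size.
 * A zero return means the block size could not be set:
 *
 *	if (!sb_min_blocksize(sb, 1024))
 *		return -EINVAL;
 */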

int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
	return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
			lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
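
/*
 * Example (illustrative sketch, not part of the original file): writing out
 * and waiting on just the pages backing one on-disk region before it is
 * reused; note that @lend is inclusive, hence the "- 1":
 *
 *	error = sync_blockdev_range(bdev, pos, pos + len - 1);
 */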

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() actually
 * unfreezes.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	else
		error = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	else
		error = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
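
/*
 * Example (illustrative sketch, not part of the original file): freeze and
 * thaw must be paired, and nested freezes are refcounted, so a snapshot-style
 * caller looks roughly like this ("take_snapshot" is hypothetical):
 *
 *	error = freeze_bdev(bdev);
 *	if (error)
 *		return error;
 *	take_snapshot();
 *	return thaw_bdev(bdev);
 */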

static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	mutex_init(&bdev->bd_holder_lock);
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_queue = disk->queue;
	if (partno)
		bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
	else
		bdev->bd_has_submit_bio = false;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	bdev->bd_nr_sectors = sectors;
	spin_unlock(&bdev->bd_size_lock);
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	if (bdev_stable_writes(bdev))
		mapping_set_stable_writes(bdev->bd_inode->i_mapping);
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	lockdep_assert_held(&bdev_lock);

	if (bdev->bd_holder) {
		/* The same holder can always re-claim. */
		if (bdev->bd_holder == holder) {
			if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
				return false;
			return true;
		}
		return false;
	}

	/*
	 * If the whole device's holder is set to bd_may_claim, a partition on
	 * the device is claimed, but not the whole device.
	 */
	if (whole != bdev &&
	    whole->bd_holder && whole->bd_holder != bd_may_claim)
		return false;
	return true;
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Claim @bdev. This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	mutex_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, holder, hops)) {
		mutex_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	mutex_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: block device holder operations
 *
 * Finish an exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	mutex_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, holder, hops));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	mutex_lock(&bdev->bd_holder_lock);
	bdev->bd_holder = holder;
	bdev->bd_holder_ops = hops;
	mutex_unlock(&bdev->bd_holder_lock);
	bd_clear_claiming(whole, holder);
	mutex_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can
 * also be used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	mutex_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
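
/*
 * Example (illustrative sketch, not part of the original file): the
 * prepare/abort pair temporarily blocks other exclusive openers without
 * performing a full exclusive open; truncate_bdev_range() above is an
 * in-file user of exactly this pattern:
 *
 *	err = bd_prepare_to_claim(bdev, my_holder, NULL);
 *	if (err)
 *		return err;
 *	... other exclusive openers are held off here ...
 *	bd_abort_claiming(bdev, my_holder);
 */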

static void bd_end_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);
	bool unblock = false;

	/*
	 * Release a claim on the device. The holder fields are protected with
	 * bdev_lock. open_mutex is used to synchronize disk_holder unlinking.
	 */
	mutex_lock(&bdev_lock);
	WARN_ON_ONCE(bdev->bd_holder != holder);
	WARN_ON_ONCE(--bdev->bd_holders < 0);
	WARN_ON_ONCE(--whole->bd_holders < 0);
	if (!bdev->bd_holders) {
		mutex_lock(&bdev->bd_holder_lock);
		bdev->bd_holder = NULL;
		bdev->bd_holder_ops = NULL;
		mutex_unlock(&bdev->bd_holder_lock);
		if (bdev->bd_write_holder)
			unblock = true;
	}
	if (!whole->bd_holders)
		whole->bd_holder = NULL;
	mutex_unlock(&bdev_lock);

	/*
	 * If this was the last claim, remove the holder link and unblock event
	 * polling if it was a write holder.
	 */
	if (unblock) {
		disk_unblock_events(bdev->bd_disk);
		bdev->bd_write_holder = false;
	}
}

static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (disk->fops->open) {
		ret = disk->fops->open(disk, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!atomic_read(&bdev->bd_openers))
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	atomic_inc(&bdev->bd_openers);
	return 0;
}

static void blkdev_put_whole(struct block_device *bdev)
{
	if (atomic_dec_and_test(&bdev->bd_openers))
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk);
}

static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	if (!atomic_read(&part->bd_openers)) {
		disk->open_partitions++;
		set_init_blocksize(part);
	}
	atomic_inc(&part->bd_openers);
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part));
	return ret;
}

static void blkdev_put_part(struct block_device *part)
{
	struct block_device *whole = bdev_whole(part);

	if (atomic_dec_and_test(&part->bd_openers)) {
		blkdev_flush_mapping(part);
		whole->bd_disk->open_partitions--;
	}
	blkdev_put_whole(whole);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (inode)
			pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
	}
	if (!inode)
		return NULL;

	/* switch from the inode reference to a device mode one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);
	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by device number @dev. If @holder is not
 * %NULL, the block device is opened with exclusive access. Exclusive opens may
 * nest for the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number. Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
			((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (holder) {
		mode |= BLK_OPEN_EXCL;
		ret = bd_prepare_to_claim(bdev, holder, hops);
		if (ret)
			goto put_blkdev;
	} else {
		if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) {
			ret = -EIO;
			goto put_blkdev;
		}
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (!try_module_get(disk->fops->owner))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto put_module;
	if (holder) {
		bd_finish_claiming(bdev, holder, hops);

		/*
		 * Block event polling for write claims if requested. Any write
		 * holder makes the write_holder state stick until all are
		 * released. This is good enough and tracking individual
		 * writeable references is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;
put_module:
	module_put(disk->fops->owner);
abort_claiming:
	if (holder)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 *
 * Open the block device described by the device file at @path. If @holder is
 * not %NULL, the block device is opened with exclusive access. Exclusive opens
 * may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder, hops);
	if (!IS_ERR(bdev) && (mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, holder);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
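
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * exclusive open/release pairing, using the address of a caller-owned object
 * as the holder cookie ("my_ctx" is hypothetical):
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/vda1", BLK_OPEN_READ | BLK_OPEN_WRITE,
 *				  my_ctx, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	... do I/O ...
 *	blkdev_put(bdev, my_ctx);
 */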

void blkdev_put(struct block_device *bdev, void *holder)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one. If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (atomic_read(&bdev->bd_openers) == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (holder)
		bd_end_claim(bdev, holder);

	/*
	 * Trigger event checking and tell drivers to flush the MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev);
	else
		blkdev_put_whole(bdev);
	mutex_unlock(&disk->open_mutex);

	module_put(disk->fops->owner);
	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Look up the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 on success, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
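
/*
 * Example (illustrative sketch, not part of the original file): resolving a
 * path to a dev_t and then opening by number, for callers that must pass a
 * device number around before opening:
 *
 *	dev_t dev;
 *
 *	error = lookup_bdev("/dev/vda", &dev);
 *	if (error)
 *		return error;
 *	bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ, NULL, NULL);
 */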

/**
 * bdev_mark_dead - mark a block device as dead
 * @bdev: block device to operate on
 * @surprise: indicate a surprise removal
 *
 * Tell the file system that this device or media is dead. If @surprise is set
 * to %true the device or media is already gone; if not, we are preparing for
 * an orderly removal.
 *
 * This calls into the file system, which then typically syncs out all dirty
 * data and writes back inodes and then invalidates any cached data in the
 * inodes on the file system. In addition we also invalidate the block device
 * mapping.
 */
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
		bdev->bd_holder_ops->mark_dead(bdev, surprise);
	else
		sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_holder_lock);

	invalidate_bdev(bdev);
}
#ifdef CONFIG_DASD_MODULE
/*
 * Drivers should not use this directly, but the DASD driver has historically
 * had a shutdown to offline mode that doesn't actually remove the gendisk
 * that otherwise looks a lot like a safe device removal.
 */
EXPORT_SYMBOL_GPL(bdev_mark_dead);
#endif
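
/*
 * Example (illustrative sketch, not part of the original file): a holder that
 * wants the mark_dead notification registers blk_holder_ops at exclusive open
 * time ("my_fs_mark_dead" is hypothetical):
 *
 *	static const struct blk_holder_ops my_fs_holder_ops = {
 *		.mark_dead	= my_fs_mark_dead,
 *	};
 *
 *	bdev = blkdev_get_by_path(path, mode, sb, &my_fs_holder_ops);
 */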

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from the s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!atomic_read(&bdev->bd_openers)) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mappings so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}

/*
 * Handle STATX_DIOALIGN for block devices.
 *
 * Note that the inode passed to this is the inode of a block device node file,
 * not the block device's internal inode. Therefore it is *not* valid to use
 * I_BDEV() here; the block device has to be looked up by i_rdev instead.
 */
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
	struct block_device *bdev;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return;

	stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
	stat->dio_offset_align = bdev_logical_block_size(bdev);
	stat->result_mask |= STATX_DIOALIGN;

	blkdev_put_no_open(bdev);
}