// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret)
			pr_warn_ratelimited(
	"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
				bdev, ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for a given bdev range. This function bails
 * out with an error if the bdev has another exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold an exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under a live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);

		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	/* Grow the block size as long as the device size stays aligned to it. */
	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);

	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
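
/*
 * Usage sketch (illustrative only, not part of this file): a filesystem's
 * fill_super callback typically picks its block size early in mount via the
 * helpers above. The callback name and the 1024-byte minimum below are
 * hypothetical.
 *
 *	static int example_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		// never go below the device's logical block size, at least 1K
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;
 *		// ... read the on-disk superblock, then switch to its block size:
 *		// if (!sb_set_blocksize(sb, blocksize_from_disk))
 *		//	return -EINVAL;
 *		return 0;
 *	}
 *
 * Both helpers return the block size actually set, or 0 on failure.
 */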

int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
	return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
			lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);

	if (sb) {
		int res = sync_filesystem(sb);

		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() will actually
 * unfreeze the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
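
/*
 * Usage sketch (illustrative only, not part of this file): a snapshot or
 * backup driver typically brackets its point-in-time copy with these two
 * calls. The function name below is hypothetical.
 *
 *	static int example_take_snapshot(struct block_device *bdev)
 *	{
 *		int err = freeze_bdev(bdev);	// quiesce the filesystem, if any
 *
 *		if (err)
 *			return err;
 *		// ... copy or mark the now-consistent device here ...
 *		return thaw_bdev(bdev);		// matching unfreeze
 *	}
 *
 * Because bd_fsfreeze_count is a counter, nested freeze_bdev()/thaw_bdev()
 * pairs from multiple callers are safe; only the last thaw actually unfreezes.
 */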

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);

	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_queue = disk->queue;
	if (partno)
		bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
	else
		bdev->bd_has_submit_bio = false;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	bdev->bd_nr_sectors = sectors;
	spin_unlock(&bdev->bd_size_lock);
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
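
/*
 * Usage sketch (illustrative only): temporarily blocking other exclusive
 * openers without doing a full exclusive open follows the same pattern as
 * truncate_bdev_range() above. The holder cookie just has to be a unique
 * pointer; "example_op" below is a hypothetical function used as that cookie.
 *
 *	static int example_op(struct block_device *bdev)
 *	{
 *		int err = bd_prepare_to_claim(bdev, example_op);
 *
 *		if (err)
 *			return err;	// someone else holds the device
 *		// ... work that must not race with exclusive openers ...
 *		bd_abort_claiming(bdev, example_op);	// claim never finished
 *		return 0;
 *	}
 *
 * A real exclusive open instead pairs bd_prepare_to_claim() with
 * bd_finish_claiming() below, and is normally done via blkdev_get_by_dev().
 */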

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can
 * also be used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (disk->fops->open) {
		ret = disk->fops->open(bdev, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!atomic_read(&bdev->bd_openers))
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	atomic_inc(&bdev->bd_openers);
	return 0;
}

static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
	if (atomic_dec_and_test(&bdev->bd_openers))
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}

static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	if (atomic_read(&part->bd_openers))
		goto done;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	disk->open_partitions++;
	set_init_blocksize(part);
done:
	atomic_inc(&part->bd_openers);
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part), mode);
	return ret;
}

static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
	struct block_device *whole = bdev_whole(part);

	if (!atomic_dec_and_test(&part->bd_openers))
		return;
	blkdev_flush_mapping(part);
	whole->bd_disk->open_partitions--;
	blkdev_put_whole(whole, mode);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (inode)
			pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
	}
	if (!inode)
		return NULL;

	/* switch from the inode reference to a device mode one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);
	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (!try_module_get(disk->fops->owner))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto put_module;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder);

		/*
		 * Block event polling for write claims if requested.  Any write
		 * holder makes the write_holder state stick until all are
		 * released.  This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;
put_module:
	module_put(disk->fops->owner);
abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
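
/*
 * Usage sketch (illustrative only): a driver that stacks on top of another
 * block device, roughly in the style of loop or device-mapper, opens it
 * exclusively by path and releases it with a matching blkdev_put(). The
 * names "struct example_dev" and "example_attach" are hypothetical.
 *
 *	static int example_attach(struct example_dev *dev, const char *path)
 *	{
 *		struct block_device *bdev;
 *
 *		bdev = blkdev_get_by_path(path,
 *				FMODE_READ | FMODE_WRITE | FMODE_EXCL, dev);
 *		if (IS_ERR(bdev))
 *			return PTR_ERR(bdev);
 *		dev->backing_bdev = bdev;
 *		return 0;
 *	}
 *
 * On detach the claim is dropped with
 * blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); the mode passed
 * to blkdev_put() must include FMODE_EXCL for the exclusive claim to be
 * released.
 */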

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (atomic_read(&bdev->bd_openers) == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (mode & FMODE_EXCL) {
		struct block_device *whole = bdev_whole(bdev);
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  open_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--whole->bd_holders < 0);

		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!whole->bd_holders)
			whole->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock evpoll if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev, mode);
	else
		blkdev_put_whole(bdev, mode);
	mutex_unlock(&disk->open_mutex);

	module_put(disk->fops->owner);
	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
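
/*
 * Usage sketch (illustrative only): resolving a user-supplied path to a
 * dev_t before deciding how (or whether) to open the device. The function
 * name "example_parse_dev" is hypothetical.
 *
 *	static int example_parse_dev(const char *path, dev_t *devt)
 *	{
 *		int err = lookup_bdev(path, devt);
 *
 *		if (err)
 *			return err;	// not a block device node, or no access
 *		// *devt may later be passed to blkdev_get_by_dev()
 *		return 0;
 *	}
 *
 * blkdev_get_by_path() above is the in-tree combination of this lookup with
 * the actual open.
 */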

int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!atomic_read(&bdev->bd_openers)) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mapping so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}

/*
 * Handle STATX_DIOALIGN for block devices.
 *
 * Note that the inode passed to this is the inode of a block device node file,
 * not the block device's internal inode.  Therefore it is *not* valid to use
 * I_BDEV() here; the block device has to be looked up by i_rdev instead.
 */
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
	struct block_device *bdev;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return;

	stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
	stat->dio_offset_align = bdev_logical_block_size(bdev);
	stat->result_mask |= STATX_DIOALIGN;

	blkdev_put_no_open(bdev);
}
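
/*
 * Userspace view (illustrative sketch only): the alignment values filled in
 * above surface through statx(2) when STATX_DIOALIGN is requested on a block
 * device node, e.g.:
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/dev/sda", 0, STATX_DIOALIGN, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_DIOALIGN)) {
 *		// stx.stx_dio_mem_align: required user buffer alignment
 *		// stx.stx_dio_offset_align: required offset/length alignment
 *	}
 *
 * "/dev/sda" is just an example path; O_DIRECT I/O to the device should
 * respect both values.
 */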