return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
- if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
+ if (!(mode & FMODE_EXCL) &&
+ blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
return -EBUSY;
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
- bd_release(bdev);
+ blkdev_put(bdev, mode | FMODE_EXCL);
return ret;
case BLKPG:
ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
- struct file *lo_file;
- struct file *md_file;
struct drbd_md md;
struct disk_conf dc; /* The user provided config... */
sector_t known_size; /* last known size of that backing device */
if (ldev == NULL)
return;
- bd_release(ldev->backing_bdev);
- bd_release(ldev->md_bdev);
-
- fput(ldev->lo_file);
- fput(ldev->md_file);
+ blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(ldev);
}
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
- struct inode *inode, *inode2;
+ struct block_device *bdev;
struct lru_cache *resync_lru = NULL;
union drbd_state ns, os;
unsigned int max_seg_s;
}
}
- nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
- if (IS_ERR(nbc->lo_file)) {
+ bdev = open_bdev_exclusive(nbc->dc.backing_dev,
+ FMODE_READ | FMODE_WRITE, mdev);
+ if (IS_ERR(bdev)) {
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
- PTR_ERR(nbc->lo_file));
- nbc->lo_file = NULL;
+ PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
}
+ nbc->backing_bdev = bdev;
- inode = nbc->lo_file->f_dentry->d_inode;
-
- if (!S_ISBLK(inode->i_mode)) {
- retcode = ERR_DISK_NOT_BDEV;
- goto fail;
- }
-
- nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
- if (IS_ERR(nbc->md_file)) {
+ /*
+ * meta_dev_idx >= 0: external fixed size, possibly multiple
+ * drbd sharing one meta device. TODO in that case, paranoia
+ * check that [md_bdev, meta_dev_idx] is not yet used by some
+ * other drbd minor! (if you use drbd.conf + drbdadm, that
+ * should check it for you already; but if you don't, or
+ * someone fooled it, we need to double check here)
+ */
+ bdev = open_bdev_exclusive(nbc->dc.meta_dev,
+ FMODE_READ | FMODE_WRITE,
+ (nbc->dc.meta_dev_idx < 0) ?
+ (void *)mdev : (void *)drbd_m_holder);
+ if (IS_ERR(bdev)) {
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
- PTR_ERR(nbc->md_file));
- nbc->md_file = NULL;
+ PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
}
+ nbc->md_bdev = bdev;
- inode2 = nbc->md_file->f_dentry->d_inode;
-
- if (!S_ISBLK(inode2->i_mode)) {
- retcode = ERR_MD_NOT_BDEV;
- goto fail;
- }
-
- nbc->backing_bdev = inode->i_bdev;
- if (bd_claim(nbc->backing_bdev, mdev)) {
- printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
- nbc->backing_bdev, mdev,
- nbc->backing_bdev->bd_holder,
- nbc->backing_bdev->bd_contains->bd_holder,
- nbc->backing_bdev->bd_holders);
- retcode = ERR_BDCLAIM_DISK;
+ if ((nbc->backing_bdev == nbc->md_bdev) !=
+ (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+ nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+ retcode = ERR_MD_IDX_INVALID;
goto fail;
}
offsetof(struct bm_extent, lce));
if (!resync_lru) {
retcode = ERR_NOMEM;
- goto release_bdev_fail;
- }
-
- /* meta_dev_idx >= 0: external fixed size,
- * possibly multiple drbd sharing one meta device.
- * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
- * not yet used by some other drbd minor!
- * (if you use drbd.conf + drbdadm,
- * that should check it for you already; but if you don't, or someone
- * fooled it, we need to double check here) */
- nbc->md_bdev = inode2->i_bdev;
- if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
- : (void *) drbd_m_holder)) {
- retcode = ERR_BDCLAIM_MD_DISK;
- goto release_bdev_fail;
- }
-
- if ((nbc->backing_bdev == nbc->md_bdev) !=
- (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
- nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
- retcode = ERR_MD_IDX_INVALID;
- goto release_bdev2_fail;
+ goto fail;
}
/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
retcode = ERR_DISK_TO_SMALL;
- goto release_bdev2_fail;
+ goto fail;
}
if (nbc->dc.meta_dev_idx < 0) {
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
- goto release_bdev2_fail;
+ goto fail;
}
/* Make sure the new disk is big enough
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
retcode = ERR_DISK_TO_SMALL;
- goto release_bdev2_fail;
+ goto fail;
}
nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
drbd_resume_io(mdev);
if (retcode < SS_SUCCESS)
- goto release_bdev2_fail;
+ goto fail;
if (!get_ldev_if_state(mdev, D_ATTACHING))
goto force_diskless;
force_diskless:
drbd_force_state(mdev, NS(disk, D_DISKLESS));
drbd_md_sync(mdev);
- release_bdev2_fail:
- if (nbc)
- bd_release(nbc->md_bdev);
- release_bdev_fail:
- if (nbc)
- bd_release(nbc->backing_bdev);
fail:
if (nbc) {
- if (nbc->lo_file)
- fput(nbc->lo_file);
- if (nbc->md_file)
- fput(nbc->md_file);
+ if (nbc->backing_bdev)
+ blkdev_put(nbc->backing_bdev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+ if (nbc->md_bdev)
+ blkdev_put(nbc->md_bdev,
+ FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(nbc);
}
lc_destroy(resync_lru);
* so bdget() can't fail.
*/
bdget(pd->bdev->bd_dev);
- if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
+ if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
goto out;
- if ((ret = bd_claim(pd->bdev, pd)))
- goto out_putdev;
-
if ((ret = pkt_get_last_written(pd, &lba))) {
printk(DRIVER_NAME": pkt_get_last_written failed\n");
- goto out_unclaim;
+ goto out_putdev;
}
set_capacity(pd->disk, lba << 2);
q = bdev_get_queue(pd->bdev);
if (write) {
if ((ret = pkt_open_write(pd)))
- goto out_unclaim;
+ goto out_putdev;
/*
* Some CDRW drives can not handle writes larger than one packet,
* even if the size is a multiple of the packet size.
}
if ((ret = pkt_set_segment_merging(pd, q)))
- goto out_unclaim;
+ goto out_putdev;
if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
printk(DRIVER_NAME": not enough memory for buffers\n");
ret = -ENOMEM;
- goto out_unclaim;
+ goto out_putdev;
}
printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
}
return 0;
-out_unclaim:
- bd_release(pd->bdev);
out_putdev:
- blkdev_put(pd->bdev, FMODE_READ);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
return ret;
}
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- bd_release(pd->bdev);
- blkdev_put(pd->bdev, FMODE_READ);
+ blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
pkt_shrink_pktlist(pd);
}
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
if (ret)
return ret;
if (!bdev)
goto out;
igrab(bdev->bd_inode);
- err = blkdev_get(bdev, filp->f_mode);
+ err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
if (err)
goto out;
- err = bd_claim(bdev, raw_open);
- if (err)
- goto out1;
err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
- goto out2;
+ goto out1;
filp->f_flags |= O_DIRECT;
filp->f_mapping = bdev->bd_inode->i_mapping;
if (++raw_devices[minor].inuse == 1)
mutex_unlock(&raw_mutex);
return 0;
-out2:
- bd_release(bdev);
out1:
- blkdev_put(bdev, filp->f_mode);
+ blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
mutex_unlock(&raw_mutex);
return err;
}
mutex_unlock(&raw_mutex);
- bd_release(bdev);
- blkdev_put(bdev, filp->f_mode);
+ blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
return 0;
}
BUG_ON(d->dm_dev.bdev);
- bdev = open_by_devnum(dev, d->dm_dev.mode);
+ bdev = open_by_devnum(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- r = bd_claim(bdev, _claim_ptr);
- if (r) {
- blkdev_put(bdev, d->dm_dev.mode);
- return r;
- }
-
r = bd_link_disk_holder(bdev, dm_disk(md));
if (r) {
- bd_release(bdev);
- blkdev_put(bdev, d->dm_dev.mode);
+ blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
return r;
}
if (!d->dm_dev.bdev)
return;
- bd_unlink_disk_holder(d->dm_dev.bdev);
- bd_release(d->dm_dev.bdev);
- blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
+ blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
d->dm_dev.bdev = NULL;
}
MD_BUG();
return;
}
- bd_unlink_disk_holder(rdev->bdev);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;
struct block_device *bdev;
char b[BDEVNAME_SIZE];
- bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+ bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+ shared ? (mdk_rdev_t *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
- err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
- if (err) {
- printk(KERN_ERR "md: could not bd_claim %s.\n",
- bdevname(bdev, b));
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
- return err;
- }
if (!shared)
set_bit(AllReserved, &rdev->flags);
rdev->bdev = bdev;
rdev->bdev = NULL;
if (!bdev)
MD_BUG();
- bd_release(bdev);
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void md_autodetect_dev(dev_t dev);
if (dev->blkdev) {
invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
0, -1);
- close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
kfree(dev);
/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
- const fmode_t mode = FMODE_READ | FMODE_WRITE;
+ const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
to resolve the device name by other means. */
dev_t devt = name_to_dev_t(devname);
- if (devt) {
- bdev = open_by_devnum(devt, mode);
- if (!IS_ERR(bdev)) {
- int ret;
- ret = bd_claim(bdev, dev);
- if (ret) {
- blkdev_put(bdev, mode);
- bdev = ERR_PTR(ret);
- }
- }
- }
+ if (devt)
+ bdev = open_by_devnum(devt, mode, dev);
}
#endif
struct block_device *bdev;
bdev = bdget_disk(block->gdp, 0);
- if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
+ if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
return -ENODEV;
/*
 * See fs/partitions/check.c:register_disk,rescan_partitions
else if (bdev->bd_contains == bdev)
return true; /* is a whole device which isn't held */
- else if (whole->bd_holder == bd_claim)
+ else if (whole->bd_holder == bd_may_claim)
return true; /* is a partition of a device that is being partitioned */
else if (whole->bd_holder != NULL)
return false; /* is a partition of a held device */
{
/* note that for a whole device bd_holders
* will be incremented twice, and bd_holder will
- * be set to bd_claim before being set to holder
+ * be set to bd_may_claim before being set to holder
*/
whole->bd_holders++;
- whole->bd_holder = bd_claim;
+ whole->bd_holder = bd_may_claim;
bdev->bd_holders++;
bdev->bd_holder = holder;
}
__bd_abort_claiming(whole, holder); /* not actually an abort */
}
-/**
- * bd_claim - claim a block device
- * @bdev: block device to claim
- * @holder: holder trying to claim @bdev
- *
- * Try to claim @bdev which must have been opened successfully.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 if successful, -EBUSY if @bdev is already claimed.
- */
-int bd_claim(struct block_device *bdev, void *holder)
-{
- struct block_device *whole = bdev->bd_contains;
- int res;
-
- might_sleep();
-
- spin_lock(&bdev_lock);
- res = bd_prepare_to_claim(bdev, whole, holder);
- if (res == 0)
- __bd_claim(bdev, whole, holder);
- spin_unlock(&bdev_lock);
-
- return res;
-}
-EXPORT_SYMBOL(bd_claim);
-
-void bd_release(struct block_device *bdev)
+static void bd_release(struct block_device *bdev)
{
spin_lock(&bdev_lock);
if (!--bdev->bd_contains->bd_holders)
spin_unlock(&bdev_lock);
}
-EXPORT_SYMBOL(bd_release);
-
#ifdef CONFIG_SYSFS
static int add_symlink(struct kobject *from, struct kobject *to)
{
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
-void bd_unlink_disk_holder(struct block_device *bdev)
+static void bd_unlink_disk_holder(struct block_device *bdev)
{
struct gendisk *disk = bdev->bd_holder_disk;
del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
}
-EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
+#else
+static inline void bd_unlink_disk_holder(struct block_device *bdev)
+{ }
#endif
/*
* to be used for internal purposes. If you ever need it - reconsider
* your API.
*/
-struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
+struct block_device *open_by_devnum(dev_t dev, fmode_t mode, void *holder)
{
struct block_device *bdev = bdget(dev);
int err = -ENOMEM;
if (bdev)
- err = blkdev_get(bdev, mode);
+ err = blkdev_get(bdev, mode, holder);
return err ? ERR_PTR(err) : bdev;
}
return ret;
}
-int blkdev_get(struct block_device *bdev, fmode_t mode)
+int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
- return __blkdev_get(bdev, mode, 0);
+ struct block_device *whole = NULL;
+ int res;
+
+ WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
+
+ if ((mode & FMODE_EXCL) && holder) {
+ whole = bd_start_claiming(bdev, holder);
+ if (IS_ERR(whole)) {
+ bdput(bdev);
+ return PTR_ERR(whole);
+ }
+ }
+
+ res = __blkdev_get(bdev, mode, 0);
+
+ if (whole) {
+ if (res == 0)
+ bd_finish_claiming(bdev, whole, holder);
+ else
+ bd_abort_claiming(whole, holder);
+ }
+
+ return res;
}
EXPORT_SYMBOL(blkdev_get);
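
For reference, a minimal caller-side sketch of the exclusive-open path above
(illustrative only; "holder" stands for whatever cookie the caller already
uses, e.g. its private state struct or the struct file):

	/* previously a two-step sequence:
	 *	err = blkdev_get(bdev, mode);
	 *	if (!err)
	 *		err = bd_claim(bdev, holder);
	 * now a single call; the claim is started, and finished or aborted
	 * depending on the open result, inside blkdev_get() itself:
	 */
	err = blkdev_get(bdev, mode | FMODE_EXCL, holder);
	if (err)
		return err;
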
static int blkdev_open(struct inode * inode, struct file * filp)
{
- struct block_device *whole = NULL;
struct block_device *bdev;
- int res;
/*
* Preserve backwards compatibility and allow large file access
if (bdev == NULL)
return -ENOMEM;
- if (filp->f_mode & FMODE_EXCL) {
- whole = bd_start_claiming(bdev, filp);
- if (IS_ERR(whole)) {
- bdput(bdev);
- return PTR_ERR(whole);
- }
- }
-
filp->f_mapping = bdev->bd_inode->i_mapping;
- res = blkdev_get(bdev, filp->f_mode);
-
- if (whole) {
- if (res == 0)
- bd_finish_claiming(bdev, whole, filp);
- else
- bd_abort_claiming(whole, filp);
- }
-
- return res;
+ return blkdev_get(bdev, filp->f_mode, filp);
}
static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
int blkdev_put(struct block_device *bdev, fmode_t mode)
{
+ if (mode & FMODE_EXCL) {
+ mutex_lock(&bdev->bd_mutex);
+ bd_release(bdev);
+ if (!bdev->bd_holders)
+ bd_unlink_disk_holder(bdev);
+ mutex_unlock(&bdev->bd_mutex);
+ }
return __blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);
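
The matching release is equally mechanical (a sketch, mirroring the driver
conversions elsewhere in this patch): passing FMODE_EXCL back to blkdev_put()
replaces the old bd_release() + blkdev_put() pair, and once the last exclusive
holder is gone blkdev_put() also unlinks any holder symlinks via
bd_unlink_disk_holder():

	/* was:	bd_release(bdev);
	 *	blkdev_put(bdev, mode);
	 */
	blkdev_put(bdev, mode | FMODE_EXCL);
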
static int blkdev_close(struct inode * inode, struct file * filp)
{
struct block_device *bdev = I_BDEV(filp->f_mapping->host);
- if (bdev->bd_holder == filp)
- bd_release(bdev);
+
return blkdev_put(bdev, filp->f_mode);
}
*/
struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
{
- struct block_device *bdev, *whole;
+ struct block_device *bdev;
int error;
bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return bdev;
- whole = bd_start_claiming(bdev, holder);
- if (IS_ERR(whole)) {
- bdput(bdev);
- return whole;
- }
-
- error = blkdev_get(bdev, mode);
+ error = blkdev_get(bdev, mode | FMODE_EXCL, holder);
if (error)
- goto out_abort_claiming;
+ return ERR_PTR(error);
- error = -EACCES;
- if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
- goto out_blkdev_put;
+ if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
+ blkdev_put(bdev, mode);
+ return ERR_PTR(-EACCES);
+ }
- bd_finish_claiming(bdev, whole, holder);
return bdev;
-
-out_blkdev_put:
- blkdev_put(bdev, mode);
-out_abort_claiming:
- bd_abort_claiming(whole, holder);
- return ERR_PTR(error);
}
EXPORT_SYMBOL(open_bdev_exclusive);
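
Note the asymmetry this leaves for open_bdev_exclusive() users (a sketch,
matching the filesystem hunks below): the helper adds FMODE_EXCL internally,
but with close_bdev_exclusive() gone the caller must add it back explicitly
when releasing:

	bdev = open_bdev_exclusive(path, mode, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);
	/* ... use the device ... */
	blkdev_put(bdev, mode | FMODE_EXCL);
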
-/**
- * close_bdev_exclusive - close a blockdevice opened by open_bdev_exclusive()
- *
- * @bdev: blockdevice to close
- * @mode: mode, must match that used to open.
- *
- * This is the counterpart to open_bdev_exclusive().
- */
-void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
-{
- bd_release(bdev);
- blkdev_put(bdev, mode);
-}
-
-EXPORT_SYMBOL(close_bdev_exclusive);
-
int __invalidate_device(struct block_device *bdev)
{
struct super_block *sb = get_super(bdev);
continue;
if (device->bdev) {
- close_bdev_exclusive(device->bdev, device->mode);
+ blkdev_put(device->bdev, device->mode | FMODE_EXCL);
device->bdev = NULL;
fs_devices->open_devices--;
}
list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->bdev) {
- close_bdev_exclusive(device->bdev, device->mode);
+ blkdev_put(device->bdev, device->mode | FMODE_EXCL);
fs_devices->open_devices--;
}
if (device->writeable) {
error_brelse:
brelse(bh);
error_close:
- close_bdev_exclusive(bdev, flags);
+ blkdev_put(bdev, flags | FMODE_EXCL);
error:
continue;
}
brelse(bh);
error_close:
- close_bdev_exclusive(bdev, flags);
+ blkdev_put(bdev, flags | FMODE_EXCL);
error:
mutex_unlock(&uuid_mutex);
return ret;
root->fs_info->fs_devices->latest_bdev = next_device->bdev;
if (device->bdev) {
- close_bdev_exclusive(device->bdev, device->mode);
+ blkdev_put(device->bdev, device->mode | FMODE_EXCL);
device->bdev = NULL;
device->fs_devices->open_devices--;
}
brelse(bh);
error_close:
if (bdev)
- close_bdev_exclusive(bdev, FMODE_READ);
+ blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
mutex_unlock(&root->fs_info->volume_mutex);
return ret;
error:
- close_bdev_exclusive(bdev, 0);
+ blkdev_put(bdev, FMODE_EXCL);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
struct block_device *bdev;
char b[BDEVNAME_SIZE];
- bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+ bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
if (IS_ERR(bdev))
goto fail;
return bdev;
*/
static int ext3_blkdev_put(struct block_device *bdev)
{
- bd_release(bdev);
- return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
static int ext3_blkdev_remove(struct ext3_sb_info *sbi)
if (bdev == NULL)
return NULL;
- if (bd_claim(bdev, sb)) {
- ext3_msg(sb, KERN_ERR,
- "error: failed to claim external journal device");
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
- return NULL;
- }
-
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
struct block_device *bdev;
char b[BDEVNAME_SIZE];
- bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
+ bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
if (IS_ERR(bdev))
goto fail;
return bdev;
*/
static int ext4_blkdev_put(struct block_device *bdev)
{
- bd_release(bdev);
- return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
if (bdev == NULL)
return NULL;
- if (bd_claim(bdev, sb)) {
- ext4_msg(sb, KERN_ERR,
- "failed to claim external journal device");
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
- return NULL;
- }
-
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
goto error_bdev;
if (s->s_root)
- close_bdev_exclusive(bdev, mode);
+ blkdev_put(bdev, mode | FMODE_EXCL);
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
deactivate_locked_super(s);
return ERR_PTR(error);
error_bdev:
- close_bdev_exclusive(bdev, mode);
+ blkdev_put(bdev, mode | FMODE_EXCL);
return ERR_PTR(error);
}
* file systems to log may have n-to-1 relationship;
*/
- bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
+ bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
+ log);
if (IS_ERR(bdev)) {
rc = -PTR_ERR(bdev);
goto free;
}
- if ((rc = bd_claim(bdev, log))) {
- goto close;
- }
-
log->bdev = bdev;
memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));
* initialize log:
*/
if ((rc = lmLogInit(log)))
- goto unclaim;
+ goto close;
list_add(&log->journal_list, &jfs_external_logs);
list_del(&log->journal_list);
lbmLogShutdown(log);
- unclaim:
- bd_release(bdev);
-
close: /* close external log device */
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
bdev = log->bdev;
rc = lmLogShutdown(log);
- bd_release(bdev);
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
kfree(log);
static void bdev_put_device(struct logfs_super *s)
{
- close_bdev_exclusive(s->s_bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
int mtdnr = MINOR(bdev->bd_dev);
- close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
return logfs_get_sb_mtd(p, mtdnr);
}
}
if (!s_new)
- close_bdev_exclusive(sd.bdev, mode);
+ blkdev_put(sd.bdev, mode | FMODE_EXCL);
return root_dentry;
failed:
if (!s_new)
- close_bdev_exclusive(sd.bdev, mode);
+ blkdev_put(sd.bdev, mode | FMODE_EXCL);
return ERR_PTR(err);
}
goto out;
reg->hr_bdev = I_BDEV(filp->f_mapping->host);
- ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ);
+ ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL);
if (ret) {
reg->hr_bdev = NULL;
goto out;
goto exit;
bdev->bd_invalidated = 1;
- err = blkdev_get(bdev, FMODE_READ);
+ err = blkdev_get(bdev, FMODE_READ, NULL);
if (err < 0)
goto exit;
blkdev_put(bdev, FMODE_READ);
result = 0;
if (journal->j_dev_bd != NULL) {
- if (journal->j_dev_bd->bd_dev != super->s_dev)
- bd_release(journal->j_dev_bd);
result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
journal->j_dev_bd = NULL;
}
{
int result;
dev_t jdev;
- fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
+ fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
char b[BDEVNAME_SIZE];
result = 0;
/* there is no "jdev" option and journal is on separate device */
if ((!jdev_name || !jdev_name[0])) {
- journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
+ if (jdev == super->s_dev)
+ blkdev_mode &= ~FMODE_EXCL;
+ journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode, journal);
journal->j_dev_mode = blkdev_mode;
if (IS_ERR(journal->j_dev_bd)) {
result = PTR_ERR(journal->j_dev_bd);
"cannot init journal device '%s': %i",
__bdevname(jdev, b), result);
return result;
- } else if (jdev != super->s_dev) {
- result = bd_claim(journal->j_dev_bd, journal);
- if (result) {
- blkdev_put(journal->j_dev_bd, blkdev_mode);
- return result;
- }
-
+ } else if (jdev != super->s_dev)
set_blocksize(journal->j_dev_bd, super->s_blocksize);
- }
return 0;
}
/*
* s_umount nests inside bd_mutex during
- * __invalidate_device(). close_bdev_exclusive()
- * acquires bd_mutex and can't be called under
- * s_umount. Drop s_umount temporarily. This is safe
- * as we're holding an active reference.
+ * __invalidate_device(). blkdev_put() acquires
+ * bd_mutex and can't be called under s_umount. Drop
+ * s_umount temporarily. This is safe as we're
+ * holding an active reference.
*/
up_write(&s->s_umount);
- close_bdev_exclusive(bdev, mode);
+ blkdev_put(bdev, mode | FMODE_EXCL);
down_write(&s->s_umount);
} else {
char b[BDEVNAME_SIZE];
error_s:
error = PTR_ERR(s);
error_bdev:
- close_bdev_exclusive(bdev, mode);
+ blkdev_put(bdev, mode | FMODE_EXCL);
error:
return ERR_PTR(error);
}
bdev->bd_super = NULL;
generic_shutdown_super(sb);
sync_blockdev(bdev);
- close_bdev_exclusive(bdev, mode);
+ blkdev_put(bdev, mode | FMODE_EXCL);
}
EXPORT_SYMBOL(kill_block_super);
struct block_device *bdev)
{
if (bdev)
- close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
/*
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
-extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern struct block_device *open_by_devnum(dev_t dev, fmode_t mode,
+ void *holder);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-extern int blkdev_get(struct block_device *, fmode_t);
-extern int blkdev_put(struct block_device *, fmode_t);
-extern int bd_claim(struct block_device *, void *);
-extern void bd_release(struct block_device *);
+extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
+extern int blkdev_put(struct block_device *bdev, fmode_t mode);
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-extern void bd_unlink_disk_holder(struct block_device *bdev);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
struct gendisk *disk)
{
return 0;
}
-static inline void bd_unlink_disk_holder(struct block_device *bdev)
-{
-}
#endif
#endif
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
-extern void close_bdev_exclusive(struct block_device *, fmode_t);
extern void blkdev_show(struct seq_file *,off_t);
#else
return res;
root_swap = res;
- res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
+ res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
if (res)
return res;
{
int error;
- hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
+ hib_resume_bdev = open_by_devnum(swsusp_resume_device,
+ FMODE_READ, NULL);
if (!IS_ERR(hib_resume_bdev)) {
set_blocksize(hib_resume_bdev, PAGE_SIZE);
clear_page(swsusp_header);
if (S_ISBLK(inode->i_mode)) {
struct block_device *bdev = I_BDEV(inode);
set_blocksize(bdev, p->old_block_size);
- bd_release(bdev);
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
} else {
mutex_lock(&inode->i_mutex);
inode->i_flags &= ~S_SWAPFILE;
error = -EINVAL;
if (S_ISBLK(inode->i_mode)) {
bdev = I_BDEV(inode);
- error = bd_claim(bdev, sys_swapon);
+ error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
+ sys_swapon);
if (error < 0) {
bdev = NULL;
error = -EINVAL;
bad_swap:
if (bdev) {
set_blocksize(bdev, p->old_block_size);
- bd_release(bdev);
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
destroy_swap_extents(p);
swap_cgroup_swapoff(type);