#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"
+#include "compat.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
return ret;
}
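+/*
+ * Wrapper around blkdev_issue_discard(): callers pass byte offsets and
+ * lengths, which are converted to 512-byte sectors here.  Kernels >= 2.6.28
+ * take an extra gfp_mask argument, handled by the #if below.
+ */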
+static void btrfs_issue_discard(struct block_device *bdev,
+ u64 start, u64 len)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
+ blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
+#else
+ blkdev_issue_discard(bdev, start >> 9, len >> 9);
+#endif
+}
+
static int noinline free_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct list_head *del_list)
BUG_ON(ret);
#ifdef BIO_RW_DISCARD
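+ /* map_length is an in/out parameter: ask to map the whole extent */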
+ map_length = tmp->num_bytes;
ret = btrfs_map_block(&info->mapping_tree, READ,
tmp->bytenr, &map_length, &multi,
0);
struct btrfs_bio_stripe *stripe;
int i;
- stripe = multi->stripe;
+ stripe = multi->stripes;
if (map_length > tmp->num_bytes)
map_length = tmp->num_bytes;
for (i = 0; i < multi->num_stripes;
i++, stripe++)
- blkdev_issue_discard(stripe->dev->bdev,
- stripe->physical >> 9,
- map_length >> 9);
+ btrfs_issue_discard(stripe->dev->bdev,
+ stripe->physical,
+ map_length);
kfree(multi);
}
#endif
map_length = num_bytes;
for (i = 0; i < multi->num_stripes; i++, stripe++) {
- blkdev_issue_discard(stripe->dev->bdev,
- stripe->physical >> 9,
- map_length >> 9);
+ btrfs_issue_discard(stripe->dev->bdev,
+ stripe->physical,
+ map_length);
}
kfree(multi);
}
dev = list_entry(fs_devices->devices.next,
struct btrfs_device, dev_list);
if (dev->bdev) {
- close_bdev_excl(dev->bdev);
+ close_bdev_exclusive(dev->bdev, dev->mode);
fs_devices->open_devices--;
}
fs_devices->num_devices--;
continue;
if (device->bdev) {
- close_bdev_excl(device->bdev);
+ close_bdev_exclusive(device->bdev, device->mode);
device->bdev = NULL;
fs_devices->open_devices--;
}
list_for_each(cur, &fs_devices->devices) {
device = list_entry(cur, struct btrfs_device, dev_list);
if (device->bdev) {
- close_bdev_excl(device->bdev);
+ close_bdev_exclusive(device->bdev, device->mode);
fs_devices->open_devices--;
}
if (device->writeable) {
return ret;
}
-int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, void *holder)
+int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ int flags, void *holder)
{
struct block_device *bdev;
struct list_head *head = &fs_devices->devices;
if (!device->name)
continue;
- bdev = open_bdev_excl(device->name, MS_RDONLY, holder);
+ bdev = open_bdev_exclusive(device->name, flags, holder);
if (IS_ERR(bdev)) {
printk("open %s failed\n", device->name);
goto error;
device->bdev = bdev;
device->in_fs_metadata = 0;
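+ /* remember the open mode so later closes can pass the same one */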
+ device->mode = flags;
+
fs_devices->open_devices++;
if (device->writeable) {
fs_devices->rw_devices++;
error_brelse:
brelse(bh);
error_close:
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, flags);
error:
continue;
}
ret = 0;
}
} else {
- ret = __btrfs_open_devices(fs_devices, holder);
+ ret = __btrfs_open_devices(fs_devices, flags, holder);
}
mutex_unlock(&uuid_mutex);
return ret;
mutex_lock(&uuid_mutex);
- bdev = open_bdev_excl(path, flags, holder);
+ bdev = open_bdev_exclusive(path, flags, holder);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
error_brelse:
brelse(bh);
error_close:
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, flags);
error:
mutex_unlock(&uuid_mutex);
return ret;
goto out;
}
} else {
- bdev = open_bdev_excl(device_path, MS_RDONLY,
+ bdev = open_bdev_exclusive(device_path, MS_RDONLY,
root->fs_info->bdev_holder);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
BUG_ON(device->writeable);
brelse(bh);
if (bdev)
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, MS_RDONLY);
if (device->bdev) {
- close_bdev_excl(device->bdev);
+ close_bdev_exclusive(device->bdev, device->mode);
device->bdev = NULL;
device->fs_devices->open_devices--;
}
if (device->bdev) {
/* one close for the device struct or super_block */
- close_bdev_excl(device->bdev);
+ close_bdev_exclusive(device->bdev, device->mode);
}
if (bdev) {
/* one close for us */
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, MS_RDONLY);
}
kfree(device->name);
kfree(device);
brelse(bh);
error_close:
if (bdev)
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, MS_RDONLY);
out:
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;
- bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
+ bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
- if (!bdev) {
- return -EIO;
- }
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
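+ /* opened with flags 0 above; record it for close_bdev_exclusive() */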
+ device->mode = 0;
set_blocksize(device->bdev, 4096);
if (seeding_dev) {
mutex_unlock(&root->fs_info->volume_mutex);
return ret;
error:
- close_bdev_excl(bdev);
+ close_bdev_exclusive(bdev, 0);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
goto out;
}
- ret = __btrfs_open_devices(fs_devices, root->fs_info->bdev_holder);
+ ret = __btrfs_open_devices(fs_devices, MS_RDONLY,
+ root->fs_info->bdev_holder);
if (ret)
goto out;