}
}
-static void __btrfsic_submit_bio(struct bio *bio)
+void btrfsic_check_bio(struct bio *bio)
{
struct btrfsic_dev_state *dev_state;
mutex_unlock(&btrfsic_mutex);
}
-void btrfsic_submit_bio(struct bio *bio)
-{
- __btrfsic_submit_bio(bio);
- submit_bio(bio);
-}
-
-int btrfsic_submit_bio_wait(struct bio *bio)
-{
- __btrfsic_submit_bio(bio);
- return submit_bio_wait(bio);
-}
-
int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask)
#define BTRFS_CHECK_INTEGRITY_H
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-void btrfsic_submit_bio(struct bio *bio);
-int btrfsic_submit_bio_wait(struct bio *bio);
+void btrfsic_check_bio(struct bio *bio);
#else
-#define btrfsic_submit_bio submit_bio
-#define btrfsic_submit_bio_wait submit_bio_wait
+static inline void btrfsic_check_bio(struct bio *bio) { }
#endif
int btrfsic_mount(struct btrfs_fs_info *fs_info,
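With the wrappers removed, btrfsic_check_bio() only feeds the bio to the integrity checker and no longer submits it, so every former btrfsic_submit_bio()/btrfsic_submit_bio_wait() site now pairs the check with an explicit submission; with CONFIG_BTRFS_FS_CHECK_INTEGRITY=n the empty inline stub makes the check compile away. Illustrative sketch of the resulting calling pattern (not part of the patch):

	/* asynchronous path: check, then submit */
	btrfsic_check_bio(bio);		/* no-op when the integrity checker is not built in */
	submit_bio(bio);

	/* synchronous path: check, then wait for completion */
	btrfsic_check_bio(bio);
	ret = submit_bio_wait(bio);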
if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
bio->bi_opf |= REQ_FUA;
- btrfsic_submit_bio(bio);
+ btrfsic_check_bio(bio);
+ submit_bio(bio);
if (btrfs_advance_sb_log(device, i))
errors++;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
- btrfsic_submit_bio(bio);
+ btrfsic_check_bio(bio);
+ submit_bio(bio);
set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}
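The flush path stays asynchronous: write_dev_flush() only issues the flush bio here, and its completion is reaped later through device->flush_wait. A minimal sketch of that wait side, assuming the existing wait_dev_flush() counterpart in disk-io.c (not touched by this patch):

	/* sketch only: wait for a previously sent flush, if any */
	if (test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)) {
		clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
		wait_for_completion_io(&device->flush_wait);
	}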
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio_add_page(bio, page, length, pg_offset);
- if (btrfsic_submit_bio_wait(bio)) {
+ btrfsic_check_bio(bio);
+ if (submit_bio_wait(bio)) {
/* try to remap that extent elsewhere? */
btrfs_bio_counter_dec(fs_info);
bio_put(bio);
bio->bi_iter.bi_sector = sector->physical >> 9;
bio->bi_opf = REQ_OP_READ;
- if (btrfsic_submit_bio_wait(bio)) {
+ btrfsic_check_bio(bio);
+ if (submit_bio_wait(bio)) {
sector->io_error = 1;
sblock->no_io_error_seen = 0;
}
return -EIO;
}
- if (btrfsic_submit_bio_wait(bio)) {
+ btrfsic_check_bio(bio);
+ if (submit_bio_wait(bio)) {
btrfs_dev_stat_inc_and_print(sector_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
atomic64_inc(&fs_info->dev_replace.num_write_errors);
* orders the requests before sending them to the driver which
* doubled the write performance on spinning disks when measured
* with Linux 3.5 */
- btrfsic_submit_bio(sbio->bio);
+ btrfsic_check_bio(sbio->bio);
+ submit_bio(sbio->bio);
if (btrfs_is_zoned(sctx->fs_info))
sctx->write_pointer = sbio->physical + sbio->sector_count *
sbio = sctx->bios[sctx->curr];
sctx->curr = -1;
scrub_pending_bio_inc(sctx);
- btrfsic_submit_bio(sbio->bio);
+ btrfsic_check_bio(sbio->bio);
+ submit_bio(sbio->bio);
}
static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
btrfs_bio_counter_inc_noblocked(fs_info);
- btrfsic_submit_bio(bio);
+ btrfsic_check_bio(bio);
+ submit_bio(bio);
}
static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)