/**
* submit_bio_wait - submit a bio, and wait until it completes
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
* @bio: The &struct bio which describes the I/O
*
* Simple wrapper around submit_bio(). Returns 0 on success, or the error from
* bio_endio() on failure.
*/
-int submit_bio_wait(int rw, struct bio *bio)
+int submit_bio_wait(struct bio *bio)
{
struct submit_bio_ret ret;
- rw |= REQ_SYNC;
init_completion(&ret.event);
bio->bi_private = &ret;
bio->bi_end_io = submit_bio_wait_endio;
- submit_bio(rw, bio);
+ bio->bi_rw |= REQ_SYNC;
+ submit_bio(bio);
wait_for_completion_io(&ret.event);
return ret.error;
}
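With the @rw argument gone, the operation travels on the bio itself: callers fill in bio->bi_rw before submission. A minimal sketch of the new calling convention for a synchronous read; read_one_block() and its parameters are hypothetical, not part of this patch:

	/* Hypothetical caller, for illustration only. */
	static int read_one_block(struct block_device *bdev, sector_t sector,
				  struct page *page, unsigned int len)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, 1);
		int ret;

		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = sector;
		if (bio_add_page(bio, page, len, 0) < len) {
			bio_put(bio);
			return -EIO;
		}
		bio->bi_rw = READ;		/* was: submit_bio_wait(READ, bio) */

		ret = submit_bio_wait(bio);	/* ORs in REQ_SYNC internally */
		bio_put(bio);
		return ret;
	}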
/**
* submit_bio - submit a bio to the block device layer for I/O
- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
* @bio: The &struct bio which describes the I/O
*
* submit_bio() is very similar in purpose to generic_make_request(), and
* uses that function to do most of the work. Both are fairly rough
* interfaces; @bio must be presetup and ready for I/O.
*
*/
-blk_qc_t submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(struct bio *bio)
{
- bio->bi_rw |= rw;
-
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
if (bio_has_data(bio)) {
unsigned int count;
- if (unlikely(rw & REQ_WRITE_SAME))
+ if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
count = bdev_logical_block_size(bio->bi_bdev) >> 9;
else
count = bio_sectors(bio);
- if (rw & WRITE) {
+ if (bio->bi_rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
task_io_account_read(bio->bi_iter.bi_size);
char b[BDEVNAME_SIZE];
printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
current->comm, task_pid_nr(current),
- (rw & WRITE) ? "WRITE" : "READ",
+ (bio->bi_rw & WRITE) ? "WRITE" : "READ",
(unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b),
count);
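Asynchronous callers follow the same pattern: stash a context in bi_private, point bi_end_io at a completion handler, and set bi_rw before the call, mirroring the submit_bio_wait() implementation above. A sketch of that pattern under the converted API; struct my_ctx, my_endio() and my_submit_async_write() are illustrative names, not part of this patch:

	struct my_ctx {				/* hypothetical caller context */
		struct completion done;
	};

	static void my_endio(struct bio *bio)
	{
		struct my_ctx *ctx = bio->bi_private;

		complete(&ctx->done);		/* result is in bio->bi_error */
	}

	static void my_submit_async_write(struct bio *bio, struct my_ctx *ctx)
	{
		init_completion(&ctx->done);
		bio->bi_private = ctx;
		bio->bi_end_io = my_endio;
		bio->bi_rw = WRITE;		/* was: submit_bio(WRITE, bio) */
		submit_bio(bio);
	}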
bio = bio_alloc(gfp_mask, 0);
bio->bi_bdev = bdev;
+ bio->bi_rw = WRITE_FLUSH;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
+ ret = submit_bio_wait(bio);
/*
* The driver must store the error location in ->bi_sector, if
#include "blk.h"
-static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
+static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
gfp_t gfp)
{
struct bio *new = bio_alloc(gfp, nr_pages);
if (bio) {
bio_chain(bio, new);
- submit_bio(rw, bio);
+ submit_bio(bio);
}
return new;
req_sects = end_sect - sector;
}
- bio = next_bio(bio, type, 1, gfp_mask);
+ bio = next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
+ bio->bi_rw = type;
bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
&bio);
if (!ret && bio) {
- ret = submit_bio_wait(type, bio);
+ ret = submit_bio_wait(bio);
if (ret == -EOPNOTSUPP)
ret = 0;
}
max_write_same_sectors = UINT_MAX >> 9;
while (nr_sects) {
- bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
+ bio = next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+ bio->bi_rw = REQ_WRITE | REQ_WRITE_SAME;
if (nr_sects > max_write_same_sectors) {
bio->bi_iter.bi_size = max_write_same_sectors << 9;
}
if (bio)
- ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+ ret = submit_bio_wait(bio);
return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
unsigned int sz;
while (nr_sects != 0) {
- bio = next_bio(bio, WRITE,
- min(nr_sects, (sector_t)BIO_MAX_PAGES),
+ bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
+ bio->bi_rw = REQ_WRITE;
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
}
if (bio)
- return submit_bio_wait(WRITE, bio);
+ return submit_bio_wait(bio);
return 0;
}
+ bio->bi_rw = rw;
if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_io_error(bio);
else
- submit_bio(rw, bio);
+ submit_bio(bio);
wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
if (!bio->bi_error)
err = device->md_io.error;
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = drbd_bm_endio;
+ bio->bi_rw = rw;
if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
- bio->bi_rw |= rw;
bio_io_error(bio);
} else {
- submit_bio(rw, bio);
+ submit_bio(bio);
/* this should not count as user activity and cause the
* resync to throttle -- see drbd_rs_should_slow_down(). */
atomic_add(len >> 9, &device->rs_sect_ev);
bio.bi_flags |= (1 << BIO_QUIET);
bio.bi_private = &cbdata;
bio.bi_end_io = floppy_rb0_cb;
+ bio.bi_rw = READ;
- submit_bio(READ, &bio);
+ submit_bio(&bio);
process_fd_request();
init_completion(&cbdata.complete);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
+ bio->bi_rw = operation;
}
preq.sector_number += seg[i].nsec;
bio->bi_bdev = preq.bdev;
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
+ bio->bi_rw = operation;
}
atomic_set(&pending_req->pendcnt, nbio);
blk_start_plug(&plug);
for (i = 0; i < nbio; i++)
- submit_bio(operation, biolist[i]);
+ submit_bio(biolist[i]);
/* Let the I/Os go.. */
blk_finish_plug(&plug);
bio_trim(cloned_bio, offset, size);
cloned_bio->bi_private = split_bio;
cloned_bio->bi_end_io = split_bio_end;
- submit_bio(cloned_bio->bi_rw, cloned_bio);
+ submit_bio(cloned_bio);
}
/*
* Now we have to wait for all those smaller bios to
continue;
}
/* We don't need to split this bio */
- submit_bio(bio->bi_rw, bio);
+ submit_bio(bio);
}
return 0;
bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev;
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
+ bio->bi_rw = REQ_META|READ_SYNC;
bch_bio_map(bio, sorted);
- submit_bio_wait(REQ_META|READ_SYNC, bio);
+ submit_bio_wait(bio);
bch_bbio_free(bio, b->c);
memcpy(ondisk, sorted, KEY_SIZE(&v->key) << 9);
check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
+ check->bi_rw |= READ_SYNC;
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
- submit_bio_wait(READ_SYNC, check);
+ submit_bio_wait(check);
bio_for_each_segment(bv, bio, iter) {
void *p1 = kmap_atomic(bv.bv_page);
struct journal_device *ja =
container_of(work, struct journal_device, discard_work);
- submit_bio(0, &ja->discard_bio);
+ submit_bio(&ja->discard_bio);
}
static void do_journal_discard(struct cache *ca)
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
- bio->bi_rw = REQ_SYNC|REQ_META;
+ bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META;
bio->bi_iter.bi_size = SB_SIZE;
bch_bio_map(bio, NULL);
pr_debug("ver %llu, flags %llu, seq %llu",
sb->version, sb->flags, sb->seq);
- submit_bio(REQ_WRITE, bio);
+ submit_bio(bio);
}
static void bch_write_bdev_super_unlock(struct closure *cl)
* the dm_buffer's inline bio is local to bufio.
*/
b->bio.bi_private = end_io;
+ b->bio.bi_rw = rw;
/*
* We assume that if len >= PAGE_SIZE ptr is page-aligned.
ptr += PAGE_SIZE;
} while (len > 0);
- submit_bio(rw, &b->bio);
+ submit_bio(&b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, sector_t block,
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
+ bio->bi_rw = rw;
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
}
atomic_inc(&io->count);
- submit_bio(rw, bio);
+ submit_bio(bio);
} while (remaining);
}
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio->bi_rw = WRITE;
page = alloc_page(GFP_KERNEL);
if (!page) {
DMERR("Couldn't add page to the log block");
goto error_bio;
}
- submit_bio(WRITE, bio);
+ submit_bio(bio);
return 0;
error_bio:
bio_put(bio);
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio->bi_rw = WRITE;
for (i = 0; i < block->vec_cnt; i++) {
/*
block->vecs[i].bv_len, 0);
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
if (!bio) {
DMERR("Couldn't alloc log bio");
bio->bi_bdev = lc->logdev->bdev;
bio->bi_end_io = log_end_io;
bio->bi_private = lc;
+ bio->bi_rw = WRITE;
ret = bio_add_page(bio, block->vecs[i].bv_page,
block->vecs[i].bv_len, 0);
}
sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
}
- submit_bio(WRITE, bio);
+ submit_bio(bio);
out:
kfree(block->data);
kfree(block);
* need to wait for the chain to complete.
*/
bio_chain(op->bio, op->parent_bio);
- submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
+ op->bio->bi_rw = REQ_WRITE | REQ_DISCARD;
+ submit_bio(op->bio);
}
blk_finish_plug(&op->plug);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
+ bi->bi_rw = WRITE_FLUSH;
atomic_inc(&mddev->flush_pending);
- submit_bio(WRITE_FLUSH, bi);
+ submit_bio(bi);
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
bio_add_page(bio, page, size, 0);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
+ bio->bi_rw = WRITE_FLUSH_FUA;
atomic_inc(&mddev->pending_writes);
- submit_bio(WRITE_FLUSH_FUA, bio);
+ submit_bio(bio);
}
void md_super_wait(struct mddev *mddev)
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev;
+ bio->bi_rw = rw;
if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start;
else if (rdev->mddev->reshape_position != MaxSector &&
else
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0);
- submit_bio_wait(rw, bio);
+
+ submit_bio_wait(bio);
ret = !bio->bi_error;
bio_put(bio);
bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) < 0)
+ wbio->bi_rw = WRITE;
+
+ if (submit_bio_wait(wbio) < 0)
/* failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
choose_data_offset(r10_bio, rdev) +
(sector - r10_bio->sector));
wbio->bi_bdev = rdev->bdev;
- if (submit_bio_wait(WRITE, wbio) < 0)
+ wbio->bi_rw = WRITE;
+
+ if (submit_bio_wait(wbio) < 0)
/* Failure! */
ok = rdev_set_badblocks(rdev, sector,
sectors, 0)
__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
spin_unlock_irqrestore(&log->io_list_lock, flags);
- submit_bio(WRITE, io->current_bio);
+ submit_bio(io->current_bio);
}
static struct bio *r5l_bio_alloc(struct r5l_log *log)
io->current_bio = r5l_bio_alloc(log);
bio_chain(io->current_bio, prev);
- submit_bio(WRITE, prev);
+ submit_bio(prev);
}
if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
bio_reset(&log->flush_bio);
log->flush_bio.bi_bdev = log->rdev->bdev;
log->flush_bio.bi_end_io = r5l_log_flush_endio;
- submit_bio(WRITE_FLUSH, &log->flush_bio);
+ log->flush_bio.bi_rw = WRITE_FLUSH;
+ submit_bio(&log->flush_bio);
}
static void r5l_write_super(struct r5l_log *log, sector_t cp);
}
static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int rw)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
struct bio *bio;
bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba;
+ bio->bi_rw = rw;
return bio;
}
-static void iblock_submit_bios(struct bio_list *list, int rw)
+static void iblock_submit_bios(struct bio_list *list)
{
struct blk_plug plug;
struct bio *bio;
blk_start_plug(&plug);
while ((bio = bio_list_pop(list)))
- submit_bio(rw, bio);
+ submit_bio(bio);
blk_finish_plug(&plug);
}
bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd;
+ bio->bi_rw = WRITE_FLUSH;
if (!immed)
bio->bi_private = cmd;
- submit_bio(WRITE_FLUSH, bio);
+ submit_bio(bio);
return 0;
}
goto fail;
cmd->priv = ibr;
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, WRITE);
if (!bio)
goto fail_free_ibr;
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
- bio = iblock_get_bio(cmd, block_lba, 1);
+ bio = iblock_get_bio(cmd, block_lba, 1, WRITE);
if (!bio)
goto fail_put_bios;
sectors -= 1;
}
- iblock_submit_bios(&list, WRITE);
+ iblock_submit_bios(&list);
return 0;
fail_put_bios:
return 0;
}
- bio = iblock_get_bio(cmd, block_lba, sgl_nents);
+ bio = iblock_get_bio(cmd, block_lba, sgl_nents, rw);
if (!bio)
goto fail_free_ibr;
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
- iblock_submit_bios(&list, rw);
+ iblock_submit_bios(&list);
bio_cnt = 0;
}
- bio = iblock_get_bio(cmd, block_lba, sg_num);
+ bio = iblock_get_bio(cmd, block_lba, sg_num, rw);
if (!bio)
goto fail_put_bios;
goto fail_put_bios;
}
- iblock_submit_bios(&list, rw);
+ iblock_submit_bios(&list);
iblock_complete_cmd(cmd);
return 0;
}
bio->bi_bdev = block_ctx->dev->bdev;
bio->bi_iter.bi_sector = dev_bytenr >> 9;
+ bio->bi_rw = READ;
for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j],
"btrfsic: error, failed to add a single page!\n");
return -1;
}
- if (submit_bio_wait(READ, bio)) {
+ if (submit_bio_wait(bio)) {
printk(KERN_INFO
"btrfsic: read error at logical %llu dev %s!\n",
block_ctx->start, block_ctx->dev->name);
return submit_bh(rw, bh);
}
-static void __btrfsic_submit_bio(int rw, struct bio *bio)
+static void __btrfsic_submit_bio(struct bio *bio)
{
struct btrfsic_dev_state *dev_state;
+ int rw = bio->bi_rw;
if (!btrfsic_is_initialized)
return;
mutex_unlock(&btrfsic_mutex);
}
-void btrfsic_submit_bio(int rw, struct bio *bio)
+void btrfsic_submit_bio(struct bio *bio)
{
- __btrfsic_submit_bio(rw, bio);
- submit_bio(rw, bio);
+ __btrfsic_submit_bio(bio);
+ submit_bio(bio);
}
-int btrfsic_submit_bio_wait(int rw, struct bio *bio)
+int btrfsic_submit_bio_wait(struct bio *bio)
{
- __btrfsic_submit_bio(rw, bio);
- return submit_bio_wait(rw, bio);
+ __btrfsic_submit_bio(bio);
+ return submit_bio_wait(bio);
}
int btrfsic_mount(struct btrfs_root *root,
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
int btrfsic_submit_bh(int rw, struct buffer_head *bh);
-void btrfsic_submit_bio(int rw, struct bio *bio);
-int btrfsic_submit_bio_wait(int rw, struct bio *bio);
+void btrfsic_submit_bio(struct bio *bio);
+int btrfsic_submit_bio_wait(struct bio *bio);
#else
#define btrfsic_submit_bh submit_bh
#define btrfsic_submit_bio submit_bio
bio->bi_end_io = btrfs_end_empty_barrier;
bio->bi_bdev = device->bdev;
+ bio->bi_rw = WRITE_FLUSH;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
device->flush_bio = bio;
bio_get(bio);
- btrfsic_submit_bio(WRITE_FLUSH, bio);
+ btrfsic_submit_bio(bio);
return 0;
}
return -EIO;
}
bio->bi_bdev = dev->bdev;
+ bio->bi_rw = WRITE_SYNC;
bio_add_page(bio, page, length, pg_offset);
- if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
+ if (btrfsic_submit_bio_wait(bio)) {
/* try to remap that extent elsewhere? */
btrfs_bio_counter_dec(fs_info);
bio_put(bio);
start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
-
+ bio->bi_rw = rw;
bio_get(bio);
if (tree->ops && tree->ops->submit_bio_hook)
ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
mirror_num, bio_flags, start);
else
- btrfsic_submit_bio(rw, bio);
+ btrfsic_submit_bio(bio);
bio_put(bio);
return ret;
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- submit_bio(WRITE, bio);
+ bio->bi_rw = WRITE;
+
+ submit_bio(bio);
}
return;
bio->bi_private = rbio;
bio->bi_end_io = raid_rmw_end_io;
+ bio->bi_rw = READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
/* the actual write will happen once the reads are done */
return 0;
bio->bi_private = rbio;
bio->bi_end_io = raid_recover_end_io;
+ bio->bi_rw = READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
out:
return 0;
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- submit_bio(WRITE, bio);
+ bio->bi_rw = WRITE;
+
+ submit_bio(bio);
}
return;
bio->bi_private = rbio;
bio->bi_end_io = raid56_parity_scrub_end_io;
+ bio->bi_rw = READ;
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
/* the actual write will happen once the reads are done */
return;
sblock->no_io_error_seen = 0;
} else {
bio->bi_iter.bi_sector = page->physical >> 9;
+ bio->bi_rw = READ;
- if (btrfsic_submit_bio_wait(READ, bio))
+ if (btrfsic_submit_bio_wait(bio))
sblock->no_io_error_seen = 0;
}
return -EIO;
bio->bi_bdev = page_bad->dev->bdev;
bio->bi_iter.bi_sector = page_bad->physical >> 9;
+ bio->bi_rw = WRITE;
ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) {
return -EIO;
}
- if (btrfsic_submit_bio_wait(WRITE, bio)) {
+ if (btrfsic_submit_bio_wait(bio)) {
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
btrfs_dev_replace_stats_inc(
bio->bi_end_io = scrub_wr_bio_end_io;
bio->bi_bdev = sbio->dev->bdev;
bio->bi_iter.bi_sector = sbio->physical >> 9;
+ bio->bi_rw = WRITE;
sbio->err = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical_for_dev_replace ||
* orders the requests before sending them to the driver which
* doubled the write performance on spinning disks when measured
* with Linux 3.5 */
- btrfsic_submit_bio(WRITE, sbio->bio);
+ btrfsic_submit_bio(sbio->bio);
}
static void scrub_wr_bio_end_io(struct bio *bio)
sbio = sctx->bios[sctx->curr];
sctx->curr = -1;
scrub_pending_bio_inc(sctx);
- btrfsic_submit_bio(READ, sbio->bio);
+ btrfsic_submit_bio(sbio->bio);
}
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
bio->bi_end_io = scrub_bio_end_io;
bio->bi_bdev = sbio->dev->bdev;
bio->bi_iter.bi_sector = sbio->physical >> 9;
+ bio->bi_rw = READ;
sbio->err = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical ||
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
bio->bi_bdev = dev->bdev;
+ bio->bi_rw = WRITE_SYNC;
ret = bio_add_page(bio, page, PAGE_SIZE, 0);
if (ret != PAGE_SIZE) {
leave_with_eio:
return -EIO;
}
- if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
+ if (btrfsic_submit_bio_wait(bio))
goto leave_with_eio;
bio_put(bio);
sync_pending = 0;
}
- btrfsic_submit_bio(cur->bi_rw, cur);
+ btrfsic_submit_bio(cur);
num_run++;
batch_run++;
*/
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
- int rw, struct bio *bio)
+ struct bio *bio)
{
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
}
/* don't bother with additional async steps for reads, right now */
- if (!(rw & REQ_WRITE)) {
+ if (!(bio->bi_rw & REQ_WRITE)) {
bio_get(bio);
- btrfsic_submit_bio(rw, bio);
+ btrfsic_submit_bio(bio);
bio_put(bio);
return;
}
atomic_inc(&root->fs_info->nr_async_bios);
WARN_ON(bio->bi_next);
bio->bi_next = NULL;
- bio->bi_rw |= rw;
spin_lock(&device->io_lock);
if (bio->bi_rw & REQ_SYNC)
btrfs_io_bio(bio)->stripe_index = dev_nr;
bio->bi_end_io = btrfs_end_bio;
bio->bi_iter.bi_sector = physical >> 9;
+ bio->bi_rw |= rw;
#ifdef DEBUG
{
struct rcu_string *name;
btrfs_bio_counter_inc_noblocked(root->fs_info);
if (async)
- btrfs_schedule_bio(root, dev, rw, bio);
+ btrfs_schedule_bio(root, dev, bio);
else
- btrfsic_submit_bio(rw, bio);
+ btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
rw |= REQ_META;
if (buffer_prio(bh))
rw |= REQ_PRIO;
+ bio->bi_rw = rw;
- submit_bio(rw, bio);
+ submit_bio(bio);
return 0;
}
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9);
+ bio->bi_rw = WRITE;
ret = bio_add_page(bio, ciphertext_page,
inode->i_sb->s_blocksize, 0);
if (ret != inode->i_sb->s_blocksize) {
err = -EIO;
goto errout;
}
- err = submit_bio_wait(WRITE, bio);
+ err = submit_bio_wait(bio);
if ((err == 0) && bio->bi_error)
err = -EIO;
bio_put(bio);
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = first_sector;
+ bio->bi_rw = dio->rw;
if (dio->is_async)
bio->bi_end_io = dio_bio_end_aio;
else
sdio->logical_offset_in_bio);
dio->bio_cookie = BLK_QC_T_NONE;
} else
- dio->bio_cookie = submit_bio(dio->rw, bio);
+ dio->bio_cookie = submit_bio(bio);
sdio->bio = NULL;
sdio->boundary = 0;
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9);
+ bio->bi_rw = WRITE;
ret = bio_add_page(bio, ciphertext_page,
inode->i_sb->s_blocksize, 0);
if (ret != inode->i_sb->s_blocksize) {
err = -EIO;
goto errout;
}
- err = submit_bio_wait(WRITE, bio);
+ err = submit_bio_wait(bio);
if ((err == 0) && bio->bi_error)
err = -EIO;
bio_put(bio);
if (bio) {
int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC : WRITE;
- submit_bio(io_op, io->io_bio);
+ io->io_bio->bi_rw = io_op;
+ submit_bio(io->io_bio);
}
io->io_bio = NULL;
}
*/
if (bio && (last_block_in_bio != blocks[0] - 1)) {
submit_and_realloc:
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = NULL;
}
if (bio == NULL) {
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
+ bio->bi_rw = READ;
}
length = first_hole << blkbits;
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
(relative_block == map.m_len)) ||
(first_hole != blocks_per_page)) {
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = NULL;
} else
last_block_in_bio = blocks[blocks_per_page - 1];
goto next_page;
confused:
if (bio) {
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = NULL;
}
if (!PageUptodate(page))
}
BUG_ON(pages && !list_empty(pages));
if (bio)
- submit_bio(READ, bio);
+ submit_bio(bio);
return 0;
}
{
if (!is_read_io(rw))
atomic_inc(&sbi->nr_wb_bios);
- submit_bio(rw, bio);
+ bio->bi_rw = rw;
+ submit_bio(bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
bio->bi_end_io = f2fs_read_end_io;
bio->bi_private = ctx;
+ bio->bi_rw = READ;
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
+ bio->bi_rw = WRITE_FLUSH;
+ ret = submit_bio_wait(bio);
llist_for_each_entry_safe(cmd, next,
fcc->dispatch_list, llnode) {
int ret;
bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
+ bio->bi_rw = WRITE_FLUSH;
+ ret = submit_bio_wait(bio);
bio_put(bio);
return ret;
}
{
if (sdp->sd_log_bio) {
atomic_inc(&sdp->sd_log_in_flight);
- submit_bio(rw, sdp->sd_log_bio);
+ sdp->sd_log_bio->bi_rw = rw;
+ submit_bio(sdp->sd_log_bio);
sdp->sd_log_bio = NULL;
}
}
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
bio->bi_end_io = gfs2_meta_read_endio;
- submit_bio(rw, bio);
+ bio->bi_rw = rw;
+ submit_bio(bio);
}
/**
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
- submit_bio(READ_SYNC | REQ_META, bio);
+ bio->bi_rw = READ_SYNC | REQ_META;
+ submit_bio(bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = sb->s_bdev;
+ bio->bi_rw = rw;
if (!(rw & WRITE) && data)
*data = (u8 *)buf + offset;
buf = (u8 *)buf + len;
}
- ret = submit_bio_wait(rw, bio);
+ ret = submit_bio_wait(bio);
out:
bio_put(bio);
return ret < 0 ? ret : 0;
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
+ bio->bi_rw = READ_SYNC;
/*check if journaling to disk has been disabled*/
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
lbmIODone(bio);
} else {
- submit_bio(READ_SYNC, bio);
+ submit_bio(bio);
}
wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
+ bio->bi_rw = WRITE_SYNC;
/* check if journaling to disk has been disabled */
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
lbmIODone(bio);
} else {
- submit_bio(WRITE_SYNC, bio);
+ submit_bio(bio);
INCREMENT(lmStat.submitted);
}
}
inc_io(page);
if (!bio->bi_iter.bi_size)
goto dump_bio;
- submit_bio(WRITE, bio);
+ submit_bio(bio);
nr_underway++;
bio = NULL;
} else
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
bio->bi_private = page;
+ bio->bi_rw = WRITE;
/* Don't call bio_add_page yet, we may add to this vec */
bio_offset = offset;
if (!bio->bi_iter.bi_size)
goto dump_bio;
- submit_bio(WRITE, bio);
+ submit_bio(bio);
nr_underway++;
}
if (redirty)
insert_metapage(page, NULL);
inc_io(page);
if (bio)
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_bdev = inode->i_sb->s_bdev;
pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
bio->bi_private = page;
+ bio->bi_rw = READ;
len = xlen << inode->i_blkbits;
offset = block_offset << inode->i_blkbits;
if (bio_add_page(bio, page, len, offset) < len)
block_offset++;
}
if (bio)
- submit_bio(READ, bio);
+ submit_bio(bio);
else
unlock_page(page);
bio.bi_bdev = bdev;
bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
bio.bi_iter.bi_size = PAGE_SIZE;
+ bio.bi_rw = rw;
- return submit_bio_wait(rw, &bio);
+ return submit_bio_wait(&bio);
}
static int bdev_readpage(void *_sb, struct page *page)
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = writeseg_end_io;
+ bio->bi_rw = WRITE;
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
ofs += i * PAGE_SIZE;
index += i;
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = writeseg_end_io;
+ bio->bi_rw = WRITE;
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
return 0;
}
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = erase_end_io;
+ bio->bi_rw = WRITE;
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
ofs += i * PAGE_SIZE;
index += i;
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = erase_end_io;
+ bio->bi_rw = WRITE;
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
return 0;
}
static struct bio *mpage_bio_submit(int rw, struct bio *bio)
{
bio->bi_end_io = mpage_end_io;
+ bio->bi_rw = rw;
guard_bio_eod(rw, bio);
- submit_bio(rw, bio);
+ submit_bio(bio);
return NULL;
}
}
static struct bio *
-bl_submit_bio(int rw, struct bio *bio)
+bl_submit_bio(struct bio *bio)
{
if (bio) {
get_parallel(bio->bi_private);
dprintk("%s submitting %s bio %u@%llu\n", __func__,
- rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+ bio->bi_rw == READ ? "read" : "write",
+ bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector);
- submit_bio(rw, bio);
+ submit_bio(bio);
}
return NULL;
}
if (disk_addr < map->start || disk_addr >= map->start + map->len) {
if (!dev->map(dev, disk_addr, map))
return ERR_PTR(-EIO);
- bio = bl_submit_bio(rw, bio);
+ bio = bl_submit_bio(bio);
}
disk_addr += map->disk_offset;
disk_addr -= map->start;
disk_addr >> SECTOR_SHIFT, end_io, par);
if (!bio)
return ERR_PTR(-ENOMEM);
+ bio->bi_rw = rw;
}
if (bio_add_page(bio, page, *len, offset) < *len) {
- bio = bl_submit_bio(rw, bio);
+ bio = bl_submit_bio(bio);
goto retry;
}
return bio;
for (i = pg_index; i < header->page_array.npages; i++) {
if (extent_length <= 0) {
/* We've used up the previous extent */
- bio = bl_submit_bio(READ, bio);
+ bio = bl_submit_bio(bio);
/* Get the next one */
if (!ext_tree_lookup(bl, isect, &be, false)) {
}
if (is_hole(&be)) {
- bio = bl_submit_bio(READ, bio);
+ bio = bl_submit_bio(bio);
/* Fill hole w/ zeroes w/o accessing device */
dprintk("%s Zeroing page for hole\n", __func__);
zero_user_segment(pages[i], pg_offset, pg_len);
header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
}
out:
- bl_submit_bio(READ, bio);
+ bl_submit_bio(bio);
blk_finish_plug(&plug);
put_parallel(par);
return PNFS_ATTEMPTED;
for (i = pg_index; i < header->page_array.npages; i++) {
if (extent_length <= 0) {
/* We've used up the previous extent */
- bio = bl_submit_bio(WRITE, bio);
+ bio = bl_submit_bio(bio);
/* Get the next one */
if (!ext_tree_lookup(bl, isect, &be, true)) {
header->pnfs_error = -EINVAL;
header->res.count = header->args.count;
out:
- bl_submit_bio(WRITE, bio);
+ bl_submit_bio(bio);
blk_finish_plug(&plug);
put_parallel(par);
return PNFS_ATTEMPTED;
bio->bi_end_io = nilfs_end_bio_write;
bio->bi_private = segbuf;
- submit_bio(mode, bio);
+ bio->bi_rw = mode;
+ submit_bio(bio);
segbuf->sb_nbio++;
wi->bio = NULL;
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *wc,
unsigned int *current_slot,
- unsigned int max_slots)
+ unsigned int max_slots, int rw)
{
int len, current_page;
unsigned int vec_len, vec_start;
bio->bi_bdev = reg->hr_bdev;
bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io;
+ bio->bi_rw = rw;
vec_start = (cs << bits) % PAGE_SIZE;
while(cs < max_slots) {
o2hb_bio_wait_init(&wc);
while(current_slot < max_slots) {
- bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
+ bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots,
+ READ);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
}
atomic_inc(&wc.wc_num_reqs);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
status = 0;
slot = o2nm_this_node();
- bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
+ bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, WRITE_SYNC);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
}
atomic_inc(&write_wc->wc_num_reqs);
- submit_bio(WRITE_SYNC, bio);
+ submit_bio(bio);
status = 0;
bail:
ioend->io_bio->bi_private = ioend;
ioend->io_bio->bi_end_io = xfs_end_bio;
-
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ ioend->io_bio->bi_rw = WRITE_SYNC;
+ else
+ ioend->io_bio->bi_rw = WRITE;
/*
* If we are failing the IO now, just mark the ioend with an
* error and finish it. This will run IO completion immediately
return status;
}
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
- ioend->io_bio);
+ submit_bio(ioend->io_bio);
return 0;
}
bio_chain(ioend->io_bio, new);
bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
- ioend->io_bio);
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ ioend->io_bio->bi_rw = WRITE_SYNC;
+ else
+ ioend->io_bio->bi_rw = WRITE;
+ submit_bio(ioend->io_bio);
ioend->io_bio = new;
}
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
-
+ bio->bi_rw = rw;
for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
flush_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
}
- submit_bio(rw, bio);
+ submit_bio(bio);
if (size)
goto next_chunk;
} else {
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern int submit_bio_wait(int rw, struct bio *bio);
+extern int submit_bio_wait(struct bio *bio);
extern void bio_advance(struct bio *, unsigned);
extern void bio_init(struct bio *);
extern void inode_sb_list_add(struct inode *inode);
#ifdef CONFIG_BLOCK
-extern blk_qc_t submit_bio(int, struct bio *);
+extern blk_qc_t submit_bio(struct bio *);
extern int bdev_read_only(struct block_device *);
#endif
extern int set_blocksize(struct block_device *, int);
bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
bio->bi_bdev = hib_resume_bdev;
+ bio->bi_rw = rw;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
bio->bi_end_io = hib_end_io;
bio->bi_private = hb;
atomic_inc(&hb->count);
- submit_bio(rw, bio);
+ submit_bio(bio);
} else {
- error = submit_bio_wait(rw, bio);
+ error = submit_bio_wait(bio);
bio_put(bio);
}
bio_end_io_t end_write_func)
{
struct bio *bio;
- int ret, rw = WRITE;
+ int ret;
struct swap_info_struct *sis = page_swap_info(page);
if (sis->flags & SWP_FILE) {
ret = -ENOMEM;
goto out;
}
+ bio->bi_rw = WRITE;
if (wbc->sync_mode == WB_SYNC_ALL)
- rw |= REQ_SYNC;
+ bio->bi_rw |= REQ_SYNC;
count_vm_event(PSWPOUT);
set_page_writeback(page);
unlock_page(page);
- submit_bio(rw, bio);
+ submit_bio(bio);
out:
return ret;
}
ret = -ENOMEM;
goto out;
}
+ bio->bi_rw = READ;
count_vm_event(PSWPIN);
- submit_bio(READ, bio);
+ submit_bio(bio);
out:
return ret;
}