cb->orig_bio = NULL;
cb->nr_pages = nr_pages;
- bio = btrfs_bio_alloc(first_byte);
+ bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+ bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;
bio->bi_opf = bio_op | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_endio(bio);
}
- bio = btrfs_bio_alloc(first_byte);
+ bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+ bio->bi_iter.bi_sector = first_byte >> SECTOR_SHIFT;
bio->bi_opf = bio_op | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
/* include any pages we added in add_ra_bio_pages */
cb->len = bio->bi_iter.bi_size;
- comp_bio = btrfs_bio_alloc(cur_disk_byte);
+ comp_bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+ comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT;
comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
bio_endio(comp_bio);
}
- comp_bio = btrfs_bio_alloc(cur_disk_byte);
+ comp_bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
+ comp_bio->bi_iter.bi_sector = cur_disk_byte >> SECTOR_SHIFT;
comp_bio->bi_opf = REQ_OP_READ;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
}
/*
- * The following helpers allocate a bio. As it's backed by a bioset, it'll
- * never fail. We're returning a bio right now but you can call btrfs_io_bio
- * for the appropriate container_of magic
+ * Allocate a btrfs_io_bio, with @nr_iovecs as the maximum number of iovecs.
+ *
+ * The bio allocation is backed by a bioset and does not fail.
*/
-struct bio *btrfs_bio_alloc(u64 first_byte)
+struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
{
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &btrfs_bioset);
- bio->bi_iter.bi_sector = first_byte >> 9;
+ ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS);
+ bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
btrfs_io_bio_init(btrfs_io_bio(bio));
return bio;
}
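
With the consolidation above, the allocator no longer takes a byte offset: callers request an iovec count (at most BIO_MAX_VECS) and position the bio themselves. A condensed before/after sketch of the caller pattern, reusing the disk_bytenr variable from the hunk below (sketch only; the comments are added here for illustration and are not part of the patch):

	/* Old pattern: the helper took a byte offset and derived the starting sector itself. */
	bio = btrfs_bio_alloc(disk_bytenr);

	/* New pattern: the caller picks the iovec count and sets the sector explicitly. */
	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
	bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;	/* bytes to 512-byte sectors */
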
return new;
}
-struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs)
-{
- struct bio *bio;
-
- /* Bio allocation backed by a bioset does not fail */
- bio = bio_alloc_bioset(GFP_NOFS, nr_iovecs, &btrfs_bioset);
- btrfs_io_bio_init(btrfs_io_bio(bio));
- return bio;
-}
-
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
{
struct bio *bio;
struct bio *bio;
int ret;
+ bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
/*
 * For a compressed page range, its disk_bytenr is always the @disk_bytenr
 * passed in, no matter whether we have added any range into a previous bio.
*/
if (bio_flags & EXTENT_BIO_COMPRESSED)
- bio = btrfs_bio_alloc(disk_bytenr);
+ bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
else
- bio = btrfs_bio_alloc(disk_bytenr + offset);
+ bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
bio_ctrl->bio = bio;
bio_ctrl->bio_flags = bio_flags;
bio->bi_end_io = end_io_func;
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
u32 bits_to_clear, unsigned long page_ops);
-struct bio *btrfs_bio_alloc(u64 first_byte);
struct bio *btrfs_io_bio_alloc(unsigned int nr_iovecs);
struct bio *btrfs_bio_clone(struct bio *bio);
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size);
goto bioc_out;
}
- bio = btrfs_io_bio_alloc(0);
+ bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
bio->bi_iter.bi_sector = logical >> 9;
bio->bi_private = sblock;
bio->bi_end_io = scrub_missing_raid56_end_io;
if (ret || !bioc || !bioc->raid_map)
goto bioc_out;
- bio = btrfs_io_bio_alloc(0);
+ bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
bio->bi_iter.bi_sector = sparity->logic_start >> 9;
bio->bi_private = sparity;
bio->bi_end_io = scrub_parity_bio_endio;
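
The two scrub call sites above previously passed 0 for the iovec count; with the ASSERT(0 < nr_iovecs && nr_iovecs <= BIO_MAX_VECS) now in btrfs_io_bio_alloc(), a zero count would trip the assertion, so they request BIO_MAX_VECS like the other callers. A minimal sketch of the resulting pattern, with names taken from the hunk above:

	bio = btrfs_io_bio_alloc(BIO_MAX_VECS);		/* 0 iovecs is no longer accepted */
	bio->bi_iter.bi_sector = logical >> 9;		/* scrub keeps the literal shift here */
	bio->bi_private = sblock;
	bio->bi_end_io = scrub_missing_raid56_end_io;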