void btrfs_submit_data_write_bio(struct btrfs_inode *inode, struct bio *bio, int mirror_num);
void btrfs_submit_data_read_bio(struct btrfs_inode *inode, struct bio *bio,
				int mirror_num, enum btrfs_compression_type compress_type);
-blk_status_t btrfs_submit_bio_start(struct btrfs_inode *inode, struct bio *bio);
-blk_status_t btrfs_submit_bio_start_direct_io(struct btrfs_inode *inode,
- struct bio *bio,
- u64 dio_file_offset);
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected);
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
#include "relocation.h"
#include "scrub.h"
#include "super.h"
+#include "file-item.h"
#define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
return csum_one_extent_buffer(eb);
}
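+/*
+ * Checksum all dirty metadata pages attached to @bio before submission:
+ * walk each segment of the (never cloned) bio and csum the extent buffer
+ * backing it.
+ */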
+static blk_status_t btree_csum_one_bio(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ struct btrfs_root *root;
+ struct bvec_iter_all iter_all;
+ int ret = 0;
+
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ root = BTRFS_I(bvec->bv_page->mapping->host)->root;
+ ret = csum_dirty_buffer(root->fs_info, bvec);
+ if (ret)
+ break;
+ }
+
+ return errno_to_blk_status(ret);
+}
+
static int check_tree_block_fsid(struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
async = container_of(work, struct async_submit_bio, work);
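+	/*
+	 * We are already in the async submission context here, generate the
+	 * checksums before the bio goes down for IO.
+	 */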
switch (async->submit_cmd) {
case WQ_SUBMIT_METADATA:
- ret = btree_submit_bio_start(async->bio);
+ ret = btree_csum_one_bio(async->bio);
break;
case WQ_SUBMIT_DATA:
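+		/*
+		 * The csums computed here are attached to the ordered extent
+		 * and only get inserted into the csum tree once the ordered
+		 * extent completes.
+		 */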
- ret = btrfs_submit_bio_start(async->inode, async->bio);
+ ret = btrfs_csum_one_bio(async->inode, async->bio, (u64)-1, false);
break;
case WQ_SUBMIT_DATA_DIO:
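+		/*
+		 * Direct IO passes the file offset explicitly instead of
+		 * having btrfs_csum_one_bio derive it from the bio's pages.
+		 */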
- ret = btrfs_submit_bio_start_direct_io(async->inode,
- async->bio, async->dio_file_offset);
+ ret = btrfs_csum_one_bio(async->inode, async->bio,
+ async->dio_file_offset, false);
break;
default:
/* Can't happen so return something that would prevent the IO. */
return true;
}
-static blk_status_t btree_csum_one_bio(struct bio *bio)
-{
- struct bio_vec *bvec;
- struct btrfs_root *root;
- int ret = 0;
- struct bvec_iter_all iter_all;
-
- ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
- root = BTRFS_I(bvec->bv_page->mapping->host)->root;
- ret = csum_dirty_buffer(root->fs_info, bvec);
- if (ret)
- break;
- }
-
- return errno_to_blk_status(ret);
-}
-
-blk_status_t btree_submit_bio_start(struct bio *bio)
-{
- /*
- * when we're called for a write, we're already in the async
- * submission context. Just jump into btrfs_submit_bio.
- */
- return btree_csum_one_bio(bio);
-}
-
static bool should_async_write(struct btrfs_fs_info *fs_info,
struct btrfs_inode *bi)
{
bool btrfs_wq_submit_bio(struct btrfs_inode *inode, struct bio *bio, int mirror_num,
			 u64 dio_file_offset, enum btrfs_wq_submit_cmd cmd);
-blk_status_t btree_submit_bio_start(struct bio *bio);
int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
}
}
-/*
- * in order to insert checksums into the metadata in large chunks,
- * we wait until bio submission time. All the pages in the bio are
- * checksummed and sums are attached onto the ordered extent record.
- *
- * At IO completion time the cums attached on the ordered extent record
- * are inserted into the btree
- */
-blk_status_t btrfs_submit_bio_start(struct btrfs_inode *inode, struct bio *bio)
-{
- return btrfs_csum_one_bio(inode, bio, (u64)-1, false);
-}
-
/*
* Split an extent_map at [start, start + len]
*
bio_endio(&dip->bio);
}
-blk_status_t btrfs_submit_bio_start_direct_io(struct btrfs_inode *inode,
- struct bio *bio,
- u64 dio_file_offset)
-{
- return btrfs_csum_one_bio(inode, bio, dio_file_offset, false);
-}
-
static void btrfs_end_dio_bio(struct btrfs_bio *bbio)
{
struct btrfs_dio_private *dip = bbio->private;