u32 len_to_stripe_boundary;
u32 len_to_oe_boundary;
btrfs_bio_end_io_t end_io_func;
-};
-struct extent_page_data {
- struct btrfs_bio_ctrl bio_ctrl;
- /* tells writepage not to lock the state bits for this range
- * it still does the unlocking
+ /*
+ * Tell writepage not to lock the state bits for this range, it still
+ * does the unlocking.
*/
bool extent_locked;
- /* tells the submit_bio code to use REQ_SYNC */
+ /* Tell the submit_bio code to use REQ_SYNC */
bool sync_io;
};
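
For reference, the consolidated structure after this hunk looks roughly as follows (a sketch; members this patch does not touch are elided):

struct btrfs_bio_ctrl {
	struct bio *bio;	/* the bio under construction */
	/* ... mirror/compression members elided ... */
	u32 len_to_stripe_boundary;
	u32 len_to_oe_boundary;
	btrfs_bio_end_io_t end_io_func;

	/*
	 * Tell writepage not to lock the state bits for this range, it still
	 * does the unlocking.
	 */
	bool extent_locked;

	/* Tell the submit_bio code to use REQ_SYNC */
	bool sync_io;
};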
}
/*
- * Submit or fail the current bio in an extent_page_data structure.
+ * Submit or fail the current bio in the bio_ctrl structure.
*/
-static void submit_write_bio(struct extent_page_data *epd, int ret)
+static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
{
- struct bio *bio = epd->bio_ctrl.bio;
+ struct bio *bio = bio_ctrl->bio;
if (!bio)
return;
if (ret) {
ASSERT(ret < 0);
btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
/* The bio is owned by the end_io handler now */
- epd->bio_ctrl.bio = NULL;
+ bio_ctrl->bio = NULL;
} else {
- submit_one_bio(&epd->bio_ctrl);
+ submit_one_bio(bio_ctrl);
}
}
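
The two calling conventions are unchanged, only the context argument differs: ret == 0 submits whatever bio has accumulated, while ret < 0 completes the pending bio through its end_io handler with the error instead of submitting it. A sketch of both uses, as they appear in the callers below:

	/* flush the bio under construction, e.g. before blocking on a lock */
	submit_write_bio(bio_ctrl, 0);

	/* error path: fail the pending bio rather than submitting it */
	submit_write_bio(bio_ctrl, ret);	/* ret < 0 */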
static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
struct page *page,
struct writeback_control *wbc,
- struct extent_page_data *epd,
+ struct btrfs_bio_ctrl *bio_ctrl,
loff_t i_size,
int *nr_ret)
{
*/
wbc->nr_to_write--;
- epd->bio_ctrl.end_io_func = end_bio_extent_writepage;
+ bio_ctrl->end_io_func = end_bio_extent_writepage;
while (cur <= end) {
u64 disk_bytenr;
u64 em_end;
btrfs_page_clear_dirty(fs_info, page, cur, iosize);
ret = submit_extent_page(op | write_flags, wbc,
- &epd->bio_ctrl, disk_bytenr,
+ bio_ctrl, disk_bytenr,
page, iosize,
cur - page_offset(page),
0, false);
* Return <0 for error.
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
- struct extent_page_data *epd)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct folio *folio = page_folio(page);
struct inode *inode = page->mapping->host;
goto done;
}
- if (!epd->extent_locked) {
+ if (!bio_ctrl->extent_locked) {
ret = writepage_delalloc(BTRFS_I(inode), page, wbc);
if (ret == 1)
return 0;
goto done;
}
- ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, epd, i_size,
+ ret = __extent_writepage_io(BTRFS_I(inode), page, wbc, bio_ctrl, i_size,
&nr);
if (ret == 1)
return 0;
*/
if (PageError(page))
end_extent_writepage(page, ret, page_start, page_end);
- if (epd->extent_locked) {
+ if (bio_ctrl->extent_locked) {
/*
- * If epd->extent_locked, it's from extent_write_locked_range(),
+ * If bio_ctrl->extent_locked, it's from extent_write_locked_range(),
* the page can either be locked by lock_page() or
* process_one_page().
* Let btrfs_page_unlock_writer() handle both cases.
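
For context, btrfs_page_unlock_writer() dispatches roughly like this (a paraphrased sketch of the subpage helper; exact details vary by kernel version):

	if (!btrfs_is_subpage(fs_info, page)) {
		/* lock_page() case: the whole page holds the lock */
		unlock_page(page);
	} else {
		/* process_one_page() case: release the subpage writer lock */
		btrfs_page_end_writer_lock(fs_info, page, start, len);
	}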
* Return <0 if something went wrong, no page is locked.
*/
static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb,
- struct extent_page_data *epd)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
int i, num_pages;
int ret = 0;
if (!btrfs_try_tree_write_lock(eb)) {
- submit_write_bio(epd, 0);
+ submit_write_bio(bio_ctrl, 0);
flush = 1;
btrfs_tree_lock(eb);
}
if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
btrfs_tree_unlock(eb);
- if (!epd->sync_io)
+ if (!bio_ctrl->sync_io)
return 0;
if (!flush) {
- submit_write_bio(epd, 0);
+ submit_write_bio(bio_ctrl, 0);
flush = 1;
}
while (1) {
if (!trylock_page(p)) {
if (!flush) {
- submit_write_bio(epd, 0);
+ submit_write_bio(bio_ctrl, 0);
flush = 1;
}
lock_page(p);
*/
static int write_one_subpage_eb(struct extent_buffer *eb,
struct writeback_control *wbc,
- struct extent_page_data *epd)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct page *page = eb->pages[0];
if (no_dirty_ebs)
clear_page_dirty_for_io(page);
- epd->bio_ctrl.end_io_func = end_bio_subpage_eb_writepage;
+ bio_ctrl->end_io_func = end_bio_subpage_eb_writepage;
ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
- &epd->bio_ctrl, eb->start, page, eb->len,
+ bio_ctrl, eb->start, page, eb->len,
eb->start - page_offset(page), 0, false);
if (ret) {
btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len);
static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct writeback_control *wbc,
- struct extent_page_data *epd)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
u64 disk_bytenr = eb->start;
int i, num_pages;
prepare_eb_write(eb);
- epd->bio_ctrl.end_io_func = end_bio_extent_buffer_writepage;
+ bio_ctrl->end_io_func = end_bio_extent_buffer_writepage;
num_pages = num_extent_pages(eb);
for (i = 0; i < num_pages; i++) {
clear_page_dirty_for_io(p);
set_page_writeback(p);
ret = submit_extent_page(REQ_OP_WRITE | write_flags, wbc,
- &epd->bio_ctrl, disk_bytenr, p,
+ bio_ctrl, disk_bytenr, p,
PAGE_SIZE, 0, 0, false);
if (ret) {
set_btree_ioerr(p, eb);
*/
static int submit_eb_subpage(struct page *page,
struct writeback_control *wbc,
- struct extent_page_data *epd)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
int submitted = 0;
if (!eb)
continue;
- ret = lock_extent_buffer_for_io(eb, epd);
+ ret = lock_extent_buffer_for_io(eb, bio_ctrl);
if (ret == 0) {
free_extent_buffer(eb);
continue;
free_extent_buffer(eb);
goto cleanup;
}
- ret = write_one_subpage_eb(eb, wbc, epd);
+ ret = write_one_subpage_eb(eb, wbc, bio_ctrl);
free_extent_buffer(eb);
if (ret < 0)
goto cleanup;
cleanup:
/* We hit error, end bio for the submitted extent buffers */
- submit_write_bio(epd, ret);
+ submit_write_bio(bio_ctrl, ret);
return ret;
}
* Return <0 for fatal error.
*/
static int submit_eb_page(struct page *page, struct writeback_control *wbc,
- struct extent_page_data *epd,
+ struct btrfs_bio_ctrl *bio_ctrl,
struct extent_buffer **eb_context)
{
struct address_space *mapping = page->mapping;
return 0;
if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
- return submit_eb_subpage(page, wbc, epd);
+ return submit_eb_subpage(page, wbc, bio_ctrl);
spin_lock(&mapping->private_lock);
if (!PagePrivate(page)) {
*eb_context = eb;
- ret = lock_extent_buffer_for_io(eb, epd);
+ ret = lock_extent_buffer_for_io(eb, bio_ctrl);
if (ret <= 0) {
btrfs_revert_meta_write_pointer(cache, eb);
if (cache)
btrfs_schedule_zone_finish_bg(cache, eb);
btrfs_put_block_group(cache);
}
- ret = write_one_eb(eb, wbc, epd);
+ ret = write_one_eb(eb, wbc, bio_ctrl);
free_extent_buffer(eb);
if (ret < 0)
return ret;
struct writeback_control *wbc)
{
struct extent_buffer *eb_context = NULL;
- struct extent_page_data epd = {
- .bio_ctrl = { 0 },
+ struct btrfs_bio_ctrl bio_ctrl = {
.extent_locked = 0,
- .sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ .sync_io = (wbc->sync_mode == WB_SYNC_ALL),
};
struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
int ret = 0;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- ret = submit_eb_page(page, wbc, &epd, &eb_context);
+ ret = submit_eb_page(page, wbc, &bio_ctrl, &eb_context);
if (ret == 0)
continue;
if (ret < 0) {
ret = 0;
if (!ret && BTRFS_FS_ERROR(fs_info))
ret = -EROFS;
- submit_write_bio(&epd, ret);
+ submit_write_bio(&bio_ctrl, ret);
btrfs_zoned_meta_io_unlock(fs_info);
return ret;
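
Note that dropping the old .bio_ctrl = { 0 } loses nothing: with a designated initializer, every member not named (bio, end_io_func, the boundary lengths) is zero-initialized by the C standard. A minimal illustration:

	struct btrfs_bio_ctrl bio_ctrl = {
		.sync_io = (wbc->sync_mode == WB_SYNC_ALL),
	};
	/* here bio_ctrl.bio == NULL and bio_ctrl.end_io_func == NULL */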
/*
* Walk the list of dirty pages of the given address space and write all of them.
*
- * @mapping: address space structure to write
- * @wbc: subtract the number of written pages from *@wbc->nr_to_write
- * @epd: holds context for the write, namely the bio
+ * @mapping: address space structure to write
+ * @wbc: subtract the number of written pages from *@wbc->nr_to_write
+ * @bio_ctrl: holds context for the write, namely the bio
*
* If a page is already under I/O, write_cache_pages() skips it, even
* if it's dirty. This is desirable behaviour for memory-cleaning writeback,
*/
static int extent_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc,
- struct extent_page_data *epd)
+ struct btrfs_bio_ctrl *bio_ctrl)
{
struct inode *inode = mapping->host;
int ret = 0;
* tmpfs file mapping
*/
if (!trylock_page(page)) {
- submit_write_bio(epd, 0);
+ submit_write_bio(bio_ctrl, 0);
lock_page(page);
}
if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(page))
- submit_write_bio(epd, 0);
+ submit_write_bio(bio_ctrl, 0);
wait_on_page_writeback(page);
}
continue;
}
- ret = __extent_writepage(page, wbc, epd);
+ ret = __extent_writepage(page, wbc, bio_ctrl);
if (ret < 0) {
done = 1;
break;
* page in our current bio, and thus deadlock, so flush the
* write bio here.
*/
- submit_write_bio(epd, 0);
+ submit_write_bio(bio_ctrl, 0);
goto retry;
}
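
The flush before goto retry is what prevents the deadlock described above: pages added to the bio under construction already have writeback set, so a second pass that calls wait_on_page_writeback() on one of them would wait for an I/O that only we can start. Schematically:

	/* pass 1: page P sits in bio_ctrl->bio, writeback set, not submitted */
	/* pass 2 would do wait_on_page_writeback(P) and block forever */
	submit_write_bio(bio_ctrl, 0);	/* hence: submit before retrying */
	goto retry;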
u64 cur = start;
unsigned long nr_pages;
const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize;
- struct extent_page_data epd = {
- .bio_ctrl = { 0 },
+ struct btrfs_bio_ctrl bio_ctrl = {
.extent_locked = 1,
.sync_io = 1,
};
ASSERT(PageLocked(page));
ASSERT(PageDirty(page));
clear_page_dirty_for_io(page);
- ret = __extent_writepage(page, &wbc_writepages, &epd);
+ ret = __extent_writepage(page, &wbc_writepages, &bio_ctrl);
ASSERT(ret <= 0);
if (ret < 0) {
found_error = true;
cur = cur_end + 1;
}
- submit_write_bio(&epd, found_error ? ret : 0);
+ submit_write_bio(&bio_ctrl, found_error ? ret : 0);
wbc_detach_inode(&wbc_writepages);
if (found_error)
{
struct inode *inode = mapping->host;
int ret = 0;
- struct extent_page_data epd = {
- .bio_ctrl = { 0 },
+ struct btrfs_bio_ctrl bio_ctrl = {
.extent_locked = 0,
- .sync_io = wbc->sync_mode == WB_SYNC_ALL,
+ .sync_io = (wbc->sync_mode == WB_SYNC_ALL),
};
/*
* protect the write pointer updates.
*/
btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
- ret = extent_write_cache_pages(mapping, wbc, &epd);
- submit_write_bio(&epd, ret);
+ ret = extent_write_cache_pages(mapping, wbc, &bio_ctrl);
+ submit_write_bio(&bio_ctrl, ret);
btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
return ret;
}
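
Taken together, the entry points now differ only in how they initialize the on-stack btrfs_bio_ctrl (condensed from the hunks above):

	/* extent_write_locked_range(): range already locked, always sync */
	struct btrfs_bio_ctrl bio_ctrl = { .extent_locked = 1, .sync_io = 1 };

	/* btree_write_cache_pages() and extent_writepages(): normal writeback */
	struct btrfs_bio_ctrl bio_ctrl = {
		.extent_locked = 0,
		.sync_io = (wbc->sync_mode == WB_SYNC_ALL),
	};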