}
}
-/*
- * Process one page for __process_pages_contig().
- *
- * Return >0 if we hit @page == @locked_page.
- * Return 0 if we updated the page status.
- * Return -EGAIN if the we need to try again.
- * (For PAGE_LOCK case but got dirty page or page not belong to mapping)
- */
-static int process_one_page(struct btrfs_fs_info *fs_info,
- struct address_space *mapping,
- struct page *page, struct page *locked_page,
- unsigned long page_ops, u64 start, u64 end)
+static void process_one_page(struct btrfs_fs_info *fs_info,
+ struct page *page, struct page *locked_page,
+ unsigned long page_ops, u64 start, u64 end)
{
u32 len;

ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
len = end + 1 - start;
if (page_ops & PAGE_END_WRITEBACK)
btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
- if (page == locked_page)
- return 1;
-
- if (page_ops & PAGE_LOCK) {
- int ret;
-
- ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
- if (ret)
- return ret;
- if (!PageDirty(page) || page->mapping != mapping) {
- btrfs_page_end_writer_lock(fs_info, page, start, len);
- return -EAGAIN;
- }
- }
- if (page_ops & PAGE_UNLOCK)
+ if (page != locked_page && (page_ops & PAGE_UNLOCK))
btrfs_page_end_writer_lock(fs_info, page, start, len);
- return 0;
}
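
/*
 * Illustrative sketch, not part of the patch: with the PAGE_LOCK
 * handling gone, process_one_page() cannot fail -- every remaining
 * page_ops action is applied unconditionally, and @locked_page merely
 * skips the final unlock.  Ranges that only partially cover a page are
 * handled by the btrfs_page_clamp_*() helpers, whose effect is roughly
 * the following (hypothetical name, assumes the range intersects the
 * page; the real implementation lives in fs/btrfs/subpage.c):
 */
static void clamp_range_to_page(struct page *page, u64 *start, u32 *len)
{
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_SIZE;	/* exclusive */
	u64 range_end = *start + *len;		/* exclusive */

	*start = max(*start, page_start);
	*len = min(range_end, page_end) - *start;
}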
-static int __process_pages_contig(struct address_space *mapping,
- struct page *locked_page,
- u64 start, u64 end, unsigned long page_ops,
- u64 *processed_end)
+static void __process_pages_contig(struct address_space *mapping,
+ struct page *locked_page, u64 start, u64 end,
+ unsigned long page_ops)
{
struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
pgoff_t start_index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
pgoff_t index = start_index;
- unsigned long pages_processed = 0;
struct folio_batch fbatch;
- int err = 0;
int i;
- if (page_ops & PAGE_LOCK) {
- ASSERT(page_ops == PAGE_LOCK);
- ASSERT(processed_end && *processed_end == start);
- }
-
folio_batch_init(&fbatch);
while (index <= end_index) {
int found_folios;
found_folios = filemap_get_folios_contig(mapping, &index,
end_index, &fbatch);
-
- if (found_folios == 0) {
- /*
- * Only if we're going to lock these pages, we can find
- * nothing at @index.
- */
- ASSERT(page_ops & PAGE_LOCK);
- err = -EAGAIN;
- goto out;
- }
-
for (i = 0; i < found_folios; i++) {
- int process_ret;
struct folio *folio = fbatch.folios[i];
- process_ret = process_one_page(fs_info, mapping,
- &folio->page, locked_page, page_ops,
- start, end);
- if (process_ret < 0) {
- err = -EAGAIN;
- folio_batch_release(&fbatch);
- goto out;
- }
- pages_processed += folio_nr_pages(folio);
+
+ process_one_page(fs_info, &folio->page, locked_page,
+ page_ops, start, end);
}
folio_batch_release(&fbatch);
cond_resched();
}
-out:
- if (err && processed_end) {
- /*
- * Update @processed_end. I know this is awful since it has
- * two different return value patterns (inclusive vs exclusive).
- *
- * But the exclusive pattern is necessary if @start is 0, or we
- * underflow and check against processed_end won't work as
- * expected.
- */
- if (pages_processed)
- *processed_end = min(end,
- ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
- else
- *processed_end = start;
- }
- return err;
}
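
/*
 * Illustrative note, not part of the patch: dropping the
 * found_folios == 0 check is safe for termination because
 * filemap_get_folios_contig() advances @index itself and leaves
 * index > end_index once nothing remains in the range, so the while
 * condition ends the walk.  Reduced to its skeleton, the pattern is:
 */
static void walk_range_skeleton(struct address_space *mapping,
				pgoff_t index, pgoff_t end_index)
{
	struct folio_batch fbatch;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		unsigned int i, nr;

		nr = filemap_get_folios_contig(mapping, &index, end_index,
					       &fbatch);
		for (i = 0; i < nr; i++)
			; /* visit fbatch.folios[i] here */
		folio_batch_release(&fbatch);	/* drop lookup references */
		cond_resched();
	}
}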
static noinline void __unlock_for_delalloc(struct inode *inode,
return;
__process_pages_contig(inode->i_mapping, locked_page, start, end,
- PAGE_UNLOCK, NULL);
+ PAGE_UNLOCK);
}
static noinline int lock_delalloc_pages(struct inode *inode,
struct page *locked_page,
- u64 delalloc_start,
- u64 delalloc_end)
+ u64 start,
+ u64 end)
{
- unsigned long index = delalloc_start >> PAGE_SHIFT;
- unsigned long end_index = delalloc_end >> PAGE_SHIFT;
- u64 processed_end = delalloc_start;
- int ret;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t start_index = start >> PAGE_SHIFT;
+ pgoff_t end_index = end >> PAGE_SHIFT;
+ pgoff_t index = start_index;
+ u64 processed_end = start;
+ struct folio_batch fbatch;
- ASSERT(locked_page);
if (index == locked_page->index && index == end_index)
return 0;
- ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
- delalloc_end, PAGE_LOCK, &processed_end);
- if (ret == -EAGAIN && processed_end > delalloc_start)
- __unlock_for_delalloc(inode, locked_page, delalloc_start,
- processed_end);
- return ret;
+ folio_batch_init(&fbatch);
+ while (index <= end_index) {
+ unsigned int found_folios, i;
+
+ found_folios = filemap_get_folios_contig(mapping, &index,
+ end_index, &fbatch);
+ if (found_folios == 0)
+ goto out;
+
+ for (i = 0; i < found_folios; i++) {
+ struct page *page = &fbatch.folios[i]->page;
+ u32 len = end + 1 - start;
+
+ if (page == locked_page)
+ continue;
+
+ if (btrfs_page_start_writer_lock(fs_info, page, start,
+ len))
+ goto out;
+
+ if (!PageDirty(page) || page->mapping != mapping) {
+ btrfs_page_end_writer_lock(fs_info, page, start,
+ len);
+ goto out;
+ }
+
+ processed_end = page_offset(page) + PAGE_SIZE - 1;
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ return 0;
+out:
+ folio_batch_release(&fbatch);
+ if (processed_end > start)
+ __unlock_for_delalloc(inode, locked_page, start, processed_end);
+ return -EAGAIN;
}
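
/*
 * Illustrative sketch, not part of the patch: the sole caller,
 * find_lock_delalloc_range(), already treats -EAGAIN as "the pages
 * changed under us" and retries once with a one-page window.
 * Simplified from fs/btrfs/extent_io.c, not verbatim:
 */
ret = lock_delalloc_pages(inode, locked_page, delalloc_start, delalloc_end);
if (ret == -EAGAIN) {
	/* Pages were cleaned or truncated away: shrink and retry once. */
	free_extent_state(cached_state);
	cached_state = NULL;
	if (!loops) {
		max_bytes = PAGE_SIZE;
		loops = 1;
		goto again;
	}
	return false;
}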
/*
clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
- start, end, page_ops, NULL);
+ start, end, page_ops);
}
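
/*
 * Illustrative caller, not part of the patch: extent_clear_unlock_delalloc()
 * stays a thin wrapper that clears extent-state bits and then fans the
 * page_ops out over the range.  Exact bit combinations vary per call
 * site in fs/btrfs/inode.c:
 */
extent_clear_unlock_delalloc(inode, start, end, locked_page,
			     EXTENT_LOCKED | EXTENT_DELALLOC,
			     PAGE_UNLOCK | PAGE_END_WRITEBACK);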
static bool btrfs_verify_page(struct page *page, u64 start)