ext4: convert mpage_prepare_extent_to_map() to use filemap_get_folios_tag()
Author:     Vishal Moola (Oracle) <vishal.moola@gmail.com>
AuthorDate: Wed, 4 Jan 2023 21:14:35 +0000 (13:14 -0800)
Committer:  Andrew Morton <akpm@linux-foundation.org>
CommitDate: Fri, 3 Feb 2023 06:33:15 +0000 (22:33 -0800)
Convert the function to use folios throughout.  This is in preparation for
the removal of find_get_pages_range_tag().  The function now supports large
folios.  This change removes 11 calls to compound_head().
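
The new lookup loop follows the usual folio_batch pattern.  As a rough
sketch (distilled from the diff below, not the verbatim function body,
with mapping/index/end/tag and the folio_batch locals standing in for the
function's own variables):

	folio_batch_init(&fbatch);
	while (index <= end) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			/* per-folio work; each folio may span multiple pages */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}

Because a folio can cover more than one page, first_page, next_page and
the nr_to_write accounting are advanced by folio_nr_pages() rather than
by one.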

Link: https://lkml.kernel.org/r/20230104211448.4804-11-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9d9f414..fb6cd99 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2595,8 +2595,8 @@ static bool ext4_page_nomap_can_writeout(struct page *page)
 static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
 {
        struct address_space *mapping = mpd->inode->i_mapping;
-       struct pagevec pvec;
-       unsigned int nr_pages;
+       struct folio_batch fbatch;
+       unsigned int nr_folios;
        long left = mpd->wbc->nr_to_write;
        pgoff_t index = mpd->first_page;
        pgoff_t end = mpd->last_page;
@@ -2610,18 +2610,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;
-
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
        mpd->map.m_len = 0;
        mpd->next_page = index;
        while (index <= end) {
-               nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
-                               tag);
-               if (nr_pages == 0)
+               nr_folios = filemap_get_folios_tag(mapping, &index, end,
+                               tag, &fbatch);
+               if (nr_folios == 0)
                        break;
 
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
+               for (i = 0; i < nr_folios; i++) {
+                       struct folio *folio = fbatch.folios[i];
 
                        /*
                         * Accumulated enough dirty pages? This doesn't apply
@@ -2635,10 +2634,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                                goto out;
 
                        /* If we can't merge this page, we are done. */
-                       if (mpd->map.m_len > 0 && mpd->next_page != page->index)
+                       if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
                                goto out;
 
-                       lock_page(page);
+                       folio_lock(folio);
                        /*
                         * If the page is no longer dirty, or its mapping no
                         * longer corresponds to inode we are writing (which
@@ -2646,16 +2645,16 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                         * page is already under writeback and we are not doing
                         * a data integrity writeback, skip the page
                         */
-                       if (!PageDirty(page) ||
-                           (PageWriteback(page) &&
+                       if (!folio_test_dirty(folio) ||
+                           (folio_test_writeback(folio) &&
                             (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
-                           unlikely(page->mapping != mapping)) {
-                               unlock_page(page);
+                           unlikely(folio->mapping != mapping)) {
+                               folio_unlock(folio);
                                continue;
                        }
 
-                       wait_on_page_writeback(page);
-                       BUG_ON(PageWriteback(page));
+                       folio_wait_writeback(folio);
+                       BUG_ON(folio_test_writeback(folio));
 
                        /*
                         * Should never happen but for buggy code in
@@ -2666,49 +2665,49 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                         *
                         * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
                         */
-                       if (!page_has_buffers(page)) {
-                               ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
-                               ClearPageDirty(page);
-                               unlock_page(page);
+                       if (!folio_buffers(folio)) {
+                               ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index);
+                               folio_clear_dirty(folio);
+                               folio_unlock(folio);
                                continue;
                        }
 
                        if (mpd->map.m_len == 0)
-                               mpd->first_page = page->index;
-                       mpd->next_page = page->index + 1;
+                               mpd->first_page = folio->index;
+                       mpd->next_page = folio->index + folio_nr_pages(folio);
                        /*
                         * Writeout for transaction commit where we cannot
                         * modify metadata is simple. Just submit the page.
                         */
                        if (!mpd->can_map) {
-                               if (ext4_page_nomap_can_writeout(page)) {
-                                       err = mpage_submit_page(mpd, page);
+                               if (ext4_page_nomap_can_writeout(&folio->page)) {
+                                       err = mpage_submit_page(mpd, &folio->page);
                                        if (err < 0)
                                                goto out;
                                } else {
-                                       unlock_page(page);
-                                       mpd->first_page++;
+                                       folio_unlock(folio);
+                                       mpd->first_page += folio_nr_pages(folio);
                                }
                        } else {
                                /* Add all dirty buffers to mpd */
-                               lblk = ((ext4_lblk_t)page->index) <<
+                               lblk = ((ext4_lblk_t)folio->index) <<
                                        (PAGE_SHIFT - blkbits);
-                               head = page_buffers(page);
+                               head = folio_buffers(folio);
                                err = mpage_process_page_bufs(mpd, head, head,
-                                                             lblk);
+                                               lblk);
                                if (err <= 0)
                                        goto out;
                                err = 0;
                        }
-                       left--;
+                       left -= folio_nr_pages(folio);
                }
-               pagevec_release(&pvec);
+               folio_batch_release(&fbatch);
                cond_resched();
        }
        mpd->scanned_until_end = 1;
        return 0;
 out:
-       pagevec_release(&pvec);
+       folio_batch_release(&fbatch);
        return err;
 }