buffer: convert block_page_mkwrite() to use a folio
author Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 12 Jun 2023 21:01:34 +0000 (22:01 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 19 Jun 2023 23:19:31 +0000 (16:19 -0700)
If any page in a folio is dirtied, dirty the entire folio.  Removes a
number of hidden calls to compound_head() and references to page->mapping
and page->index.  Fixes a pre-existing bug where we could mark a folio
beyond EOF as dirty if the file is truncated to exactly a multiple of
the page size just as we take the page fault.  I don't believe this bug
has any bad effect; it's just inefficient.

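To make the boundary case concrete, here is a minimal userspace model of
the two calculations the patch changes.  The 4 KiB offsets are hard-coded
assumptions for illustration; this is plain arithmetic, not kernel code.

	#include <stdio.h>

	int main(void)
	{
		long long pos = 8192;	/* folio_pos(): folio starts at 8 KiB */
		long long fsize = 4096;	/* folio_size() of an order-0 folio */
		long long size = 8192;	/* i_size: truncated to exactly pos */
		long long end;

		/*
		 * Case 1: the old test (pos > size) misses the folio that
		 * starts exactly at EOF, so it was dirtied even though it
		 * lies wholly beyond EOF; the new test (pos >= size) makes
		 * the fault fail with -EFAULT instead.
		 */
		printf("old truncated=%d, new truncated=%d\n",
		       pos > size, pos >= size);

		/*
		 * Case 2: the folio straddles EOF (i_size = 10 KiB), so the
		 * write is clamped to the bytes inside EOF, as the new
		 * folio_pos()/folio_size() based code does.
		 */
		size = 10240;
		end = fsize;
		if (pos + end > size)
			end = size - pos;
		printf("end clamped to %lld bytes\n", end);	/* 2048 */
		return 0;
	}
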
Link: https://lkml.kernel.org/r/20230612210141.730128-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/buffer.c

index 34ecf55..0af167e 100644
@@ -2564,38 +2564,37 @@ EXPORT_SYMBOL(block_commit_write);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                         get_block_t get_block)
 {
-       struct page *page = vmf->page;
+       struct folio *folio = page_folio(vmf->page);
        struct inode *inode = file_inode(vma->vm_file);
        unsigned long end;
        loff_t size;
        int ret;
 
-       lock_page(page);
+       folio_lock(folio);
        size = i_size_read(inode);
-       if ((page->mapping != inode->i_mapping) ||
-           (page_offset(page) > size)) {
+       if ((folio->mapping != inode->i_mapping) ||
+           (folio_pos(folio) >= size)) {
                /* We overload EFAULT to mean page got truncated */
                ret = -EFAULT;
                goto out_unlock;
        }
 
-       /* page is wholly or partially inside EOF */
-       if (((page->index + 1) << PAGE_SHIFT) > size)
-               end = size & ~PAGE_MASK;
-       else
-               end = PAGE_SIZE;
+       end = folio_size(folio);
+       /* folio is wholly or partially inside EOF */
+       if (folio_pos(folio) + end > size)
+               end = size - folio_pos(folio);
 
-       ret = __block_write_begin(page, 0, end, get_block);
+       ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
        if (!ret)
-               ret = block_commit_write(page, 0, end);
+               ret = block_commit_write(&folio->page, 0, end);
 
        if (unlikely(ret < 0))
                goto out_unlock;
-       set_page_dirty(page);
-       wait_for_stable_page(page);
+       folio_mark_dirty(folio);
+       folio_wait_stable(folio);
        return 0;
 out_unlock:
-       unlock_page(page);
+       folio_unlock(folio);
        return ret;
 }
 EXPORT_SYMBOL(block_page_mkwrite);
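
For context (this is not part of the patch), block_page_mkwrite() is
normally called from a filesystem's ->page_mkwrite handler.  Below is a
minimal sketch of that wiring for a hypothetical "myfs"; the myfs_* names
are illustrative, while block_page_mkwrite(), block_page_mkwrite_return(),
sb_start_pagefault()/sb_end_pagefault(), filemap_fault() and
filemap_map_pages() are existing helpers.

	#include <linux/buffer_head.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Hypothetical block-mapping callback; a real filesystem would
	 * look up or allocate the on-disk block for @iblock here. */
	static int myfs_get_block(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
	{
		return -EIO;	/* stub */
	}

	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		struct inode *inode = file_inode(vmf->vma->vm_file);
		int err;

		sb_start_pagefault(inode->i_sb);
		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
		sb_end_pagefault(inode->i_sb);

		/* 0 means the folio was left locked and dirty, which maps
		 * to VM_FAULT_LOCKED. */
		return block_page_mkwrite_return(err);
	}

	static const struct vm_operations_struct myfs_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= filemap_map_pages,
		.page_mkwrite	= myfs_page_mkwrite,
	};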