buffer: convert page_zero_new_buffers() to folio_zero_new_buffers()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 12 Jun 2023 21:01:36 +0000 (22:01 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 19 Jun 2023 23:19:31 +0000 (16:19 -0700)
Most of the callers already have a folio; convert reiserfs_write_end() to
have a folio.  Removes a couple of hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20230612210141.730128-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Luis Chamberlain <mcgrof@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/buffer.c
fs/ext4/inode.c
fs/reiserfs/inode.c
include/linux/buffer_head.h

index 97c64b0..e4bd465 100644 (file)
@@ -1927,33 +1927,34 @@ recover:
 EXPORT_SYMBOL(__block_write_full_folio);
 
 /*
- * If a page has any new buffers, zero them out here, and mark them uptodate
+ * If a folio has any new buffers, zero them out here, and mark them uptodate
  * and dirty so they'll be written out (in order to prevent uninitialised
  * block data from leaking). And clear the new bit.
  */
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
 {
-       unsigned int block_start, block_end;
+       size_t block_start, block_end;
        struct buffer_head *head, *bh;
 
-       BUG_ON(!PageLocked(page));
-       if (!page_has_buffers(page))
+       BUG_ON(!folio_test_locked(folio));
+       head = folio_buffers(folio);
+       if (!head)
                return;
 
-       bh = head = page_buffers(page);
+       bh = head;
        block_start = 0;
        do {
                block_end = block_start + bh->b_size;
 
                if (buffer_new(bh)) {
                        if (block_end > from && block_start < to) {
-                               if (!PageUptodate(page)) {
-                                       unsigned start, size;
+                               if (!folio_test_uptodate(folio)) {
+                                       size_t start, xend;
 
                                        start = max(from, block_start);
-                                       size = min(to, block_end) - start;
+                                       xend = min(to, block_end);
 
-                                       zero_user(page, start, size);
+                                       folio_zero_segment(folio, start, xend);
                                        set_buffer_uptodate(bh);
                                }
 
@@ -1966,7 +1967,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
                bh = bh->b_this_page;
        } while (bh != head);
 }
-EXPORT_SYMBOL(page_zero_new_buffers);
+EXPORT_SYMBOL(folio_zero_new_buffers);
 
 static void
 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
@@ -2104,7 +2105,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
                        err = -EIO;
        }
        if (unlikely(err))
-               page_zero_new_buffers(&folio->page, from, to);
+               folio_zero_new_buffers(folio, from, to);
        return err;
 }
 
@@ -2208,7 +2209,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
                if (!folio_test_uptodate(folio))
                        copied = 0;
 
-               page_zero_new_buffers(&folio->page, start+copied, start+len);
+               folio_zero_new_buffers(folio, start+copied, start+len);
        }
        flush_dcache_folio(folio);
 
index ce5f21b..31b839a 100644 (file)
@@ -1093,7 +1093,7 @@ static int ext4_block_write_begin(struct folio *folio, loff_t pos, unsigned len,
                        err = -EIO;
        }
        if (unlikely(err)) {
-               page_zero_new_buffers(&folio->page, from, to);
+               folio_zero_new_buffers(folio, from, to);
        } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
                for (i = 0; i < nr_wait; i++) {
                        int err2;
@@ -1339,7 +1339,7 @@ static int ext4_write_end(struct file *file,
 }
 
 /*
- * This is a private version of page_zero_new_buffers() which doesn't
+ * This is a private version of folio_zero_new_buffers() which doesn't
  * set the buffer to be dirty, since in data=journalled mode we need
  * to call ext4_dirty_journalled_data() instead.
  */
index ff34ee4..77bd3b2 100644 (file)
@@ -2872,6 +2872,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned copied,
                              struct page *page, void *fsdata)
 {
+       struct folio *folio = page_folio(page);
        struct inode *inode = page->mapping->host;
        int ret = 0;
        int update_sd = 0;
@@ -2887,12 +2888,12 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
 
        start = pos & (PAGE_SIZE - 1);
        if (unlikely(copied < len)) {
-               if (!PageUptodate(page))
+               if (!folio_test_uptodate(folio))
                        copied = 0;
 
-               page_zero_new_buffers(page, start + copied, start + len);
+               folio_zero_new_buffers(folio, start + copied, start + len);
        }
-       flush_dcache_page(page);
+       flush_dcache_folio(folio);
 
        reiserfs_commit_page(inode, page, start, start + copied);
 
index a366e01..c794ea7 100644 (file)
@@ -278,7 +278,7 @@ int block_write_end(struct file *, struct address_space *,
 int generic_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
 void clean_page_buffers(struct page *page);
 int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, struct page **, void **,