From abc8a8a2c7dc7b557619befa8fb29be60ed481bc Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Thu, 15 Dec 2022 21:43:52 +0000
Subject: [PATCH] buffer: replace obvious uses of b_page with b_folio

These cases just check if it's NULL, or use b_page to get to the page's
address space.  They are assumptions that b_page never points to a tail
page.

Link: https://lkml.kernel.org/r/20221215214402.3522366-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Reviewed-by: Jan Kara
Signed-off-by: Andrew Morton
---
 fs/buffer.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/fs/buffer.c b/fs/buffer.c
index d9c6d1f..e1055fe 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -321,7 +321,7 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 {
 	/* Decrypt if needed */
 	if (uptodate &&
-	    fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
+	    fscrypt_inode_uses_fs_layer_crypto(bh->b_folio->mapping->host)) {
 		struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
 
 		if (ctx) {
@@ -570,7 +570,7 @@ void write_boundary_block(struct block_device *bdev,
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 {
 	struct address_space *mapping = inode->i_mapping;
-	struct address_space *buffer_mapping = bh->b_page->mapping;
+	struct address_space *buffer_mapping = bh->b_folio->mapping;
 
 	mark_buffer_dirty(bh);
 	if (!mapping->private_data) {
@@ -1073,7 +1073,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * and then attach the address_space's inode to its superblock's dirty
  * inode list.
  *
- * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
+ * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
  * i_pages lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
@@ -1117,8 +1117,8 @@ void mark_buffer_write_io_error(struct buffer_head *bh)
 
 	set_buffer_write_io_error(bh);
 	/* FIXME: do we need to set this in both places? */
-	if (bh->b_page && bh->b_page->mapping)
-		mapping_set_error(bh->b_page->mapping, -EIO);
+	if (bh->b_folio && bh->b_folio->mapping)
+		mapping_set_error(bh->b_folio->mapping, -EIO);
 	if (bh->b_assoc_map)
 		mapping_set_error(bh->b_assoc_map, -EIO);
 	rcu_read_lock();
@@ -1154,7 +1154,7 @@ void __bforget(struct buffer_head *bh)
 {
 	clear_buffer_dirty(bh);
 	if (bh->b_assoc_map) {
-		struct address_space *buffer_mapping = bh->b_page->mapping;
+		struct address_space *buffer_mapping = bh->b_folio->mapping;
 
 		spin_lock(&buffer_mapping->private_lock);
 		list_del_init(&bh->b_assoc_buffers);
-- 
2.7.4
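
Why the one-for-one substitution is safe: earlier in this series, b_folio is
introduced as a union alias of b_page in struct buffer_head, and a folio's
->mapping describes the whole folio, whereas only a head page's ->mapping is
meaningful.  The sketch below illustrates the converted access pattern only;
the struct layouts are simplified stand-ins, not the real
<linux/buffer_head.h> definitions, and bh_mapping() is a hypothetical helper
that is not part of the patch.

	/*
	 * Illustrative sketch with simplified, hypothetical layouts.
	 */
	#include <stddef.h>

	struct address_space;
	struct page;

	struct folio {
		/*
		 * Valid for the entire folio; a tail page of a large
		 * folio does not carry a usable ->mapping of its own.
		 */
		struct address_space *mapping;
	};

	struct buffer_head {
		union {
			/* aliased, as in the earlier patch in this series */
			struct page	*b_page;
			struct folio	*b_folio;
		};
		struct address_space *b_assoc_map;
	};

	/*
	 * Hypothetical helper showing the converted pattern: reach the
	 * address_space through b_folio, which never points at a tail
	 * page, so no head-page assumption is needed.
	 */
	static struct address_space *bh_mapping(const struct buffer_head *bh)
	{
		return bh->b_folio ? bh->b_folio->mapping : NULL;
	}

Because the two pointers occupy the same storage, each hunk in this patch can
swap the field name without changing behaviour at the call site.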