hfsplus: Convert to release_folio
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Sun, 1 May 2022 03:53:28 +0000 (23:53 -0400)
committer: Matthew Wilcox (Oracle) <willy@infradead.org>
Tue, 10 May 2022 03:12:33 +0000 (23:53 -0400)
Use a folio throughout hfsplus_release_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
fs/hfsplus/inode.c

index 982b34eefec7ea7118e7e16d0ef5156fa58dbf4a..f723e0e91d511df1e1345e57429933c14f1de75f 100644 (file)
@@ -63,14 +63,15 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, hfsplus_get_block);
 }
 
-static int hfsplus_releasepage(struct page *page, gfp_t mask)
+static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
 {
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = folio->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct hfs_btree *tree;
        struct hfs_bnode *node;
        u32 nidx;
-       int i, res = 1;
+       int i;
+       bool res = true;
 
        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
@@ -84,26 +85,26 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
                break;
        default:
                BUG();
-               return 0;
+               return false;
        }
        if (!tree)
-               return 0;
+               return false;
        if (tree->node_size >= PAGE_SIZE) {
-               nidx = page->index >>
+               nidx = folio->index >>
                        (tree->node_size_shift - PAGE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (!node)
                        ;
                else if (atomic_read(&node->refcnt))
-                       res = 0;
+                       res = false;
                if (res && node) {
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                }
                spin_unlock(&tree->hash_lock);
        } else {
-               nidx = page->index <<
+               nidx = folio->index <<
                        (PAGE_SHIFT - tree->node_size_shift);
                i = 1 << (PAGE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
@@ -112,7 +113,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
                        if (!node)
                                continue;
                        if (atomic_read(&node->refcnt)) {
-                               res = 0;
+                               res = false;
                                break;
                        }
                        hfs_bnode_unhash(node);
@@ -120,7 +121,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
-       return res ? try_to_free_buffers(page) : 0;
+       return res ? try_to_free_buffers(&folio->page) : false;
 }
 
 static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -162,7 +163,7 @@ const struct address_space_operations hfsplus_btree_aops = {
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
-       .releasepage    = hfsplus_releasepage,
+       .release_folio  = hfsplus_release_folio,
 };
 
 const struct address_space_operations hfsplus_aops = {