shmem: make shmem_inode_acct_block() return error
author	Lukas Czerner <lczerner@redhat.com>	Tue, 25 Jul 2023 14:45:04 +0000 (16:45 +0200)
committer	Christian Brauner <brauner@kernel.org>	Wed, 9 Aug 2023 07:15:38 +0000 (09:15 +0200)
Make shmem_inode_acct_block() return a proper error code instead of a bool.
This will be useful later when we introduce quota support.

There should be no functional change.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: Carlos Maiolino <cmaiolino@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Message-Id: <20230725144510.253763-2-cem@kernel.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>
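
For reference, here is a minimal sketch of what shmem_inode_acct_block()
looks like with this patch applied, reconstructed from the hunks below.
The middle of the max_blocks limit check falls between the two hunks and
is not quoted verbatim by this diff; it is filled in here as an assumption.

static inline int shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int err = -ENOSPC;

	/* accounting failed: report -ENOSPC instead of the old "false" */
	if (shmem_acct_block(info->flags, pages))
		return err;

	if (sbinfo->max_blocks) {
		/* limit check reconstructed here; the exact lines sit between the hunks */
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return 0;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return err;
}

Callers now propagate the returned error instead of testing a bool, so a
later quota patch can surface its own error code (e.g. -EDQUOT) through
the same path; compare the shmem_alloc_and_acct_folio() hunk below, where
the local err is set from the call and checked on failure.
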
mm/shmem.c

diff --git a/mm/shmem.c b/mm/shmem.c
index f5af4b9..be71cdc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -199,13 +199,14 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
-static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
+static inline int shmem_inode_acct_block(struct inode *inode, long pages)
 {
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+       int err = -ENOSPC;
 
        if (shmem_acct_block(info->flags, pages))
-               return false;
+               return err;
 
        if (sbinfo->max_blocks) {
                if (percpu_counter_compare(&sbinfo->used_blocks,
@@ -214,11 +215,11 @@ static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
                percpu_counter_add(&sbinfo->used_blocks, pages);
        }
 
-       return true;
+       return 0;
 
 unacct:
        shmem_unacct_blocks(info->flags, pages);
-       return false;
+       return err;
 }
 
 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
@@ -370,7 +371,7 @@ bool shmem_charge(struct inode *inode, long pages)
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;
 
-       if (!shmem_inode_acct_block(inode, pages))
+       if (shmem_inode_acct_block(inode, pages))
                return false;
 
        /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
@@ -1588,13 +1589,14 @@ static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct folio *folio;
        int nr;
-       int err = -ENOSPC;
+       int err;
 
        if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                huge = false;
        nr = huge ? HPAGE_PMD_NR : 1;
 
-       if (!shmem_inode_acct_block(inode, nr))
+       err = shmem_inode_acct_block(inode, nr);
+       if (err)
                goto failed;
 
        if (huge)
@@ -2445,7 +2447,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
        int ret;
        pgoff_t max_off;
 
-       if (!shmem_inode_acct_block(inode, 1)) {
+       if (shmem_inode_acct_block(inode, 1)) {
                /*
                 * We may have got a page, returned -ENOENT triggering a retry,
                 * and now we find ourselves with -ENOMEM. Release the page, to