mm: return an ERR_PTR from __filemap_get_folio
authorChristoph Hellwig <hch@lst.de>
Tue, 7 Mar 2023 14:34:10 +0000 (15:34 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 6 Apr 2023 02:42:42 +0000 (19:42 -0700)
Instead of returning NULL for all errors, distinguish between:

 - no entry found and not asked to allocate (-ENOENT)
 - failed to allocate memory (-ENOMEM)
 - would block (-EAGAIN)

so that callers don't have to guess the error based on the passed in
flags.

Also pass the error through the direct callers: filemap_get_folio,
filemap_lock_folio, filemap_grab_folio and filemap_get_incore_folio.

[hch@lst.de: fix null-pointer deref]
Link: https://lkml.kernel.org/r/20230310070023.GA13563@lst.de
Link: https://lkml.kernel.org/r/20230310043137.GA1624890@u2004
Link: https://lkml.kernel.org/r/20230307143410.28031-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> [nilfs2]
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
21 files changed:
fs/afs/dir.c
fs/afs/dir_edit.c
fs/afs/write.c
fs/ext4/inode.c
fs/ext4/move_extent.c
fs/hugetlbfs/inode.c
fs/iomap/buffered-io.c
fs/netfs/buffered_read.c
fs/nfs/file.c
fs/nilfs2/page.c
include/linux/pagemap.h
mm/filemap.c
mm/folio-compat.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/mincore.c
mm/shmem.c
mm/swap_state.c
mm/swapfile.c
mm/truncate.c

index 82690d1dd49a026a93b33d2ebce71bbf7747774e..f92b9e62d567b9faf9f2e9fcb624200604cbbe4d 100644 (file)
@@ -319,16 +319,16 @@ expand:
                struct folio *folio;
 
                folio = filemap_get_folio(mapping, i);
-               if (!folio) {
+               if (IS_ERR(folio)) {
                        if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
                                afs_stat_v(dvnode, n_inval);
-
-                       ret = -ENOMEM;
                        folio = __filemap_get_folio(mapping,
                                                    i, FGP_LOCK | FGP_CREAT,
                                                    mapping->gfp_mask);
-                       if (!folio)
+                       if (IS_ERR(folio)) {
+                               ret = PTR_ERR(folio);
                                goto error;
+                       }
                        folio_attach_private(folio, (void *)1);
                        folio_unlock(folio);
                }
@@ -524,7 +524,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
                 */
                folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
                                            FGP_ACCESSED, 0);
-               if (!folio) {
+               if (IS_ERR(folio)) {
                        ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
                        break;
                }
index 0ab7752d1b758e7bd593bfe44faf04bf43951f0b..f0eddccbdd954154c67e28b1db8239b764e97c2b 100644 (file)
@@ -115,7 +115,7 @@ static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
        folio = __filemap_get_folio(mapping, index,
                                    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                    mapping->gfp_mask);
-       if (!folio)
+       if (IS_ERR(folio))
                clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
        else if (folio && !folio_test_private(folio))
                folio_attach_private(folio, (void *)1);
index 571f3b9a417e5f8a8fd5d6c900c0961da83c6eae..c822d6006033a7c5367e39ac8e4bda149a8c85b7 100644 (file)
@@ -232,7 +232,7 @@ static void afs_kill_pages(struct address_space *mapping,
                _debug("kill %lx (to %lx)", index, last);
 
                folio = filemap_get_folio(mapping, index);
-               if (!folio) {
+               if (IS_ERR(folio)) {
                        next = index + 1;
                        continue;
                }
@@ -270,7 +270,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
                _debug("redirty %llx @%llx", len, start);
 
                folio = filemap_get_folio(mapping, index);
-               if (!folio) {
+               if (IS_ERR(folio)) {
                        next = index + 1;
                        continue;
                }
index bf0b7dea4900afed9d81496c47a7aa741a76c32b..d7973743417b52cacf532248f70a5dfae80bc013 100644 (file)
@@ -5395,7 +5395,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
        while (1) {
                struct folio *folio = filemap_lock_folio(inode->i_mapping,
                                      inode->i_size >> PAGE_SHIFT);
-               if (!folio)
+               if (IS_ERR(folio))
                        return;
                ret = __ext4_journalled_invalidate_folio(folio, offset,
                                                folio_size(folio) - offset);
index 2de9829aed63bfa631a4a529c64eb4b449f1f1af..7bf6d069199cbb7870220755e590e54b1fa93bad 100644 (file)
@@ -141,18 +141,18 @@ mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
        flags = memalloc_nofs_save();
        folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
                        mapping_gfp_mask(mapping[0]));
-       if (!folio[0]) {
+       if (IS_ERR(folio[0])) {
                memalloc_nofs_restore(flags);
-               return -ENOMEM;
+               return PTR_ERR(folio[0]);
        }
 
        folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
                        mapping_gfp_mask(mapping[1]));
        memalloc_nofs_restore(flags);
-       if (!folio[1]) {
+       if (IS_ERR(folio[1])) {
                folio_unlock(folio[0]);
                folio_put(folio[0]);
-               return -ENOMEM;
+               return PTR_ERR(folio[1]);
        }
        /*
         * __filemap_get_folio() may not wait on folio's writeback if
index 9062da6da567534c50c8edccf8ae50c82b21b602..702d79639c0dffe3028bb7cd9d80edca25e5fe0e 100644 (file)
@@ -697,7 +697,7 @@ static void hugetlbfs_zero_partial_page(struct hstate *h,
        struct folio *folio;
 
        folio = filemap_lock_folio(mapping, idx);
-       if (!folio)
+       if (IS_ERR(folio))
                return;
 
        start = start & ~huge_page_mask(h);
index 6f4c97a6d7e9dcdaa5aa9a06159d33d91c0aa9f6..96bb56c203f49d0c3b8f96c01db03f0b194ab2bc 100644 (file)
@@ -468,19 +468,12 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
 {
        unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
-       struct folio *folio;
 
        if (iter->flags & IOMAP_NOWAIT)
                fgp |= FGP_NOWAIT;
 
-       folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+       return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
                        fgp, mapping_gfp_mask(iter->inode->i_mapping));
-       if (folio)
-               return folio;
-
-       if (iter->flags & IOMAP_NOWAIT)
-               return ERR_PTR(-EAGAIN);
-       return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(iomap_get_folio);
 
@@ -911,7 +904,7 @@ static int iomap_write_delalloc_scan(struct inode *inode,
                /* grab locked page */
                folio = filemap_lock_folio(inode->i_mapping,
                                start_byte >> PAGE_SHIFT);
-               if (!folio) {
+               if (IS_ERR(folio)) {
                        start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
                                        PAGE_SIZE;
                        continue;
index 7679a68e8193070bdf22e3554c102e25bf6621e2..209726a9cfdb9cf86d568b72a910c9ead6440809 100644 (file)
@@ -350,8 +350,8 @@ int netfs_write_begin(struct netfs_inode *ctx,
 retry:
        folio = __filemap_get_folio(mapping, index, fgp_flags,
                                    mapping_gfp_mask(mapping));
-       if (!folio)
-               return -ENOMEM;
+       if (IS_ERR(folio))
+               return PTR_ERR(folio);
 
        if (ctx->ops->check_write_begin) {
                /* Allow the netfs (eg. ceph) to flush conflicts. */
index 893625eacab9faf9a9e4203c7c8aa92fb474aaa3..1d03406e6c039a0dfb7faf9ca1960a8277044c05 100644 (file)
@@ -336,8 +336,8 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
 
 start:
        folio = nfs_folio_grab_cache_write_begin(mapping, pos >> PAGE_SHIFT);
-       if (!folio)
-               return -ENOMEM;
+       if (IS_ERR(folio))
+               return PTR_ERR(folio);
        *pagep = &folio->page;
 
        ret = nfs_flush_incompatible(file, folio);
index 41ccd43cd9797fe93ba071a564bbb41381530750..5cf30827f244c42d2a201cc2d85c815655cbbed1 100644 (file)
@@ -259,10 +259,10 @@ repeat:
                        NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");
 
                dfolio = filemap_grab_folio(dmap, folio->index);
-               if (unlikely(!dfolio)) {
+               if (unlikely(IS_ERR(dfolio))) {
                        /* No empty page is added to the page cache */
-                       err = -ENOMEM;
                        folio_unlock(folio);
+                       err = PTR_ERR(dfolio);
                        break;
                }
                if (unlikely(!folio_buffers(folio)))
@@ -311,7 +311,7 @@ repeat:
 
                folio_lock(folio);
                dfolio = filemap_lock_folio(dmap, index);
-               if (dfolio) {
+               if (!IS_ERR(dfolio)) {
                        /* overwrite existing folio in the destination cache */
                        WARN_ON(folio_test_dirty(dfolio));
                        nilfs_copy_page(&dfolio->page, &folio->page, 0);
index 306a0f63cea83fe2a38d57a44a922b1a4378c299..fdcd595d2294403e485ee54cc6a9b770ff6b5725 100644 (file)
@@ -520,7 +520,8 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
  * Looks up the page cache entry at @mapping & @index.  If a folio is
  * present, it is returned with an increased refcount.
  *
- * Otherwise, %NULL is returned.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index.  Will not return a shadow, swap or DAX entry.
  */
 static inline struct folio *filemap_get_folio(struct address_space *mapping,
                                        pgoff_t index)
@@ -537,8 +538,8 @@ static inline struct folio *filemap_get_folio(struct address_space *mapping,
  * present, it is returned locked with an increased refcount.
  *
  * Context: May sleep.
- * Return: A folio or %NULL if there is no folio in the cache for this
- * index.  Will not return a shadow, swap or DAX entry.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index.  Will not return a shadow, swap or DAX entry.
  */
 static inline struct folio *filemap_lock_folio(struct address_space *mapping,
                                        pgoff_t index)
@@ -555,8 +556,8 @@ static inline struct folio *filemap_lock_folio(struct address_space *mapping,
  * a new folio is created. The folio is locked, marked as accessed, and
  * returned.
  *
- * Return: A found or created folio. NULL if no folio is found and failed to
- * create a folio.
+ * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
+ * and failed to create a folio.
  */
 static inline struct folio *filemap_grab_folio(struct address_space *mapping,
                                        pgoff_t index)
index ac161b50f5bc178a0e2e7fc6263d556c6ad06f34..a34abfe8c65430fbded2473544ef4fffee322dd0 100644 (file)
@@ -1907,7 +1907,7 @@ out:
  *
  * If there is a page cache page, it is returned with an increased refcount.
  *
- * Return: The found folio or %NULL otherwise.
+ * Return: The found folio or an ERR_PTR() otherwise.
  */
 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
                int fgp_flags, gfp_t gfp)
@@ -1925,7 +1925,7 @@ repeat:
                if (fgp_flags & FGP_NOWAIT) {
                        if (!folio_trylock(folio)) {
                                folio_put(folio);
-                               return NULL;
+                               return ERR_PTR(-EAGAIN);
                        }
                } else {
                        folio_lock(folio);
@@ -1964,7 +1964,7 @@ no_page:
 
                folio = filemap_alloc_folio(gfp, 0);
                if (!folio)
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
 
                if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
                        fgp_flags |= FGP_LOCK;
@@ -1989,6 +1989,8 @@ no_page:
                        folio_unlock(folio);
        }
 
+       if (!folio)
+               return ERR_PTR(-ENOENT);
        return folio;
 }
 EXPORT_SYMBOL(__filemap_get_folio);
@@ -3258,7 +3260,7 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
         * Do we have something in the page cache already?
         */
        folio = filemap_get_folio(mapping, index);
-       if (likely(folio)) {
+       if (likely(!IS_ERR(folio))) {
                /*
                 * We found the page, so try async readahead before waiting for
                 * the lock.
@@ -3287,7 +3289,7 @@ retry_find:
                folio = __filemap_get_folio(mapping, index,
                                          FGP_CREAT|FGP_FOR_MMAP,
                                          vmf->gfp_mask);
-               if (!folio) {
+               if (IS_ERR(folio)) {
                        if (fpin)
                                goto out_retry;
                        filemap_invalidate_unlock_shared(mapping);
@@ -3638,7 +3640,7 @@ static struct folio *do_read_cache_folio(struct address_space *mapping,
                filler = mapping->a_ops->read_folio;
 repeat:
        folio = filemap_get_folio(mapping, index);
-       if (!folio) {
+       if (IS_ERR(folio)) {
                folio = filemap_alloc_folio(gfp, 0);
                if (!folio)
                        return ERR_PTR(-ENOMEM);
index 1754daa85d35c2e482a97a2d69fc10ecc61e9883..2511c055a35ff693f636684469e93cd02e153fc6 100644 (file)
@@ -97,7 +97,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
        struct folio *folio;
 
        folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
-       if (!folio)
+       if (IS_ERR(folio))
                return NULL;
        return folio_file_page(folio, index);
 }
index 70008dd7f215922fbe98487c41deef1c4566484a..2d860e70fe8885fd185bcc1cbd67a4da5e283372 100644 (file)
@@ -3092,7 +3092,7 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
                struct folio *folio = filemap_get_folio(mapping, index);
 
                nr_pages = 1;
-               if (!folio)
+               if (IS_ERR(folio))
                        continue;
 
                if (!folio_test_large(folio))
index 07abcb6eb203044e39ed9cb839023774c53f3a8a..712e32b382950e55b1652282b2bcfd8c9da28107 100644 (file)
@@ -5780,7 +5780,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
         */
        new_folio = false;
        folio = filemap_lock_folio(mapping, idx);
-       if (!folio) {
+       if (IS_ERR(folio)) {
                size = i_size_read(mapping->host) >> huge_page_shift(h);
                if (idx >= size)
                        goto out;
@@ -6071,6 +6071,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                vma_end_reservation(h, vma, haddr);
 
                pagecache_folio = filemap_lock_folio(mapping, idx);
+               if (IS_ERR(pagecache_folio))
+                       pagecache_folio = NULL;
        }
 
        ptl = huge_pte_lock(h, mm, ptep);
@@ -6182,7 +6184,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (is_continue) {
                ret = -EFAULT;
                folio = filemap_lock_folio(mapping, idx);
-               if (!folio)
+               if (IS_ERR(folio))
                        goto out;
                folio_in_pagecache = true;
        } else if (!*pagep) {
index 13ec89c45389269fbca98f2d0b3a0b2128f9a298..0524add35cae04114a9c2fff44340280b602cdd2 100644 (file)
@@ -5705,7 +5705,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
        /* shmem/tmpfs may report page out on swap: account for that too. */
        index = linear_page_index(vma, addr);
        folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
-       if (!folio)
+       if (IS_ERR(folio))
                return NULL;
        return folio_file_page(folio, index);
 }
index d359650b0f75b9c099e75ecd994a0b82ac38a9fa..2d5be013a25a0ad9f941d4c9376102c2cb7b06a2 100644 (file)
@@ -61,7 +61,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
         * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
         */
        folio = filemap_get_incore_folio(mapping, index);
-       if (folio) {
+       if (!IS_ERR(folio)) {
                present = folio_test_uptodate(folio);
                folio_put(folio);
        }
index 93cb39852a16ed30c2c7ec0ec1dc7bbcebc9a3fe..fa6e38f2f55f401006c0b62cbc67051957675e77 100644 (file)
@@ -605,7 +605,7 @@ next:
 
                index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
                folio = filemap_get_folio(inode->i_mapping, index);
-               if (!folio)
+               if (IS_ERR(folio))
                        goto drop;
 
                /* No huge page at the end of the file: nothing to split */
@@ -3214,7 +3214,7 @@ static const char *shmem_get_link(struct dentry *dentry,
 
        if (!dentry) {
                folio = filemap_get_folio(inode->i_mapping, 0);
-               if (!folio)
+               if (IS_ERR(folio))
                        return ERR_PTR(-ECHILD);
                if (PageHWPoison(folio_page(folio, 0)) ||
                    !folio_test_uptodate(folio)) {
index 92234f4b51d29aa40de12a9dd720e5a84a18e7cc..b76a65ac28b319ca2a73b33a430a0195e8584b62 100644 (file)
@@ -336,7 +336,7 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
        struct folio *folio;
 
        folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
-       if (folio) {
+       if (!IS_ERR(folio)) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;
 
@@ -366,6 +366,8 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
+       } else {
+               folio = NULL;
        }
 
        return folio;
@@ -388,23 +390,24 @@ struct folio *filemap_get_incore_folio(struct address_space *mapping,
        struct swap_info_struct *si;
        struct folio *folio = filemap_get_entry(mapping, index);
 
+       if (!folio)
+               return ERR_PTR(-ENOENT);
        if (!xa_is_value(folio))
-               goto out;
+               return folio;
        if (!shmem_mapping(mapping))
-               return NULL;
+               return ERR_PTR(-ENOENT);
 
        swp = radix_to_swp_entry(folio);
        /* There might be swapin error entries in shmem mapping. */
        if (non_swap_entry(swp))
-               return NULL;
+               return ERR_PTR(-ENOENT);
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
-               return NULL;
+               return ERR_PTR(-ENOENT);
        index = swp_offset(swp);
        folio = filemap_get_folio(swap_address_space(swp), index);
        put_swap_device(si);
-out:
        return folio;
 }
 
@@ -431,7 +434,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                folio = filemap_get_folio(swap_address_space(entry),
                                                swp_offset(entry));
                put_swap_device(si);
-               if (folio)
+               if (!IS_ERR(folio))
                        return folio_file_page(folio, swp_offset(entry));
 
                /*
index c1b97436f8110a3b4b52f0c0d26dd82fc83fd0f3..00b3e46becadbf3fb7c078b3aabb04069424508c 100644 (file)
@@ -136,7 +136,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
        int ret = 0;
 
        folio = filemap_get_folio(swap_address_space(entry), offset);
-       if (!folio)
+       if (IS_ERR(folio))
                return 0;
        /*
         * When this function is called from scan_swap_map_slots() and it's
@@ -2095,7 +2095,7 @@ retry:
 
                entry = swp_entry(type, i);
                folio = filemap_get_folio(swap_address_space(entry), i);
-               if (!folio)
+               if (IS_ERR(folio))
                        continue;
 
                /*
index 7b4ea4c4a46b20777720d077a77fcbb6d6f9bd92..86de31ed4d323808378fbe8f7ed77b15abeb7192 100644 (file)
@@ -375,7 +375,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
        same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
        folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0);
-       if (folio) {
+       if (!IS_ERR(folio)) {
                same_folio = lend < folio_pos(folio) + folio_size(folio);
                if (!truncate_inode_partial_folio(folio, lstart, lend)) {
                        start = folio->index + folio_nr_pages(folio);
@@ -387,14 +387,15 @@ void truncate_inode_pages_range(struct address_space *mapping,
                folio = NULL;
        }
 
-       if (!same_folio)
+       if (!same_folio) {
                folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT,
                                                FGP_LOCK, 0);
-       if (folio) {
-               if (!truncate_inode_partial_folio(folio, lstart, lend))
-                       end = folio->index;
-               folio_unlock(folio);
-               folio_put(folio);
+               if (!IS_ERR(folio)) {
+                       if (!truncate_inode_partial_folio(folio, lstart, lend))
+                               end = folio->index;
+                       folio_unlock(folio);
+                       folio_put(folio);
+               }
        }
 
        index = start;