Merge tag 'folio-6.0' of git://git.infradead.org/users/willy/pagecache
Author:     Linus Torvalds <torvalds@linux-foundation.org>
AuthorDate: Wed, 3 Aug 2022 17:35:43 +0000 (10:35 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Wed, 3 Aug 2022 17:35:43 +0000 (10:35 -0700)
Pull folio updates from Matthew Wilcox:

 - Fix an accounting bug that made NR_FILE_DIRTY grow without limit
   when running xfstests

 - Convert more of mpage to use folios

 - Remove add_to_page_cache() and add_to_page_cache_locked()

 - Convert find_get_pages_range() to filemap_get_folios()

 - Improvements to the read_cache_page() family of functions

 - Remove a few unnecessary checks of PageError

 - Some straightforward filesystem conversions to use folios

 - Split PageMovable users out from address_space_operations into
   their own movable_operations

 - Convert aops->migratepage to aops->migrate_folio (a conversion
   sketch follows this list)

 - Remove nobh support (Christoph Hellwig)

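The migratepage conversion above is mechanical for most filesystems. As a
minimal sketch, assuming a hypothetical filesystem (the aops name is
illustrative; buffer_migrate_folio() is the folio replacement for
buffer_migrate_page from this series, as in the ext2 diff below):

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Where an aops used to set ".migratepage = buffer_migrate_page",
 * it now sets the folio-based hook instead: */
static const struct address_space_operations myfs_aops = {
	.migrate_folio	= buffer_migrate_folio,
};
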
* tag 'folio-6.0' of git://git.infradead.org/users/willy/pagecache: (78 commits)
  fs: remove the NULL get_block case in mpage_writepages
  fs: don't call ->writepage from __mpage_writepage
  fs: remove the nobh helpers
  jfs: stop using the nobh helper
  ext2: remove nobh support
  ntfs3: refactor ntfs_writepages
  mm/folio-compat: Remove migration compatibility functions
  fs: Remove aops->migratepage()
  secretmem: Convert to migrate_folio
  hugetlb: Convert to migrate_folio
  aio: Convert to migrate_folio
  f2fs: Convert to filemap_migrate_folio()
  ubifs: Convert to filemap_migrate_folio()
  btrfs: Convert btrfs_migratepage to migrate_folio
  mm/migrate: Add filemap_migrate_folio()
  mm/migrate: Convert migrate_page() to migrate_folio()
  nfs: Convert to migrate_folio
  btrfs: Convert btree_migratepage to migrate_folio
  mm/migrate: Convert expected_page_refs() to folio_expected_refs()
  mm/migrate: Convert buffer_migrate_page() to buffer_migrate_folio()
  ...

97 files changed:
Documentation/admin-guide/cgroup-v1/memcg_test.rst
Documentation/filesystems/ext2.rst
Documentation/filesystems/locking.rst
Documentation/filesystems/vfs.rst
Documentation/vm/page_migration.rst
arch/powerpc/platforms/pseries/cmm.c
block/fops.c
block/partitions/check.h
block/partitions/core.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/misc/vmw_balloon.c
drivers/virtio/virtio_balloon.c
fs/afs/mntpt.c
fs/aio.c
fs/befs/linuxvfs.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/buffer.c
fs/coda/symlink.c
fs/cramfs/inode.c
fs/ext2/dir.c
fs/ext2/ext2.h
fs/ext2/inode.c
fs/ext2/namei.c
fs/ext2/super.c
fs/ext4/inode.c
fs/f2fs/checkpoint.c
fs/f2fs/compress.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/node.c
fs/freevxfs/vxfs_immed.c
fs/freevxfs/vxfs_subr.c
fs/gfs2/aops.c
fs/gfs2/lops.c
fs/hfs/bnode.c
fs/hfsplus/bnode.c
fs/hostfs/hostfs_kern.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/iomap/buffered-io.c
fs/jfs/inode.c
fs/jfs/jfs_metapage.c
fs/mpage.c
fs/nfs/file.c
fs/nfs/internal.h
fs/nfs/read.c
fs/nfs/write.c
fs/nilfs2/dir.c
fs/nilfs2/page.c
fs/ntfs/aops.c
fs/ntfs/aops.h
fs/ntfs/file.c
fs/ntfs3/inode.c
fs/ntfs3/ntfs_fs.h
fs/ocfs2/aops.c
fs/ocfs2/refcounttree.c
fs/orangefs/inode.c
fs/reiserfs/xattr.c
fs/remap_range.c
fs/squashfs/file.c
fs/ubifs/file.c
fs/ufs/dir.c
fs/ufs/util.c
fs/xfs/xfs_aops.c
fs/zonefs/super.c
include/linux/balloon_compaction.h
include/linux/buffer_head.h
include/linux/fs.h
include/linux/iomap.h
include/linux/migrate.h
include/linux/mpage.h
include/linux/netfs.h
include/linux/page-flags.h
include/linux/pagemap.h
include/linux/pagevec.h
include/linux/swap.h
include/uapi/linux/magic.h
mm/balloon_compaction.c
mm/compaction.c
mm/filemap.c
mm/folio-compat.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memory-failure.c
mm/migrate.c
mm/migrate_device.c
mm/secretmem.c
mm/shmem.c
mm/swap.c
mm/swap_state.c
mm/truncate.c
mm/util.c
mm/vmscan.c
mm/z3fold.c
mm/zsmalloc.c

diff --git a/Documentation/admin-guide/cgroup-v1/memcg_test.rst b/Documentation/admin-guide/cgroup-v1/memcg_test.rst
index 45b94f7..a402359 100644
@@ -97,7 +97,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
 =============
 
        Page Cache is charged at
-       - add_to_page_cache_locked().
+       - filemap_add_folio().
 
        The logic is very clear. (About migration, see below)
 
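As a minimal sketch of the call that now does the charging (the wrapper is
hypothetical; filemap_add_folio() is the entry point this series leaves in
place of add_to_page_cache_locked()):

#include <linux/pagemap.h>

static int myfs_add_folio(struct address_space *mapping,
			  struct folio *folio, pgoff_t index)
{
	/* Charges the memcg and adds @folio to the page cache at
	 * @index; on success the folio is returned locked. */
	return filemap_add_folio(mapping, folio, index,
				 mapping_gfp_mask(mapping));
}
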
diff --git a/Documentation/filesystems/ext2.rst b/Documentation/filesystems/ext2.rst
index 154101c..92aae68 100644
@@ -59,8 +59,6 @@ acl                           Enable POSIX Access Control Lists support
                                (requires CONFIG_EXT2_FS_POSIX_ACL).
 noacl                          Don't support POSIX ACLs.
 
-nobh                           Do not attach buffer_heads to file pagecache.
-
 quota, usrquota                        Enable user disk quota support
                                (requires CONFIG_QUOTA).
 
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index c0fe711..4bb2627 100644
@@ -252,9 +252,8 @@ prototypes::
        bool (*release_folio)(struct folio *, gfp_t);
        void (*free_folio)(struct folio *);
        int (*direct_IO)(struct kiocb *, struct iov_iter *iter);
-       bool (*isolate_page) (struct page *, isolate_mode_t);
-       int (*migratepage)(struct address_space *, struct page *, struct page *);
-       void (*putback_page) (struct page *);
+       int (*migrate_folio)(struct address_space *, struct folio *dst,
+                       struct folio *src, enum migrate_mode);
        int (*launder_folio)(struct folio *);
        bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
        int (*error_remove_page)(struct address_space *, struct page *);
@@ -280,9 +279,7 @@ invalidate_folio:   yes                                     exclusive
 release_folio:         yes
 free_folio:            yes
 direct_IO:
-isolate_page:          yes
-migratepage:           yes (both)
-putback_page:          yes
+migrate_folio:         yes (both)
 launder_folio:         yes
 is_partially_uptodate: yes
 error_remove_page:     yes
diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst
index 08069ec..6cd6953 100644
@@ -737,12 +737,8 @@ cache in your filesystem.  The following members are defined:
                bool (*release_folio)(struct folio *, gfp_t);
                void (*free_folio)(struct folio *);
                ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
-               /* isolate a page for migration */
-               bool (*isolate_page) (struct page *, isolate_mode_t);
-               /* migrate the contents of a page to the specified target */
-               int (*migratepage) (struct page *, struct page *);
-               /* put migration-failed page back to right list */
-               void (*putback_page) (struct page *);
+               int (*migrate_folio)(struct address_space *, struct folio *dst,
+                               struct folio *src, enum migrate_mode);
                int (*launder_folio) (struct folio *);
 
                bool (*is_partially_uptodate) (struct folio *, size_t from,
@@ -774,13 +770,38 @@ cache in your filesystem.  The following members are defined:
        See the file "Locking" for more details.
 
 ``read_folio``
-       called by the VM to read a folio from backing store.  The folio
-       will be locked when read_folio is called, and should be unlocked
-       and marked uptodate once the read completes.  If ->read_folio
-       discovers that it cannot perform the I/O at this time, it can
-        unlock the folio and return AOP_TRUNCATED_PAGE.  In this case,
-       the folio will be looked up again, relocked and if that all succeeds,
-       ->read_folio will be called again.
+       Called by the page cache to read a folio from the backing store.
+       The 'file' argument supplies authentication information to network
+       filesystems, and is generally not used by block based filesystems.
+       It may be NULL if the caller does not have an open file (eg if
+       the kernel is performing a read for itself rather than on behalf
+       of a userspace process with an open file).
+
+       If the mapping does not support large folios, the folio will
+       contain a single page.  The folio will be locked when read_folio
+       is called.  If the read completes successfully, the folio should
+       be marked uptodate.  The filesystem should unlock the folio
+       once the read has completed, whether it was successful or not.
+       The filesystem does not need to modify the refcount on the folio;
+       the page cache holds a reference count and that will not be
+       released until the folio is unlocked.
+
+       Filesystems may implement ->read_folio() synchronously.
+       In normal operation, folios are read through the ->readahead()
+       method.  Only if this fails, or if the caller needs to wait for
+       the read to complete will the page cache call ->read_folio().
+       Filesystems should not attempt to perform their own readahead
+       in the ->read_folio() operation.
+
+       If the filesystem cannot perform the read at this time, it can
+       unlock the folio, do whatever action it needs to ensure that the
+       read will succeed in the future and return AOP_TRUNCATED_PAGE.
+       In this case, the caller should look up the folio, lock it,
+       and call ->read_folio again.
+
+       Callers may invoke the ->read_folio() method directly, but using
+       read_mapping_folio() will take care of locking, waiting for the
+       read to complete and handle cases such as AOP_TRUNCATED_PAGE.
 
 ``writepages``
        called by the VM to write out pages associated with the
@@ -905,20 +926,12 @@ cache in your filesystem.  The following members are defined:
        data directly between the storage and the application's address
        space.
 
-``isolate_page``
-       Called by the VM when isolating a movable non-lru page.  If page
-       is successfully isolated, VM marks the page as PG_isolated via
-       __SetPageIsolated.
-
-``migrate_page``
+``migrate_folio``
        This is used to compact the physical memory usage.  If the VM
-       wants to relocate a page (maybe off a memory card that is
-       signalling imminent failure) it will pass a new page and an old
-       page to this function.  migrate_page should transfer any private
-       data across and update any references that it has to the page.
-
-``putback_page``
-       Called by the VM when isolated page's migration fails.
+       wants to relocate a folio (maybe from a memory device that is
+       signalling imminent failure) it will pass a new folio and an old
+       folio to this function.  migrate_folio should transfer any private
+       data across and update any references that it has to the folio.
 
 ``launder_folio``
        Called before freeing a folio - it writes back the dirty folio.
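The ->read_folio contract documented above amounts to very little code for a
synchronous filesystem. A minimal sketch, assuming a hypothetical helper
myfs_fill_folio() that reads the folio's contents from the backing store:

#include <linux/fs.h>
#include <linux/pagemap.h>

static int myfs_fill_folio(struct folio *folio);	/* hypothetical */

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	int err = myfs_fill_folio(folio);

	if (!err)
		folio_mark_uptodate(folio);
	/*
	 * Unlock whether or not the read succeeded; the page cache
	 * holds the folio reference, so no folio_put() here.
	 */
	folio_unlock(folio);
	return err;
}
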
diff --git a/Documentation/vm/page_migration.rst b/Documentation/vm/page_migration.rst
index 8c5cb81..11493ba 100644
@@ -152,110 +152,15 @@ Steps:
 Non-LRU page migration
 ======================
 
-Although migration originally aimed for reducing the latency of memory accesses
-for NUMA, compaction also uses migration to create high-order pages.
+Although migration originally aimed for reducing the latency of memory
+accesses for NUMA, compaction also uses migration to create high-order
+pages.  For compaction purposes, it is also useful to be able to move
+non-LRU pages, such as zsmalloc and virtio-balloon pages.
 
-Current problem of the implementation is that it is designed to migrate only
-*LRU* pages. However, there are potential non-LRU pages which can be migrated
-in drivers, for example, zsmalloc, virtio-balloon pages.
-
-For virtio-balloon pages, some parts of migration code path have been hooked
-up and added virtio-balloon specific functions to intercept migration logics.
-It's too specific to a driver so other drivers who want to make their pages
-movable would have to add their own specific hooks in the migration path.
-
-To overcome the problem, VM supports non-LRU page migration which provides
-generic functions for non-LRU movable pages without driver specific hooks
-in the migration path.
-
-If a driver wants to make its pages movable, it should define three functions
-which are function pointers of struct address_space_operations.
-
-1. ``bool (*isolate_page) (struct page *page, isolate_mode_t mode);``
-
-   What VM expects from isolate_page() function of driver is to return *true*
-   if driver isolates the page successfully. On returning true, VM marks the page
-   as PG_isolated so concurrent isolation in several CPUs skip the page
-   for isolation. If a driver cannot isolate the page, it should return *false*.
-
-   Once page is successfully isolated, VM uses page.lru fields so driver
-   shouldn't expect to preserve values in those fields.
-
-2. ``int (*migratepage) (struct address_space *mapping,``
-|      ``struct page *newpage, struct page *oldpage, enum migrate_mode);``
-
-   After isolation, VM calls migratepage() of driver with the isolated page.
-   The function of migratepage() is to move the contents of the old page to the
-   new page
-   and set up fields of struct page newpage. Keep in mind that you should
-   indicate to the VM the oldpage is no longer movable via __ClearPageMovable()
-   under page_lock if you migrated the oldpage successfully and returned
-   MIGRATEPAGE_SUCCESS. If driver cannot migrate the page at the moment, driver
-   can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time
-   because VM interprets -EAGAIN as "temporary migration failure". On returning
-   any error except -EAGAIN, VM will give up the page migration without
-   retrying.
-
-   Driver shouldn't touch the page.lru field while in the migratepage() function.
-
-3. ``void (*putback_page)(struct page *);``
-
-   If migration fails on the isolated page, VM should return the isolated page
-   to the driver so VM calls the driver's putback_page() with the isolated page.
-   In this function, the driver should put the isolated page back into its own data
-   structure.
-
-Non-LRU movable page flags
-
-   There are two page flags for supporting non-LRU movable page.
-
-   * PG_movable
-
-     Driver should use the function below to make page movable under page_lock::
-
-       void __SetPageMovable(struct page *page, struct address_space *mapping)
-
-     It needs argument of address_space for registering migration
-     family functions which will be called by VM. Exactly speaking,
-     PG_movable is not a real flag of struct page. Rather, VM
-     reuses the page->mapping's lower bits to represent it::
-
-       #define PAGE_MAPPING_MOVABLE 0x2
-       page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
-
-     so driver shouldn't access page->mapping directly. Instead, driver should
-     use page_mapping() which masks off the low two bits of page->mapping under
-     page lock so it can get the right struct address_space.
-
-     For testing of non-LRU movable pages, VM supports __PageMovable() function.
-     However, it doesn't guarantee to identify non-LRU movable pages because
-     the page->mapping field is unified with other variables in struct page.
-     If the driver releases the page after isolation by VM, page->mapping
-     doesn't have a stable value although it has PAGE_MAPPING_MOVABLE set
-     (look at __ClearPageMovable). But __PageMovable() is cheap to call whether
-     page is LRU or non-LRU movable once the page has been isolated because LRU
-     pages can never have PAGE_MAPPING_MOVABLE set in page->mapping. It is also
-     good for just peeking to test non-LRU movable pages before more expensive
-     checking with lock_page() in pfn scanning to select a victim.
-
-     For guaranteeing non-LRU movable page, VM provides PageMovable() function.
-     Unlike __PageMovable(), PageMovable() validates page->mapping and
-     mapping->a_ops->isolate_page under lock_page(). The lock_page() prevents
-     sudden destroying of page->mapping.
-
-     Drivers using __SetPageMovable() should clear the flag via
-     __ClearMovablePage() under page_lock() before the releasing the page.
-
-   * PG_isolated
-
-     To prevent concurrent isolation among several CPUs, VM marks isolated page
-     as PG_isolated under lock_page(). So if a CPU encounters PG_isolated
-     non-LRU movable page, it can skip it. Driver doesn't need to manipulate the
-     flag because VM will set/clear it automatically. Keep in mind that if the
-     driver sees a PG_isolated page, it means the page has been isolated by the
-     VM so it shouldn't touch the page.lru field.
-     The PG_isolated flag is aliased with the PG_reclaim flag so drivers
-     shouldn't use PG_isolated for its own purposes.
+If a driver wants to make its pages movable, it should define a struct
+movable_operations.  It then needs to call __SetPageMovable() on each
+page that it may be able to move.  This uses the ``page->mapping`` field,
+so this field is not available for the driver to use for other purposes.
 
 Monitoring Migration
 =====================
@@ -286,3 +191,5 @@ THP_MIGRATION_FAIL and PGMIGRATE_FAIL to increase.
 
 Christoph Lameter, May 8, 2006.
 Minchan Kim, Mar 28, 2016.
+
+.. kernel-doc:: include/linux/migrate.h
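A hedged sketch of the new registration, assuming the movable_operations
interface this series adds in include/linux/migrate.h (the mydrv_* callbacks
are hypothetical driver functions):

#include <linux/migrate.h>

static const struct movable_operations mydrv_mops = {
	.isolate_page	= mydrv_isolate_page,
	.migrate_page	= mydrv_migrate_page,
	.putback_page	= mydrv_putback_page,
};

/* Called with the page locked; the ops pointer is stored in
 * page->mapping, which is why the driver loses that field. */
static void mydrv_mark_movable(struct page *page)
{
	__SetPageMovable(page, &mydrv_mops);
}
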
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 15ed820..5f4037c 100644
@@ -19,9 +19,6 @@
 #include <linux/stringify.h>
 #include <linux/swap.h>
 #include <linux/device.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/magic.h>
 #include <linux/balloon_compaction.h>
 #include <asm/firmware.h>
 #include <asm/hvcall.h>
@@ -500,19 +497,6 @@ static struct notifier_block cmm_mem_nb = {
 };
 
 #ifdef CONFIG_BALLOON_COMPACTION
-static struct vfsmount *balloon_mnt;
-
-static int cmm_init_fs_context(struct fs_context *fc)
-{
-       return init_pseudo(fc, PPC_CMM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type balloon_fs = {
-       .name = "ppc-cmm",
-       .init_fs_context = cmm_init_fs_context,
-       .kill_sb = kill_anon_super,
-};
-
 static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
                           struct page *newpage, struct page *page,
                           enum migrate_mode mode)
@@ -564,47 +548,13 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
        return MIGRATEPAGE_SUCCESS;
 }
 
-static int cmm_balloon_compaction_init(void)
+static void cmm_balloon_compaction_init(void)
 {
-       int rc;
-
        balloon_devinfo_init(&b_dev_info);
        b_dev_info.migratepage = cmm_migratepage;
-
-       balloon_mnt = kern_mount(&balloon_fs);
-       if (IS_ERR(balloon_mnt)) {
-               rc = PTR_ERR(balloon_mnt);
-               balloon_mnt = NULL;
-               return rc;
-       }
-
-       b_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
-       if (IS_ERR(b_dev_info.inode)) {
-               rc = PTR_ERR(b_dev_info.inode);
-               b_dev_info.inode = NULL;
-               kern_unmount(balloon_mnt);
-               balloon_mnt = NULL;
-               return rc;
-       }
-
-       b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
-       return 0;
-}
-static void cmm_balloon_compaction_deinit(void)
-{
-       if (b_dev_info.inode)
-               iput(b_dev_info.inode);
-       b_dev_info.inode = NULL;
-       kern_unmount(balloon_mnt);
-       balloon_mnt = NULL;
 }
 #else /* CONFIG_BALLOON_COMPACTION */
-static int cmm_balloon_compaction_init(void)
-{
-       return 0;
-}
-
-static void cmm_balloon_compaction_deinit(void)
+static void cmm_balloon_compaction_init(void)
 {
 }
 #endif /* CONFIG_BALLOON_COMPACTION */
@@ -622,9 +572,7 @@ static int cmm_init(void)
        if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
                return -EOPNOTSUPP;
 
-       rc = cmm_balloon_compaction_init();
-       if (rc)
-               return rc;
+       cmm_balloon_compaction_init();
 
        rc = register_oom_notifier(&cmm_oom_nb);
        if (rc < 0)
@@ -658,7 +606,6 @@ out_reboot_notifier:
 out_oom_notifier:
        unregister_oom_notifier(&cmm_oom_nb);
 out_balloon_compaction:
-       cmm_balloon_compaction_deinit();
        return rc;
 }
 
@@ -677,7 +624,6 @@ static void cmm_exit(void)
        unregister_memory_notifier(&cmm_mem_nb);
        cmm_free_pages(atomic_long_read(&loaned_pages));
        cmm_unregister_sysfs(&cmm_dev);
-       cmm_balloon_compaction_deinit();
 }
 
 /**
diff --git a/block/fops.c b/block/fops.c
index 29066ac..92ee820 100644
@@ -421,7 +421,7 @@ const struct address_space_operations def_blk_aops = {
        .write_end      = blkdev_write_end,
        .writepages     = blkdev_writepages,
        .direct_IO      = blkdev_direct_IO,
-       .migratepage    = buffer_migrate_page_norefs,
+       .migrate_folio  = buffer_migrate_folio_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
diff --git a/block/partitions/check.h b/block/partitions/check.h
index 4ffa235..8d70a88 100644
@@ -24,13 +24,13 @@ struct parsed_partitions {
 };
 
 typedef struct {
-       struct page *v;
+       struct folio *v;
 } Sector;
 
 void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p);
 static inline void put_dev_sector(Sector p)
 {
-       put_page(p.v);
+       folio_put(p.v);
 }
 
 static inline void
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 1a45b1d..fc1d703 100644
@@ -704,25 +704,19 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
 void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
 {
        struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
-       struct page *page;
+       struct folio *folio;
 
        if (n >= get_capacity(state->disk)) {
                state->access_beyond_eod = true;
-               return NULL;
+               goto out;
        }
 
-       page = read_mapping_page(mapping,
-                       (pgoff_t)(n >> (PAGE_SHIFT - 9)), NULL);
-       if (IS_ERR(page))
+       folio = read_mapping_folio(mapping, n >> PAGE_SECTORS_SHIFT, NULL);
+       if (IS_ERR(folio))
                goto out;
-       if (PageError(page))
-               goto out_put_page;
-
-       p->v = page;
-       return (unsigned char *)page_address(page) +
-                       ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << SECTOR_SHIFT);
-out_put_page:
-       put_page(page);
+
+       p->v = folio;
+       return folio_address(folio) + offset_in_folio(folio, n * SECTOR_SIZE);
 out:
        p->v = NULL;
        return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 094f06b..8423df0 100644
@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
                         * However...!
                         *
                         * The mmu-notifier can be invalidated for a
-                        * migrate_page, that is alreadying holding the lock
-                        * on the page. Such a try_to_unmap() will result
+                        * migrate_folio, that is already holding the lock
+                        * on the folio. Such a try_to_unmap() will result
                         * in us calling put_pages() and so recursively try
                         * to lock the page. We avoid that deadlock with
                         * a trylock_page() and in exchange we risk missing
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 086ce77..85dd6aa 100644
@@ -29,8 +29,6 @@
 #include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
 #include <linux/balloon_compaction.h>
 #include <linux/vmw_vmci_defs.h>
 #include <linux/vmw_vmci_api.h>
@@ -1730,20 +1728,6 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
 
 
 #ifdef CONFIG_BALLOON_COMPACTION
-
-static int vmballoon_init_fs_context(struct fs_context *fc)
-{
-       return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type vmballoon_fs = {
-       .name                   = "balloon-vmware",
-       .init_fs_context        = vmballoon_init_fs_context,
-       .kill_sb                = kill_anon_super,
-};
-
-static struct vfsmount *vmballoon_mnt;
-
 /**
  * vmballoon_migratepage() - migrates a balloon page.
  * @b_dev_info: balloon device information descriptor.
@@ -1863,21 +1847,6 @@ out_unlock:
 }
 
 /**
- * vmballoon_compaction_deinit() - removes compaction related data.
- *
- * @b: pointer to the balloon.
- */
-static void vmballoon_compaction_deinit(struct vmballoon *b)
-{
-       if (!IS_ERR(b->b_dev_info.inode))
-               iput(b->b_dev_info.inode);
-
-       b->b_dev_info.inode = NULL;
-       kern_unmount(vmballoon_mnt);
-       vmballoon_mnt = NULL;
-}
-
-/**
  * vmballoon_compaction_init() - initialized compaction for the balloon.
  *
  * @b: pointer to the balloon.
@@ -1888,33 +1857,15 @@ static void vmballoon_compaction_deinit(struct vmballoon *b)
  *
  * Return: zero on success or error code on failure.
  */
-static __init int vmballoon_compaction_init(struct vmballoon *b)
+static __init void vmballoon_compaction_init(struct vmballoon *b)
 {
-       vmballoon_mnt = kern_mount(&vmballoon_fs);
-       if (IS_ERR(vmballoon_mnt))
-               return PTR_ERR(vmballoon_mnt);
-
        b->b_dev_info.migratepage = vmballoon_migratepage;
-       b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);
-
-       if (IS_ERR(b->b_dev_info.inode))
-               return PTR_ERR(b->b_dev_info.inode);
-
-       b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
-       return 0;
 }
 
 #else /* CONFIG_BALLOON_COMPACTION */
-
-static void vmballoon_compaction_deinit(struct vmballoon *b)
-{
-}
-
-static int vmballoon_compaction_init(struct vmballoon *b)
+static inline void vmballoon_compaction_init(struct vmballoon *b)
 {
-       return 0;
 }
-
 #endif /* CONFIG_BALLOON_COMPACTION */
 
 static int __init vmballoon_init(void)
@@ -1939,9 +1890,7 @@ static int __init vmballoon_init(void)
         * balloon_devinfo_init() .
         */
        balloon_devinfo_init(&balloon.b_dev_info);
-       error = vmballoon_compaction_init(&balloon);
-       if (error)
-               goto fail;
+       vmballoon_compaction_init(&balloon);
 
        INIT_LIST_HEAD(&balloon.huge_pages);
        spin_lock_init(&balloon.comm_lock);
@@ -1958,7 +1907,6 @@ static int __init vmballoon_init(void)
        return 0;
 fail:
        vmballoon_unregister_shrinker(&balloon);
-       vmballoon_compaction_deinit(&balloon);
        return error;
 }
 
@@ -1985,8 +1933,5 @@ static void __exit vmballoon_exit(void)
         */
        vmballoon_send_start(&balloon, 0);
        vmballoon_pop(&balloon);
-
-       /* Only once we popped the balloon, compaction can be deinit */
-       vmballoon_compaction_deinit(&balloon);
 }
 module_exit(vmballoon_exit);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index b9737da..bd360b9 100644
@@ -17,9 +17,6 @@
 #include <linux/oom.h>
 #include <linux/wait.h>
 #include <linux/mm.h>
-#include <linux/mount.h>
-#include <linux/magic.h>
-#include <linux/pseudo_fs.h>
 #include <linux/page_reporting.h>
 
 /*
        (1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
 #define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)
 
-#ifdef CONFIG_BALLOON_COMPACTION
-static struct vfsmount *balloon_mnt;
-#endif
-
 enum virtio_balloon_vq {
        VIRTIO_BALLOON_VQ_INFLATE,
        VIRTIO_BALLOON_VQ_DEFLATE,
@@ -805,18 +798,6 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
 
        return MIGRATEPAGE_SUCCESS;
 }
-
-static int balloon_init_fs_context(struct fs_context *fc)
-{
-       return init_pseudo(fc, BALLOON_KVM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type balloon_fs = {
-       .name           = "balloon-kvm",
-       .init_fs_context = balloon_init_fs_context,
-       .kill_sb        = kill_anon_super,
-};
-
 #endif /* CONFIG_BALLOON_COMPACTION */
 
 static unsigned long shrink_free_pages(struct virtio_balloon *vb,
@@ -909,19 +890,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
                goto out_free_vb;
 
 #ifdef CONFIG_BALLOON_COMPACTION
-       balloon_mnt = kern_mount(&balloon_fs);
-       if (IS_ERR(balloon_mnt)) {
-               err = PTR_ERR(balloon_mnt);
-               goto out_del_vqs;
-       }
-
        vb->vb_dev_info.migratepage = virtballoon_migratepage;
-       vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
-       if (IS_ERR(vb->vb_dev_info.inode)) {
-               err = PTR_ERR(vb->vb_dev_info.inode);
-               goto out_kern_unmount;
-       }
-       vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
 #endif
        if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
                /*
@@ -930,13 +899,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
                 */
                if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
                        err = -ENOSPC;
-                       goto out_iput;
+                       goto out_del_vqs;
                }
                vb->balloon_wq = alloc_workqueue("balloon-wq",
                                        WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
                if (!vb->balloon_wq) {
                        err = -ENOMEM;
-                       goto out_iput;
+                       goto out_del_vqs;
                }
                INIT_WORK(&vb->report_free_page_work, report_free_page_func);
                vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
@@ -1030,13 +999,7 @@ out_unregister_shrinker:
 out_del_balloon_wq:
        if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                destroy_workqueue(vb->balloon_wq);
-out_iput:
-#ifdef CONFIG_BALLOON_COMPACTION
-       iput(vb->vb_dev_info.inode);
-out_kern_unmount:
-       kern_unmount(balloon_mnt);
 out_del_vqs:
-#endif
        vdev->config->del_vqs(vdev);
 out_free_vb:
        kfree(vb);
@@ -1083,12 +1046,6 @@ static void virtballoon_remove(struct virtio_device *vdev)
        }
 
        remove_common(vb);
-#ifdef CONFIG_BALLOON_COMPACTION
-       if (vb->vb_dev_info.inode)
-               iput(vb->vb_dev_info.inode);
-
-       kern_unmount(balloon_mnt);
-#endif
        kfree(vb);
 }
 
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index bbb2c21..97f50e9 100644
@@ -132,12 +132,6 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
-               if (PageError(page)) {
-                       ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt);
-                       put_page(page);
-                       return ret;
-               }
-
                buf = kmap(page);
                ret = -EINVAL;
                if (buf[size - 1] == '.')
diff --git a/fs/aio.c b/fs/aio.c
index 3c249b9..a1911e8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-                       struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+                       struct folio *src, enum migrate_mode mode)
 {
        struct kioctx *ctx;
        unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                goto out;
        }
 
-       idx = old->index;
+       idx = src->index;
        if (idx < (pgoff_t)ctx->nr_pages) {
-               /* Make sure the old page hasn't already been changed */
-               if (ctx->ring_pages[idx] != old)
+               /* Make sure the old folio hasn't already been changed */
+               if (ctx->ring_pages[idx] != &src->page)
                        rc = -EAGAIN;
        } else
                rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
                goto out_unlock;
 
        /* Writeback must be complete */
-       BUG_ON(PageWriteback(old));
-       get_page(new);
+       BUG_ON(folio_test_writeback(src));
+       folio_get(dst);
 
-       rc = migrate_page_move_mapping(mapping, new, old, 1);
+       rc = folio_migrate_mapping(mapping, dst, src, 1);
        if (rc != MIGRATEPAGE_SUCCESS) {
-               put_page(new);
+               folio_put(dst);
                goto out_unlock;
        }
 
        /* Take completion_lock to prevent other writes to the ring buffer
-        * while the old page is copied to the new.  This prevents new
+        * while the old folio is copied to the new.  This prevents new
         * events from being lost.
         */
        spin_lock_irqsave(&ctx->completion_lock, flags);
-       migrate_page_copy(new, old);
-       BUG_ON(ctx->ring_pages[idx] != old);
-       ctx->ring_pages[idx] = new;
+       folio_migrate_copy(dst, src);
+       BUG_ON(ctx->ring_pages[idx] != &src->page);
+       ctx->ring_pages[idx] = &dst->page;
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-       /* The old page is no longer accessible. */
-       put_page(old);
+       /* The old folio is no longer accessible. */
+       folio_put(src);
 
 out_unlock:
        mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ out:
        spin_unlock(&mapping->private_lock);
        return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
        .dirty_folio    = noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-       .migratepage    = aio_migratepage,
-#endif
+       .migrate_folio  = aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
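The #else branch above also shows the idiom this series uses to drop #ifdef
CONFIG_MIGRATION blocks from aops initializers: define the method to NULL
when migration is compiled out and assign it unconditionally. A sketch for a
hypothetical filesystem:

#include <linux/migrate.h>

#if IS_ENABLED(CONFIG_MIGRATION)
static int myfs_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/* Moves private data and copies contents; added by this series. */
	return filemap_migrate_folio(mapping, dst, src, mode);
}
#else
#define myfs_migrate_folio NULL
#endif

static const struct address_space_operations myfs_aops = {
	.migrate_folio	= myfs_migrate_folio,
};
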
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index be383fa..32749fc 100644
@@ -108,8 +108,7 @@ static const struct export_operations befs_export_operations = {
  * passes it the address of befs_get_block, for mapping file
  * positions to disk blocks.
  */
-static int
-befs_read_folio(struct file *file, struct folio *folio)
+static int befs_read_folio(struct file *file, struct folio *folio)
 {
        return block_read_full_folio(folio, befs_get_block);
 }
@@ -470,13 +469,12 @@ befs_destroy_inodecache(void)
  */
 static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
 {
-       struct page *page = &folio->page;
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = folio->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct befs_inode_info *befs_ino = BEFS_I(inode);
        befs_data_stream *data = &befs_ino->i_data.ds;
        befs_off_t len = data->size;
-       char *link = page_address(page);
+       char *link = folio_address(folio);
 
        if (len == 0 || len > PAGE_SIZE) {
                befs_error(sb, "Long symlink with illegal length");
@@ -489,12 +487,12 @@ static int befs_symlink_read_folio(struct file *unused, struct folio *folio)
                goto fail;
        }
        link[len - 1] = '\0';
-       SetPageUptodate(page);
-       unlock_page(page);
+       folio_mark_uptodate(folio);
+       folio_unlock(folio);
        return 0;
 fail:
-       SetPageError(page);
-       unlock_page(page);
+       folio_set_error(folio);
+       folio_unlock(folio);
        return -EIO;
 }
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index de440eb..1b23d80 100644
@@ -953,28 +953,28 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
 }
 
 #ifdef CONFIG_MIGRATION
-static int btree_migratepage(struct address_space *mapping,
-                       struct page *newpage, struct page *page,
-                       enum migrate_mode mode)
+static int btree_migrate_folio(struct address_space *mapping,
+               struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
-       if (PageDirty(page))
+       if (folio_test_dirty(src))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
-       if (page_has_private(page) &&
-           !try_to_release_page(page, GFP_KERNEL))
+       if (folio_get_private(src) &&
+           !filemap_release_folio(src, GFP_KERNEL))
                return -EAGAIN;
-       return migrate_page(mapping, newpage, page, mode);
+       return migrate_folio(mapping, dst, src, mode);
 }
+#else
+#define btree_migrate_folio NULL
 #endif
 
-
 static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
@@ -1074,10 +1074,8 @@ static const struct address_space_operations btree_aops = {
        .writepages     = btree_writepages,
        .release_folio  = btree_release_folio,
        .invalidate_folio = btree_invalidate_folio,
-#ifdef CONFIG_MIGRATION
-       .migratepage    = btree_migratepage,
-#endif
-       .dirty_folio = btree_dirty_folio,
+       .migrate_folio  = btree_migrate_folio,
+       .dirty_folio    = btree_dirty_folio,
 };
 
 struct extent_buffer *btrfs_find_create_tree_block(
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 57915bf..11a52db 100644
@@ -8268,30 +8268,24 @@ static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 }
 
 #ifdef CONFIG_MIGRATION
-static int btrfs_migratepage(struct address_space *mapping,
-                            struct page *newpage, struct page *page,
+static int btrfs_migrate_folio(struct address_space *mapping,
+                            struct folio *dst, struct folio *src,
                             enum migrate_mode mode)
 {
-       int ret;
+       int ret = filemap_migrate_folio(mapping, dst, src, mode);
 
-       ret = migrate_page_move_mapping(mapping, newpage, page, 0);
        if (ret != MIGRATEPAGE_SUCCESS)
                return ret;
 
-       if (page_has_private(page))
-               attach_page_private(newpage, detach_page_private(page));
-
-       if (PageOrdered(page)) {
-               ClearPageOrdered(page);
-               SetPageOrdered(newpage);
+       if (folio_test_ordered(src)) {
+               folio_clear_ordered(src);
+               folio_set_ordered(dst);
        }
 
-       if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
-       else
-               migrate_page_states(newpage, page);
        return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define btrfs_migrate_folio NULL
 #endif
 
 static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
@@ -11436,9 +11430,7 @@ static const struct address_space_operations btrfs_aops = {
        .direct_IO      = noop_direct_IO,
        .invalidate_folio = btrfs_invalidate_folio,
        .release_folio  = btrfs_release_folio,
-#ifdef CONFIG_MIGRATION
-       .migratepage    = btrfs_migratepage,
-#endif
+       .migrate_folio  = btrfs_migrate_folio,
        .dirty_folio    = filemap_dirty_folio,
        .error_remove_page = generic_error_remove_page,
        .swap_activate  = btrfs_swap_activate,
diff --git a/fs/buffer.c b/fs/buffer.c
index 82de136..55e762a 100644
@@ -282,10 +282,10 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
        spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
 
        /*
-        * If none of the buffers had errors and they are all
-        * uptodate then we can set the page uptodate.
+        * If all of the buffers are uptodate then we can set the page
+        * uptodate.
         */
-       if (page_uptodate && !PageError(page))
+       if (page_uptodate)
                SetPageUptodate(page);
        unlock_page(page);
        return;
@@ -1604,7 +1604,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 {
        struct inode *bd_inode = bdev->bd_inode;
        struct address_space *bd_mapping = bd_inode->i_mapping;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
        pgoff_t end;
        int i, count;
@@ -1612,24 +1612,24 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
        struct buffer_head *head;
 
        end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
-       pagevec_init(&pvec);
-       while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {
-               count = pagevec_count(&pvec);
+       folio_batch_init(&fbatch);
+       while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
+               count = folio_batch_count(&fbatch);
                for (i = 0; i < count; i++) {
-                       struct page *page = pvec.pages[i];
+                       struct folio *folio = fbatch.folios[i];
 
-                       if (!page_has_buffers(page))
+                       if (!folio_buffers(folio))
                                continue;
                        /*
-                        * We use page lock instead of bd_mapping->private_lock
+                        * We use folio lock instead of bd_mapping->private_lock
                         * to pin buffers here since we can afford to sleep and
                         * it scales better than a global spinlock lock.
                         */
-                       lock_page(page);
-                       /* Recheck when the page is locked which pins bhs */
-                       if (!page_has_buffers(page))
+                       folio_lock(folio);
+                       /* Recheck when the folio is locked which pins bhs */
+                       head = folio_buffers(folio);
+                       if (!head)
                                goto unlock_page;
-                       head = page_buffers(page);
                        bh = head;
                        do {
                                if (!buffer_mapped(bh) || (bh->b_blocknr < block))
@@ -1643,9 +1643,9 @@ next:
                                bh = bh->b_this_page;
                        } while (bh != head);
 unlock_page:
-                       unlock_page(page);
+                       folio_unlock(folio);
                }
-               pagevec_release(&pvec);
+               folio_batch_release(&fbatch);
                cond_resched();
                /* End of range already reached? */
                if (index > end || !index)
@@ -2259,6 +2259,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
        unsigned int blocksize, bbits;
        int nr, i;
        int fully_mapped = 1;
+       bool page_error = false;
 
        VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
 
@@ -2283,8 +2284,10 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
                        if (iblock < lblock) {
                                WARN_ON(bh->b_size != blocksize);
                                err = get_block(inode, iblock, bh, 0);
-                               if (err)
+                               if (err) {
                                        folio_set_error(folio);
+                                       page_error = true;
+                               }
                        }
                        if (!buffer_mapped(bh)) {
                                folio_zero_range(folio, i * blocksize,
@@ -2311,7 +2314,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
                 * All buffers are uptodate - we can set the folio uptodate
                 * as well. But not if get_block() returned an error.
                 */
-               if (!folio_test_error(folio))
+               if (!page_error)
                        folio_mark_uptodate(folio);
                folio_unlock(folio);
                return 0;
@@ -2534,330 +2537,6 @@ out_unlock:
 }
 EXPORT_SYMBOL(block_page_mkwrite);
 
-/*
- * nobh_write_begin()'s prereads are special: the buffer_heads are freed
- * immediately, while under the page lock.  So it needs a special end_io
- * handler which does not touch the bh after unlocking it.
- */
-static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
-{
-       __end_buffer_read_notouch(bh, uptodate);
-}
-
-/*
- * Attach the singly-linked list of buffers created by nobh_write_begin, to
- * the page (converting it to circular linked list and taking care of page
- * dirty races).
- */
-static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
-{
-       struct buffer_head *bh;
-
-       BUG_ON(!PageLocked(page));
-
-       spin_lock(&page->mapping->private_lock);
-       bh = head;
-       do {
-               if (PageDirty(page))
-                       set_buffer_dirty(bh);
-               if (!bh->b_this_page)
-                       bh->b_this_page = head;
-               bh = bh->b_this_page;
-       } while (bh != head);
-       attach_page_private(page, head);
-       spin_unlock(&page->mapping->private_lock);
-}
-
-/*
- * On entry, the page is fully not uptodate.
- * On exit the page is fully uptodate in the areas outside (from,to)
- * The filesystem needs to handle block truncation upon failure.
- */
-int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
-                       struct page **pagep, void **fsdata,
-                       get_block_t *get_block)
-{
-       struct inode *inode = mapping->host;
-       const unsigned blkbits = inode->i_blkbits;
-       const unsigned blocksize = 1 << blkbits;
-       struct buffer_head *head, *bh;
-       struct page *page;
-       pgoff_t index;
-       unsigned from, to;
-       unsigned block_in_page;
-       unsigned block_start, block_end;
-       sector_t block_in_file;
-       int nr_reads = 0;
-       int ret = 0;
-       int is_mapped_to_disk = 1;
-
-       index = pos >> PAGE_SHIFT;
-       from = pos & (PAGE_SIZE - 1);
-       to = from + len;
-
-       page = grab_cache_page_write_begin(mapping, index);
-       if (!page)
-               return -ENOMEM;
-       *pagep = page;
-       *fsdata = NULL;
-
-       if (page_has_buffers(page)) {
-               ret = __block_write_begin(page, pos, len, get_block);
-               if (unlikely(ret))
-                       goto out_release;
-               return ret;
-       }
-
-       if (PageMappedToDisk(page))
-               return 0;
-
-       /*
-        * Allocate buffers so that we can keep track of state, and potentially
-        * attach them to the page if an error occurs. In the common case of
-        * no error, they will just be freed again without ever being attached
-        * to the page (which is all OK, because we're under the page lock).
-        *
-        * Be careful: the buffer linked list is a NULL terminated one, rather
-        * than the circular one we're used to.
-        */
-       head = alloc_page_buffers(page, blocksize, false);
-       if (!head) {
-               ret = -ENOMEM;
-               goto out_release;
-       }
-
-       block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
-
-       /*
-        * We loop across all blocks in the page, whether or not they are
-        * part of the affected region.  This is so we can discover if the
-        * page is fully mapped-to-disk.
-        */
-       for (block_start = 0, block_in_page = 0, bh = head;
-                 block_start < PAGE_SIZE;
-                 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
-               int create;
-
-               block_end = block_start + blocksize;
-               bh->b_state = 0;
-               create = 1;
-               if (block_start >= to)
-                       create = 0;
-               ret = get_block(inode, block_in_file + block_in_page,
-                                       bh, create);
-               if (ret)
-                       goto failed;
-               if (!buffer_mapped(bh))
-                       is_mapped_to_disk = 0;
-               if (buffer_new(bh))
-                       clean_bdev_bh_alias(bh);
-               if (PageUptodate(page)) {
-                       set_buffer_uptodate(bh);
-                       continue;
-               }
-               if (buffer_new(bh) || !buffer_mapped(bh)) {
-                       zero_user_segments(page, block_start, from,
-                                                       to, block_end);
-                       continue;
-               }
-               if (buffer_uptodate(bh))
-                       continue;       /* reiserfs does this */
-               if (block_start < from || block_end > to) {
-                       lock_buffer(bh);
-                       bh->b_end_io = end_buffer_read_nobh;
-                       submit_bh(REQ_OP_READ, bh);
-                       nr_reads++;
-               }
-       }
-
-       if (nr_reads) {
-               /*
-                * The page is locked, so these buffers are protected from
-                * any VM or truncate activity.  Hence we don't need to care
-                * for the buffer_head refcounts.
-                */
-               for (bh = head; bh; bh = bh->b_this_page) {
-                       wait_on_buffer(bh);
-                       if (!buffer_uptodate(bh))
-                               ret = -EIO;
-               }
-               if (ret)
-                       goto failed;
-       }
-
-       if (is_mapped_to_disk)
-               SetPageMappedToDisk(page);
-
-       *fsdata = head; /* to be released by nobh_write_end */
-
-       return 0;
-
-failed:
-       BUG_ON(!ret);
-       /*
-        * Error recovery is a bit difficult. We need to zero out blocks that
-        * were newly allocated, and dirty them to ensure they get written out.
-        * Buffers need to be attached to the page at this point, otherwise
-        * the handling of potential IO errors during writeout would be hard
-        * (could try doing synchronous writeout, but what if that fails too?)
-        */
-       attach_nobh_buffers(page, head);
-       page_zero_new_buffers(page, from, to);
-
-out_release:
-       unlock_page(page);
-       put_page(page);
-       *pagep = NULL;
-
-       return ret;
-}
-EXPORT_SYMBOL(nobh_write_begin);
-
-int nobh_write_end(struct file *file, struct address_space *mapping,
-                       loff_t pos, unsigned len, unsigned copied,
-                       struct page *page, void *fsdata)
-{
-       struct inode *inode = page->mapping->host;
-       struct buffer_head *head = fsdata;
-       struct buffer_head *bh;
-       BUG_ON(fsdata != NULL && page_has_buffers(page));
-
-       if (unlikely(copied < len) && head)
-               attach_nobh_buffers(page, head);
-       if (page_has_buffers(page))
-               return generic_write_end(file, mapping, pos, len,
-                                       copied, page, fsdata);
-
-       SetPageUptodate(page);
-       set_page_dirty(page);
-       if (pos+copied > inode->i_size) {
-               i_size_write(inode, pos+copied);
-               mark_inode_dirty(inode);
-       }
-
-       unlock_page(page);
-       put_page(page);
-
-       while (head) {
-               bh = head;
-               head = head->b_this_page;
-               free_buffer_head(bh);
-       }
-
-       return copied;
-}
-EXPORT_SYMBOL(nobh_write_end);
-
-/*
- * nobh_writepage() - based on block_full_write_page() except
- * that it tries to operate without attaching bufferheads to
- * the page.
- */
-int nobh_writepage(struct page *page, get_block_t *get_block,
-                       struct writeback_control *wbc)
-{
-       struct inode * const inode = page->mapping->host;
-       loff_t i_size = i_size_read(inode);
-       const pgoff_t end_index = i_size >> PAGE_SHIFT;
-       unsigned offset;
-       int ret;
-
-       /* Is the page fully inside i_size? */
-       if (page->index < end_index)
-               goto out;
-
-       /* Is the page fully outside i_size? (truncate in progress) */
-       offset = i_size & (PAGE_SIZE-1);
-       if (page->index >= end_index+1 || !offset) {
-               unlock_page(page);
-               return 0; /* don't care */
-       }
-
-       /*
-        * The page straddles i_size.  It must be zeroed out on each and every
-        * writepage invocation because it may be mmapped.  "A file is mapped
-        * in multiples of the page size.  For a file that is not a multiple of
-        * the  page size, the remaining memory is zeroed when mapped, and
-        * writes to that region are not written out to the file."
-        */
-       zero_user_segment(page, offset, PAGE_SIZE);
-out:
-       ret = mpage_writepage(page, get_block, wbc);
-       if (ret == -EAGAIN)
-               ret = __block_write_full_page(inode, page, get_block, wbc,
-                                             end_buffer_async_write);
-       return ret;
-}
-EXPORT_SYMBOL(nobh_writepage);
-
-int nobh_truncate_page(struct address_space *mapping,
-                       loff_t from, get_block_t *get_block)
-{
-       pgoff_t index = from >> PAGE_SHIFT;
-       struct inode *inode = mapping->host;
-       unsigned blocksize = i_blocksize(inode);
-       struct folio *folio;
-       struct buffer_head map_bh;
-       size_t offset;
-       sector_t iblock;
-       int err;
-
-       /* Block boundary? Nothing to do */
-       if (!(from & (blocksize - 1)))
-               return 0;
-
-       folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
-                       mapping_gfp_mask(mapping));
-       err = -ENOMEM;
-       if (!folio)
-               goto out;
-
-       if (folio_buffers(folio))
-               goto has_buffers;
-
-       iblock = from >> inode->i_blkbits;
-       map_bh.b_size = blocksize;
-       map_bh.b_state = 0;
-       err = get_block(inode, iblock, &map_bh, 0);
-       if (err)
-               goto unlock;
-       /* unmapped? It's a hole - nothing to do */
-       if (!buffer_mapped(&map_bh))
-               goto unlock;
-
-       /* Ok, it's mapped. Make sure it's up-to-date */
-       if (!folio_test_uptodate(folio)) {
-               err = mapping->a_ops->read_folio(NULL, folio);
-               if (err) {
-                       folio_put(folio);
-                       goto out;
-               }
-               folio_lock(folio);
-               if (!folio_test_uptodate(folio)) {
-                       err = -EIO;
-                       goto unlock;
-               }
-               if (folio_buffers(folio))
-                       goto has_buffers;
-       }
-       offset = offset_in_folio(folio, from);
-       folio_zero_segment(folio, offset, round_up(offset, blocksize));
-       folio_mark_dirty(folio);
-       err = 0;
-
-unlock:
-       folio_unlock(folio);
-       folio_put(folio);
-out:
-       return err;
-
-has_buffers:
-       folio_unlock(folio);
-       folio_put(folio);
-       return block_truncate_page(mapping, from, get_block);
-}
-EXPORT_SYMBOL(nobh_truncate_page);
-
 int block_truncate_page(struct address_space *mapping,
                        loff_t from, get_block_t *get_block)
 {
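For reference, a minimal sketch of the folio_batch walk that replaces the
pagevec_lookup_range() loop in the clean_bdev_aliases() hunk above (assuming
a kernel context; the function name is illustrative):

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void myfs_walk_range(struct address_space *mapping,
			    pgoff_t index, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	/* filemap_get_folios() fills the batch and advances @index. */
	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* ... inspect or modify the folio ... */
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}
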
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index 8adf810..ccdbec3 100644
 
 static int coda_symlink_filler(struct file *file, struct folio *folio)
 {
-       struct page *page = &folio->page;
        struct inode *inode = folio->mapping->host;
        int error;
        struct coda_inode_info *cii;
        unsigned int len = PAGE_SIZE;
-       char *p = page_address(page);
+       char *p = folio_address(folio);
 
        cii = ITOC(inode);
 
        error = venus_readlink(inode->i_sb, &cii->c_fid, p, &len);
        if (error)
                goto fail;
-       SetPageUptodate(page);
-       unlock_page(page);
+       folio_mark_uptodate(folio);
+       folio_unlock(folio);
        return 0;
 
 fail:
-       SetPageError(page);
-       unlock_page(page);
+       folio_set_error(folio);
+       folio_unlock(folio);
        return error;
 }
 
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 7ae59a6..61ccf77 100644
@@ -183,6 +183,7 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
                                unsigned int len)
 {
        struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+       struct file_ra_state ra;
        struct page *pages[BLKS_PER_BUF];
        unsigned i, blocknr, buffer;
        unsigned long devsize;
@@ -212,6 +213,9 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
        devsize = bdev_nr_bytes(sb->s_bdev) >> PAGE_SHIFT;
 
        /* Ok, read in BLKS_PER_BUF pages completely first. */
+       file_ra_state_init(&ra, mapping);
+       page_cache_sync_readahead(mapping, &ra, NULL, blocknr, BLKS_PER_BUF);
+
        for (i = 0; i < BLKS_PER_BUF; i++) {
                struct page *page = NULL;
 
@@ -224,19 +228,6 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
                pages[i] = page;
        }
 
-       for (i = 0; i < BLKS_PER_BUF; i++) {
-               struct page *page = pages[i];
-
-               if (page) {
-                       wait_on_page_locked(page);
-                       if (!PageUptodate(page)) {
-                               /* asynchronous error */
-                               put_page(page);
-                               pages[i] = NULL;
-                       }
-               }
-       }
-
        buffer = next_buffer;
        next_buffer = NEXT_BUFFER(buffer);
        buffer_blocknr[buffer] = blocknr;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 43de293..8f59775 100644
@@ -200,19 +200,19 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n,
                                   int quiet, void **page_addr)
 {
        struct address_space *mapping = dir->i_mapping;
-       struct page *page = read_mapping_page(mapping, n, NULL);
-       if (!IS_ERR(page)) {
-               *page_addr = kmap_local_page(page);
-               if (unlikely(!PageChecked(page))) {
-                       if (PageError(page) || !ext2_check_page(page, quiet,
-                                                               *page_addr))
-                               goto fail;
-               }
+       struct folio *folio = read_mapping_folio(mapping, n, NULL);
+
+       if (IS_ERR(folio))
+               return &folio->page;
+       *page_addr = kmap_local_folio(folio, n & (folio_nr_pages(folio) - 1));
+       if (unlikely(!folio_test_checked(folio))) {
+               if (!ext2_check_page(&folio->page, quiet, *page_addr))
+                       goto fail;
        }
-       return page;
+       return &folio->page;
 
 fail:
-       ext2_put_page(page, *page_addr);
+       ext2_put_page(&folio->page, *page_addr);
        return ERR_PTR(-EIO);
 }
 
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index d4f306a..28de11a 100644
@@ -795,7 +795,6 @@ extern const struct file_operations ext2_file_operations;
 /* inode.c */
 extern void ext2_set_file_ops(struct inode *inode);
 extern const struct address_space_operations ext2_aops;
-extern const struct address_space_operations ext2_nobh_aops;
 extern const struct iomap_ops ext2_iomap_ops;
 
 /* namei.c */
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 7a192e4..918ab2f 100644
@@ -908,25 +908,6 @@ static int ext2_write_end(struct file *file, struct address_space *mapping,
        return ret;
 }
 
-static int
-ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
-               loff_t pos, unsigned len, struct page **pagep, void **fsdata)
-{
-       int ret;
-
-       ret = nobh_write_begin(mapping, pos, len, pagep, fsdata,
-                              ext2_get_block);
-       if (ret < 0)
-               ext2_write_failed(mapping, pos + len);
-       return ret;
-}
-
-static int ext2_nobh_writepage(struct page *page,
-                       struct writeback_control *wbc)
-{
-       return nobh_writepage(page, ext2_get_block, wbc);
-}
-
 static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 {
        return generic_block_bmap(mapping,block,ext2_get_block);
@@ -973,26 +954,11 @@ const struct address_space_operations ext2_aops = {
        .bmap                   = ext2_bmap,
        .direct_IO              = ext2_direct_IO,
        .writepages             = ext2_writepages,
-       .migratepage            = buffer_migrate_page,
+       .migrate_folio          = buffer_migrate_folio,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
 };
 
-const struct address_space_operations ext2_nobh_aops = {
-       .dirty_folio            = block_dirty_folio,
-       .invalidate_folio       = block_invalidate_folio,
-       .read_folio             = ext2_read_folio,
-       .readahead              = ext2_readahead,
-       .writepage              = ext2_nobh_writepage,
-       .write_begin            = ext2_nobh_write_begin,
-       .write_end              = nobh_write_end,
-       .bmap                   = ext2_bmap,
-       .direct_IO              = ext2_direct_IO,
-       .writepages             = ext2_writepages,
-       .migratepage            = buffer_migrate_page,
-       .error_remove_page      = generic_error_remove_page,
-};
-
 static const struct address_space_operations ext2_dax_aops = {
        .writepages             = ext2_dax_writepages,
        .direct_IO              = noop_direct_IO,
@@ -1298,13 +1264,10 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
 
        inode_dio_wait(inode);
 
-       if (IS_DAX(inode)) {
+       if (IS_DAX(inode))
                error = dax_zero_range(inode, newsize,
                                       PAGE_ALIGN(newsize) - newsize, NULL,
                                       &ext2_iomap_ops);
-       } else if (test_opt(inode->i_sb, NOBH))
-               error = nobh_truncate_page(inode->i_mapping,
-                               newsize, ext2_get_block);
        else
                error = block_truncate_page(inode->i_mapping,
                                newsize, ext2_get_block);
@@ -1396,8 +1359,6 @@ void ext2_set_file_ops(struct inode *inode)
        inode->i_fop = &ext2_file_operations;
        if (IS_DAX(inode))
                inode->i_mapping->a_ops = &ext2_dax_aops;
-       else if (test_opt(inode->i_sb, NOBH))
-               inode->i_mapping->a_ops = &ext2_nobh_aops;
        else
                inode->i_mapping->a_ops = &ext2_aops;
 }
@@ -1497,10 +1458,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &ext2_dir_inode_operations;
                inode->i_fop = &ext2_dir_operations;
-               if (test_opt(inode->i_sb, NOBH))
-                       inode->i_mapping->a_ops = &ext2_nobh_aops;
-               else
-                       inode->i_mapping->a_ops = &ext2_aops;
+               inode->i_mapping->a_ops = &ext2_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (ext2_inode_is_fast_symlink(inode)) {
                        inode->i_link = (char *)ei->i_data;
@@ -1510,10 +1468,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
                } else {
                        inode->i_op = &ext2_symlink_inode_operations;
                        inode_nohighmem(inode);
-                       if (test_opt(inode->i_sb, NOBH))
-                               inode->i_mapping->a_ops = &ext2_nobh_aops;
-                       else
-                               inode->i_mapping->a_ops = &ext2_aops;
+                       inode->i_mapping->a_ops = &ext2_aops;
                }
        } else {
                inode->i_op = &ext2_special_inode_operations;
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 5f6b756..5fd9a22 100644
@@ -178,10 +178,7 @@ static int ext2_symlink (struct user_namespace * mnt_userns, struct inode * dir,
                /* slow symlink */
                inode->i_op = &ext2_symlink_inode_operations;
                inode_nohighmem(inode);
-               if (test_opt(inode->i_sb, NOBH))
-                       inode->i_mapping->a_ops = &ext2_nobh_aops;
-               else
-                       inode->i_mapping->a_ops = &ext2_aops;
+               inode->i_mapping->a_ops = &ext2_aops;
                err = page_symlink(inode, symname, l);
                if (err)
                        goto out_fail;
@@ -247,10 +244,7 @@ static int ext2_mkdir(struct user_namespace * mnt_userns,
 
        inode->i_op = &ext2_dir_inode_operations;
        inode->i_fop = &ext2_dir_operations;
-       if (test_opt(inode->i_sb, NOBH))
-               inode->i_mapping->a_ops = &ext2_nobh_aops;
-       else
-               inode->i_mapping->a_ops = &ext2_aops;
+       inode->i_mapping->a_ops = &ext2_aops;
 
        inode_inc_link_count(inode);
 
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 6f475d2..27a0a8c 100644
@@ -296,9 +296,6 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
                seq_puts(seq, ",noacl");
 #endif
 
-       if (test_opt(sb, NOBH))
-               seq_puts(seq, ",nobh");
-
        if (test_opt(sb, USRQUOTA))
                seq_puts(seq, ",usrquota");
 
@@ -551,7 +548,8 @@ static int parse_options(char *options, struct super_block *sb,
                        clear_opt (opts->s_mount_opt, OLDALLOC);
                        break;
                case Opt_nobh:
-                       set_opt (opts->s_mount_opt, NOBH);
+                       ext2_msg(sb, KERN_INFO,
+                               "nobh option not supported");
                        break;
 #ifdef CONFIG_EXT2_FS_XATTR
                case Opt_user_xattr:
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3dcc1dd..9fd60fc 100644
@@ -1554,9 +1554,9 @@ struct mpage_da_data {
 static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                                       bool invalidate)
 {
-       int nr_pages, i;
+       unsigned nr, i;
        pgoff_t index, end;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        struct inode *inode = mpd->inode;
        struct address_space *mapping = inode->i_mapping;
 
@@ -1574,15 +1574,18 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                ext4_es_remove_extent(inode, start, last - start + 1);
        }
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
        while (index <= end) {
-               nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
-               if (nr_pages == 0)
+               nr = filemap_get_folios(mapping, &index, end, &fbatch);
+               if (nr == 0)
                        break;
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       struct folio *folio = page_folio(page);
+               for (i = 0; i < nr; i++) {
+                       struct folio *folio = fbatch.folios[i];
 
+                       if (folio->index < mpd->first_page)
+                               continue;
+                       if (folio->index + folio_nr_pages(folio) - 1 > end)
+                               continue;
                        BUG_ON(!folio_test_locked(folio));
                        BUG_ON(folio_test_writeback(folio));
                        if (invalidate) {
@@ -1594,7 +1597,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                        }
                        folio_unlock(folio);
                }
-               pagevec_release(&pvec);
+               folio_batch_release(&fbatch);
        }
 }
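This hunk is the batch-lookup pattern repeated across the series: filemap_get_folios() fills a folio_batch and advances the start index itself, but a returned folio can begin before the window or extend past end, hence the two new bounds checks above. The bare loop shape, as a sketch:

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    static void demo_walk_range(struct address_space *mapping,
                                pgoff_t index, pgoff_t end)
    {
            struct folio_batch fbatch;
            unsigned int i, nr;

            folio_batch_init(&fbatch);
            while ((nr = filemap_get_folios(mapping, &index, end, &fbatch))) {
                    for (i = 0; i < nr; i++) {
                            struct folio *folio = fbatch.folios[i];

                            /* ... per-folio work ... */
                    }
                    folio_batch_release(&fbatch);	/* drop the batch's refs */
                    cond_resched();
            }
    }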
 
@@ -2311,8 +2314,8 @@ out:
  */
 static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 {
-       struct pagevec pvec;
-       int nr_pages, i;
+       struct folio_batch fbatch;
+       unsigned nr, i;
        struct inode *inode = mpd->inode;
        int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
        pgoff_t start, end;
@@ -2326,14 +2329,13 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
        lblk = start << bpp_bits;
        pblock = mpd->map.m_pblk;
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
        while (start <= end) {
-               nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
-                                               &start, end);
-               if (nr_pages == 0)
+               nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
+               if (nr == 0)
                        break;
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
+               for (i = 0; i < nr; i++) {
+                       struct page *page = &fbatch.folios[i]->page;
 
                        err = mpage_process_page(mpd, page, &lblk, &pblock,
                                                 &map_bh);
@@ -2349,14 +2351,14 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
                        if (err < 0)
                                goto out;
                }
-               pagevec_release(&pvec);
+               folio_batch_release(&fbatch);
        }
        /* Extent fully mapped and matches with page boundary. We are done. */
        mpd->map.m_len = 0;
        mpd->map.m_flags = 0;
        return 0;
 out:
-       pagevec_release(&pvec);
+       folio_batch_release(&fbatch);
        return err;
 }
 
@@ -3631,7 +3633,7 @@ static const struct address_space_operations ext4_aops = {
        .invalidate_folio       = ext4_invalidate_folio,
        .release_folio          = ext4_release_folio,
        .direct_IO              = noop_direct_IO,
-       .migratepage            = buffer_migrate_page,
+       .migrate_folio          = buffer_migrate_folio,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
        .swap_activate          = ext4_iomap_swap_activate,
@@ -3666,7 +3668,7 @@ static const struct address_space_operations ext4_da_aops = {
        .invalidate_folio       = ext4_invalidate_folio,
        .release_folio          = ext4_release_folio,
        .direct_IO              = noop_direct_IO,
-       .migratepage            = buffer_migrate_page,
+       .migrate_folio          = buffer_migrate_folio,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
        .swap_activate          = ext4_iomap_swap_activate,
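Both ext4 tables change the same way; for any filesystem whose folio private data is buffer_heads, buffer_migrate_folio is the drop-in successor to buffer_migrate_page in the renamed slot. Sketch of the relevant corner of an aops table (other methods elided, demo name illustrative):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>
    #include <linux/mm.h>

    static const struct address_space_operations demo_aops = {
            /* ... read/write methods elided ... */
            .migrate_folio          = buffer_migrate_folio,
            .is_partially_uptodate  = block_is_partially_uptodate,
            .error_remove_page      = generic_error_remove_page,
    };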
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 6d8b2bf..8259e0f 100644
@@ -463,9 +463,7 @@ const struct address_space_operations f2fs_meta_aops = {
        .dirty_folio    = f2fs_dirty_meta_folio,
        .invalidate_folio = f2fs_invalidate_folio,
        .release_folio  = f2fs_release_folio,
-#ifdef CONFIG_MIGRATION
-       .migratepage    = f2fs_migrate_page,
-#endif
+       .migrate_folio  = filemap_migrate_folio,
 };
 
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 24824cd..009e6c5 100644
@@ -1832,45 +1832,40 @@ bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
 void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
 {
        struct address_space *mapping = sbi->compress_inode->i_mapping;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        pgoff_t index = 0;
        pgoff_t end = MAX_BLKADDR(sbi);
 
        if (!mapping->nrpages)
                return;
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
 
        do {
-               unsigned int nr_pages;
-               int i;
+               unsigned int nr, i;
 
-               nr_pages = pagevec_lookup_range(&pvec, mapping,
-                                               &index, end - 1);
-               if (!nr_pages)
+               nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
+               if (!nr)
                        break;
 
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-
-                       if (page->index > end)
-                               break;
+               for (i = 0; i < nr; i++) {
+                       struct folio *folio = fbatch.folios[i];
 
-                       lock_page(page);
-                       if (page->mapping != mapping) {
-                               unlock_page(page);
+                       folio_lock(folio);
+                       if (folio->mapping != mapping) {
+                               folio_unlock(folio);
                                continue;
                        }
 
-                       if (ino != get_page_private_data(page)) {
-                               unlock_page(page);
+                       if (ino != get_page_private_data(&folio->page)) {
+                               folio_unlock(folio);
                                continue;
                        }
 
-                       generic_error_remove_page(mapping, page);
-                       unlock_page(page);
+                       generic_error_remove_page(mapping, &folio->page);
+                       folio_unlock(folio);
                }
-               pagevec_release(&pvec);
+               folio_batch_release(&fbatch);
                cond_resched();
        } while (index < end);
 }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5c13ee3..ed503d5 100644
@@ -3752,42 +3752,6 @@ out:
        return blknr;
 }
 
-#ifdef CONFIG_MIGRATION
-#include <linux/migrate.h>
-
-int f2fs_migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode)
-{
-       int rc, extra_count = 0;
-
-       BUG_ON(PageWriteback(page));
-
-       rc = migrate_page_move_mapping(mapping, newpage,
-                               page, extra_count);
-       if (rc != MIGRATEPAGE_SUCCESS)
-               return rc;
-
-       /* guarantee to start from no stale private field */
-       set_page_private(newpage, 0);
-       if (PagePrivate(page)) {
-               set_page_private(newpage, page_private(page));
-               SetPagePrivate(newpage);
-               get_page(newpage);
-
-               set_page_private(page, 0);
-               ClearPagePrivate(page);
-               put_page(page);
-       }
-
-       if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
-       else
-               migrate_page_states(newpage, page);
-
-       return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
 #ifdef CONFIG_SWAP
 static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
                                                        unsigned int blkcnt)
@@ -4019,15 +3983,13 @@ const struct address_space_operations f2fs_dblock_aops = {
        .write_begin    = f2fs_write_begin,
        .write_end      = f2fs_write_end,
        .dirty_folio    = f2fs_dirty_data_folio,
+       .migrate_folio  = filemap_migrate_folio,
        .invalidate_folio = f2fs_invalidate_folio,
        .release_folio  = f2fs_release_folio,
        .direct_IO      = noop_direct_IO,
        .bmap           = f2fs_bmap,
        .swap_activate  = f2fs_swap_activate,
        .swap_deactivate = f2fs_swap_deactivate,
-#ifdef CONFIG_MIGRATION
-       .migratepage    = f2fs_migrate_page,
-#endif
 };
 
 void f2fs_clear_page_cache_dirty_tag(struct page *page)
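The deleted f2fs_migrate_page() was an open-coded copy of generic logic, which is why filemap_migrate_folio() can replace it in all three f2fs tables. Roughly what that helper does, as a sketch rather than the verbatim implementation:

    #include <linux/migrate.h>
    #include <linux/pagemap.h>

    static int sketch_migrate_folio(struct address_space *mapping,
                    struct folio *dst, struct folio *src, enum migrate_mode mode)
    {
            int ret = folio_migrate_mapping(mapping, dst, src, 0);

            if (ret != MIGRATEPAGE_SUCCESS)
                    return ret;

            if (folio_get_private(src))
                    folio_attach_private(dst, folio_detach_private(src));

            if (mode != MIGRATE_SYNC_NO_COPY)
                    folio_migrate_copy(dst, src);	/* contents + flags */
            else
                    folio_migrate_flags(dst, src);	/* flags only */
            return MIGRATEPAGE_SUCCESS;
    }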
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 868170b..85921a1 100644
@@ -3764,10 +3764,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 void f2fs_write_failed(struct inode *inode, loff_t to);
 void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
 bool f2fs_release_folio(struct folio *folio, gfp_t wait);
-#ifdef CONFIG_MIGRATION
-int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
-                       struct page *page, enum migrate_mode mode);
-#endif
 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
 void f2fs_clear_page_cache_dirty_tag(struct page *page);
 int f2fs_init_post_read_processing(void);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 04a145f..740c72d 100644
@@ -2165,9 +2165,7 @@ const struct address_space_operations f2fs_node_aops = {
        .dirty_folio    = f2fs_dirty_node_folio,
        .invalidate_folio = f2fs_invalidate_folio,
        .release_folio  = f2fs_release_folio,
-#ifdef CONFIG_MIGRATION
-       .migratepage    = f2fs_migrate_page,
-#endif
+       .migrate_folio  = filemap_migrate_folio,
 };
 
 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c
index c2ef9f0..9b49ec3 100644
 #include "vxfs_extern.h"
 #include "vxfs_inode.h"
 
-
-static int     vxfs_immed_read_folio(struct file *, struct folio *);
-
-/*
- * Address space operations for immed files and directories.
- */
-const struct address_space_operations vxfs_immed_aops = {
-       .read_folio =           vxfs_immed_read_folio,
-};
-
 /**
  * vxfs_immed_read_folio - read part of an immed inode into pagecache
  * @file:      file context (unused)
@@ -30,7 +20,7 @@ const struct address_space_operations vxfs_immed_aops = {
  *
  * Description:
  *   vxfs_immed_read_folio reads a part of the immed area of the
- *   file that hosts @pp into the pagecache.
+ *   file that hosts @folio into the pagecache.
  *
  * Returns:
  *   Zero on success, else a negative error code.
@@ -38,21 +28,26 @@ const struct address_space_operations vxfs_immed_aops = {
  * Locking status:
  *   @folio is locked and will be unlocked.
  */
-static int
-vxfs_immed_read_folio(struct file *fp, struct folio *folio)
+static int vxfs_immed_read_folio(struct file *fp, struct folio *folio)
 {
-       struct page *pp = &folio->page;
-       struct vxfs_inode_info  *vip = VXFS_INO(pp->mapping->host);
-       u_int64_t       offset = (u_int64_t)pp->index << PAGE_SHIFT;
-       caddr_t         kaddr;
+       struct vxfs_inode_info *vip = VXFS_INO(folio->mapping->host);
+       void *src = vip->vii_immed.vi_immed + folio_pos(folio);
+       unsigned long i;
 
-       kaddr = kmap(pp);
-       memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE);
-       kunmap(pp);
-       
-       flush_dcache_page(pp);
-       SetPageUptodate(pp);
-        unlock_page(pp);
+       for (i = 0; i < folio_nr_pages(folio); i++) {
+               memcpy_to_page(folio_page(folio, i), 0, src, PAGE_SIZE);
+               src += PAGE_SIZE;
+       }
+
+       folio_mark_uptodate(folio);
+       folio_unlock(folio);
 
        return 0;
 }
+
+/*
+ * Address space operations for immed files and directories.
+ */
+const struct address_space_operations vxfs_immed_aops = {
+       .read_folio =   vxfs_immed_read_folio,
+};
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 0e633d2..c99282d 100644
@@ -51,15 +51,9 @@ vxfs_get_page(struct address_space *mapping, u_long n)
                kmap(pp);
                /** if (!PageChecked(pp)) **/
                        /** vxfs_check_page(pp); **/
-               if (PageError(pp))
-                       goto fail;
        }
        
        return (pp);
-                
-fail:
-       vxfs_put_page(pp);
-       return ERR_PTR(-EIO);
 }
 
 /**
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 106e90a..57ff883 100644
@@ -774,7 +774,7 @@ static const struct address_space_operations gfs2_aops = {
        .invalidate_folio = iomap_invalidate_folio,
        .bmap = gfs2_bmap,
        .direct_IO = noop_direct_IO,
-       .migratepage = iomap_migrate_page,
+       .migrate_folio = filemap_migrate_folio,
        .is_partially_uptodate = iomap_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
 };
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 90a2d7b..1902413 100644
@@ -452,36 +452,36 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
  * @head: The journal head to start from
  * @done: If set, perform only cleanup, else search and set if found.
  *
- * Find the page with 'index' in the journal's mapping. Search the page for
+ * Find the folio with 'index' in the journal's mapping. Search the folio for
  * the journal head if requested (cleanup == false). Release refs on the
- * page so the page cache can reclaim it (put_page() twice). We grabbed a
- * reference on this page two times, first when we did a find_or_create_page()
- * to obtain the page to add it to the bio and second when we do a
- * find_get_page() here to get the page to wait on while I/O on it is being
+ * folio so the page cache can reclaim it. We grabbed a
+ * reference on this folio twice, first when we did a find_or_create_page()
+ * to obtain the folio to add it to the bio and second when we do a
+ * filemap_get_folio() here to get the folio to wait on while I/O on it is being
  * completed.
- * This function is also used to free up a page we might've grabbed but not
+ * This function is also used to free up a folio we might've grabbed but not
  * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
  * submitted the I/O, but we already found the jhead so we only need to drop
- * our references to the page.
+ * our references to the folio.
  */
 
 static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
                                    struct gfs2_log_header_host *head,
                                    bool *done)
 {
-       struct page *page;
+       struct folio *folio;
 
-       page = find_get_page(jd->jd_inode->i_mapping, index);
-       wait_on_page_locked(page);
+       folio = filemap_get_folio(jd->jd_inode->i_mapping, index);
 
-       if (PageError(page))
+       folio_wait_locked(folio);
+       if (folio_test_error(folio))
                *done = true;
 
        if (!*done)
-               *done = gfs2_jhead_pg_srch(jd, head, page);
+               *done = gfs2_jhead_pg_srch(jd, head, &folio->page);
 
-       put_page(page); /* Once for find_get_page */
-       put_page(page); /* Once more for find_or_create_page */
+       /* filemap_get_folio() and the earlier find_or_create_page() */
+       folio_put_refs(folio, 2);
 }
 
 static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
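folio_put_refs() collapses the two put_page() calls into one atomic subtraction. The reference pairing the comment describes, reduced to a sketch (demo name illustrative):

    #include <linux/pagemap.h>

    /* Drop both references at once: one taken by the lookup below, one
     * left over from an earlier find_or_create_page(). */
    static void demo_drop_both_refs(struct address_space *mapping, pgoff_t index)
    {
            struct folio *folio = filemap_get_folio(mapping, index);

            if (folio) {
                    folio_wait_locked(folio);
                    folio_put_refs(folio, 2);
            }
    }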
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index c0a73a6..c83fd0e 100644
@@ -296,10 +296,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
                page = read_mapping_page(mapping, block++, NULL);
                if (IS_ERR(page))
                        goto fail;
-               if (PageError(page)) {
-                       put_page(page);
-                       goto fail;
-               }
                node->page[i] = page;
        }
 
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 177fae4..a5ab00e 100644
@@ -447,10 +447,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
                page = read_mapping_page(mapping, block, NULL);
                if (IS_ERR(page))
                        goto fail;
-               if (PageError(page)) {
-                       put_page(page);
-                       goto fail;
-               }
                node->page[i] = page;
        }
 
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index cc1bc6f..07881b7 100644
@@ -416,15 +416,15 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc)
 
        err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count);
        if (err != count) {
-               ClearPageUptodate(page);
+               if (err >= 0)
+                       err = -EIO;
+               mapping_set_error(mapping, err);
                goto out;
        }
 
        if (base > inode->i_size)
                inode->i_size = base;
 
-       if (PageError(page))
-               ClearPageError(page);
        err = 0;
 
  out:
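Instead of flagging the page, the error is now recorded on the mapping, where a later fsync()/msync() picks it up through the errseq mechanism. The reporting side in isolation, as a sketch:

    #include <linux/pagemap.h>

    static void demo_report_wb_error(struct address_space *mapping, int err)
    {
            if (err)
                    mapping_set_error(mapping, err);	/* sets AS_EIO/AS_ENOSPC and wb_err */
    }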
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 02eb723..20336cb 100644
@@ -108,16 +108,6 @@ static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
 }
 #endif
 
-static void huge_pagevec_release(struct pagevec *pvec)
-{
-       int i;
-
-       for (i = 0; i < pagevec_count(pvec); ++i)
-               put_page(pvec->pages[i]);
-
-       pagevec_reinit(pvec);
-}
-
 /*
  * Mask used when checking the page offset value passed in via system
  * calls.  This value will be converted to a loff_t which is signed.
@@ -480,25 +470,19 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        const pgoff_t end = lend >> huge_page_shift(h);
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        pgoff_t next, index;
        int i, freed = 0;
        bool truncate_op = (lend == LLONG_MAX);
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
        next = start;
-       while (next < end) {
-               /*
-                * When no more pages are found, we are done.
-                */
-               if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
-                       break;
-
-               for (i = 0; i < pagevec_count(&pvec); ++i) {
-                       struct page *page = pvec.pages[i];
+       while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
+               for (i = 0; i < folio_batch_count(&fbatch); ++i) {
+                       struct folio *folio = fbatch.folios[i];
                        u32 hash = 0;
 
-                       index = page->index;
+                       index = folio->index;
                        if (!truncate_op) {
                                /*
                                 * Only need to hold the fault mutex in the
@@ -511,15 +495,15 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                        }
 
                        /*
-                        * If page is mapped, it was faulted in after being
+                        * If folio is mapped, it was faulted in after being
                         * unmapped in caller.  Unmap (again) now after taking
                         * the fault mutex.  The mutex will prevent faults
-                        * until we finish removing the page.
+                        * until we finish removing the folio.
                         *
                         * This race can only happen in the hole punch case.
                         * Getting here in a truncate operation is a bug.
                         */
-                       if (unlikely(page_mapped(page))) {
+                       if (unlikely(folio_mapped(folio))) {
                                BUG_ON(truncate_op);
 
                                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
@@ -532,7 +516,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                i_mmap_unlock_write(mapping);
                        }
 
-                       lock_page(page);
+                       folio_lock(folio);
                        /*
                         * We must free the huge page and remove from page
                         * cache (remove_huge_page) BEFORE removing the
@@ -542,8 +526,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                         * the subpool and global reserve usage count can need
                         * to be adjusted.
                         */
-                       VM_BUG_ON(HPageRestoreReserve(page));
-                       remove_huge_page(page);
+                       VM_BUG_ON(HPageRestoreReserve(&folio->page));
+                       remove_huge_page(&folio->page);
                        freed++;
                        if (!truncate_op) {
                                if (unlikely(hugetlb_unreserve_pages(inode,
@@ -551,11 +535,11 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                        hugetlb_fix_reserve_counts(inode);
                        }
 
-                       unlock_page(page);
+                       folio_unlock(folio);
                        if (!truncate_op)
                                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                }
-               huge_pagevec_release(&pvec);
+               folio_batch_release(&fbatch);
                cond_resched();
        }
 
@@ -797,7 +781,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 
                SetHPageMigratable(page);
                /*
-                * unlock_page because locked by add_to_page_cache()
+                * unlock_page because locked by huge_add_to_page_cache()
                 * put_page() due to reference from alloc_huge_page()
                 */
                unlock_page(page);
@@ -1008,28 +992,33 @@ static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
        return error;
 }
 
-static int hugetlbfs_migrate_page(struct address_space *mapping,
-                               struct page *newpage, struct page *page,
+#ifdef CONFIG_MIGRATION
+static int hugetlbfs_migrate_folio(struct address_space *mapping,
+                               struct folio *dst, struct folio *src,
                                enum migrate_mode mode)
 {
        int rc;
 
-       rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+       rc = migrate_huge_page_move_mapping(mapping, dst, src);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
 
-       if (hugetlb_page_subpool(page)) {
-               hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
-               hugetlb_set_page_subpool(page, NULL);
+       if (hugetlb_page_subpool(&src->page)) {
+               hugetlb_set_page_subpool(&dst->page,
+                                       hugetlb_page_subpool(&src->page));
+               hugetlb_set_page_subpool(&src->page, NULL);
        }
 
        if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
+               folio_migrate_copy(dst, src);
        else
-               migrate_page_states(newpage, page);
+               folio_migrate_flags(dst, src);
 
        return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define hugetlbfs_migrate_folio NULL
+#endif
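The #else branch defines the method name to NULL, so the aops initializer below needs no #ifdef of its own; several conversions in this series (nfs included) use the same trick. As a template, with demo names standing in:

    #include <linux/migrate.h>

    #ifdef CONFIG_MIGRATION
    static int demo_migrate_folio(struct address_space *mapping,
                    struct folio *dst, struct folio *src, enum migrate_mode mode)
    {
            /* fs-specific checks would go here */
            return migrate_folio(mapping, dst, src, mode);
    }
    #else
    #define demo_migrate_folio NULL
    #endif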
 
 static int hugetlbfs_error_remove_page(struct address_space *mapping,
                                struct page *page)
@@ -1196,7 +1185,7 @@ static const struct address_space_operations hugetlbfs_aops = {
        .write_begin    = hugetlbfs_write_begin,
        .write_end      = hugetlbfs_write_end,
        .dirty_folio    = noop_dirty_folio,
-       .migratepage    = hugetlbfs_migrate_page,
+       .migrate_folio  = hugetlbfs_migrate_folio,
        .error_remove_page      = hugetlbfs_error_remove_page,
 };
 
diff --git a/fs/inode.c b/fs/inode.c
index 259ebf4..524ee91 100644
@@ -604,7 +604,7 @@ void clear_inode(struct inode *inode)
 {
        /*
         * We have to cycle the i_pages lock here because reclaim can be in the
-        * process of removing the last page (in __delete_from_page_cache())
+        * process of removing the last page (in __filemap_remove_folio())
         * and we must not free the mapping under it.
         */
        xa_lock_irq(&inode->i_data.i_pages);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c681eac..6505d45 100644
@@ -162,9 +162,6 @@ static void iomap_iop_set_range_uptodate(struct folio *folio,
 static void iomap_set_range_uptodate(struct folio *folio,
                struct iomap_page *iop, size_t off, size_t len)
 {
-       if (folio_test_error(folio))
-               return;
-
        if (iop)
                iomap_iop_set_range_uptodate(folio, iop, off, len);
        else
@@ -500,31 +497,6 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
 }
 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
 
-#ifdef CONFIG_MIGRATION
-int
-iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-               struct page *page, enum migrate_mode mode)
-{
-       struct folio *folio = page_folio(page);
-       struct folio *newfolio = page_folio(newpage);
-       int ret;
-
-       ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
-       if (ret != MIGRATEPAGE_SUCCESS)
-               return ret;
-
-       if (folio_test_private(folio))
-               folio_attach_private(newfolio, folio_detach_private(folio));
-
-       if (mode != MIGRATE_SYNC_NO_COPY)
-               folio_migrate_copy(newfolio, folio);
-       else
-               folio_migrate_flags(newfolio, folio);
-       return MIGRATEPAGE_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(iomap_migrate_page);
-#endif /* CONFIG_MIGRATION */
-
 static void
 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
 {
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 2593265..d1ec920 100644
@@ -301,13 +301,25 @@ static int jfs_write_begin(struct file *file, struct address_space *mapping,
 {
        int ret;
 
-       ret = nobh_write_begin(mapping, pos, len, pagep, fsdata, jfs_get_block);
+       ret = block_write_begin(mapping, pos, len, pagep, jfs_get_block);
        if (unlikely(ret))
                jfs_write_failed(mapping, pos + len);
 
        return ret;
 }
 
+static int jfs_write_end(struct file *file, struct address_space *mapping,
+               loff_t pos, unsigned len, unsigned copied, struct page *page,
+               void *fsdata)
+{
+       int ret;
+
+       ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+       if (ret < len)
+               jfs_write_failed(mapping, pos + len);
+       return ret;
+}
+
 static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
 {
        return generic_block_bmap(mapping, block, jfs_get_block);
@@ -346,7 +358,7 @@ const struct address_space_operations jfs_aops = {
        .writepage      = jfs_writepage,
        .writepages     = jfs_writepages,
        .write_begin    = jfs_write_begin,
-       .write_end      = nobh_write_end,
+       .write_end      = jfs_write_end,
        .bmap           = jfs_bmap,
        .direct_IO      = jfs_direct_IO,
 };
@@ -399,7 +411,7 @@ void jfs_truncate(struct inode *ip)
 {
        jfs_info("jfs_truncate: size = 0x%lx", (ulong) ip->i_size);
 
-       nobh_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);
+       block_truncate_page(ip->i_mapping, ip->i_size, jfs_get_block);
 
        IWRITE_LOCK(ip, RDWRLOCK_NORMAL);
        jfs_truncate_nolock(ip, ip->i_size);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 387652a..2e8461c 100644
@@ -618,7 +618,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                SetPageUptodate(page);
        } else {
                page = read_mapping_page(mapping, page_index, NULL);
-               if (IS_ERR(page) || !PageUptodate(page)) {
+               if (IS_ERR(page)) {
                        jfs_err("read_mapping_page failed!");
                        return NULL;
                }
diff --git a/fs/mpage.c b/fs/mpage.c
index c6d8bf8..0f8ae95 100644
@@ -75,26 +75,28 @@ static struct bio *mpage_bio_submit(struct bio *bio)
  * them.  So when the buffer is up to date and the page size == block size,
  * this marks the page up to date instead of adding new buffers.
  */
-static void 
-map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) 
+static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh,
+               int page_block)
 {
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = folio->mapping->host;
        struct buffer_head *page_bh, *head;
        int block = 0;
 
-       if (!page_has_buffers(page)) {
+       head = folio_buffers(folio);
+       if (!head) {
                /*
                 * don't make any buffers if there is only one buffer on
-                * the page and the page just needs to be set up to date
+                * the folio and the folio just needs to be set up to date
                 */
                if (inode->i_blkbits == PAGE_SHIFT &&
                    buffer_uptodate(bh)) {
-                       SetPageUptodate(page);    
+                       folio_mark_uptodate(folio);
                        return;
                }
-               create_empty_buffers(page, i_blocksize(inode), 0);
+               create_empty_buffers(&folio->page, i_blocksize(inode), 0);
+               head = folio_buffers(folio);
        }
-       head = page_buffers(page);
+
        page_bh = head;
        do {
                if (block == page_block) {
@@ -110,7 +112,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 
 struct mpage_readpage_args {
        struct bio *bio;
-       struct page *page;
+       struct folio *folio;
        unsigned int nr_pages;
        bool is_readahead;
        sector_t last_block_in_bio;
@@ -130,8 +132,8 @@ struct mpage_readpage_args {
  */
 static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 {
-       struct page *page = args->page;
-       struct inode *inode = page->mapping->host;
+       struct folio *folio = args->folio;
+       struct inode *inode = folio->mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
@@ -148,17 +150,20 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
        blk_opf_t opf = REQ_OP_READ;
        unsigned nblocks;
        unsigned relative_block;
-       gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+       gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+
+       /* MAX_BUF_PER_PAGE, for example */
+       VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
 
        if (args->is_readahead) {
                opf |= REQ_RAHEAD;
                gfp |= __GFP_NORETRY | __GFP_NOWARN;
        }
 
-       if (page_has_buffers(page))
+       if (folio_buffers(folio))
                goto confused;
 
-       block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
+       block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
        last_block = block_in_file + args->nr_pages * blocks_per_page;
        last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
        if (last_block > last_block_in_file)
@@ -191,9 +196,9 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
        }
 
        /*
-        * Then do more get_blocks calls until we are done with this page.
+        * Then do more get_blocks calls until we are done with this folio.
         */
-       map_bh->b_page = page;
+       map_bh->b_page = &folio->page;
        while (page_block < blocks_per_page) {
                map_bh->b_state = 0;
                map_bh->b_size = 0;
@@ -216,12 +221,12 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
 
                /* some filesystems will copy data into the page during
                 * the get_block call, in which case we don't want to
-                * read it again.  map_buffer_to_page copies the data
-                * we just collected from get_block into the page's buffers
-                * so readpage doesn't have to repeat the get_block call
+                * read it again.  map_buffer_to_folio copies the data
+                * we just collected from get_block into the folio's buffers
+                * so read_folio doesn't have to repeat the get_block call
                 */
                if (buffer_uptodate(map_bh)) {
-                       map_buffer_to_page(page, map_bh, page_block);
+                       map_buffer_to_folio(folio, map_bh, page_block);
                        goto confused;
                }
        
@@ -246,18 +251,18 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
        }
 
        if (first_hole != blocks_per_page) {
-               zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
+               folio_zero_segment(folio, first_hole << blkbits, PAGE_SIZE);
                if (first_hole == 0) {
-                       SetPageUptodate(page);
-                       unlock_page(page);
+                       folio_mark_uptodate(folio);
+                       folio_unlock(folio);
                        goto out;
                }
        } else if (fully_mapped) {
-               SetPageMappedToDisk(page);
+               folio_set_mappedtodisk(folio);
        }
 
        /*
-        * This page will go to BIO.  Do we need to send this BIO off first?
+        * This folio will go to BIO.  Do we need to send this BIO off first?
         */
        if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
                args->bio = mpage_bio_submit(args->bio);
@@ -266,7 +271,7 @@ alloc_new:
        if (args->bio == NULL) {
                if (first_hole == blocks_per_page) {
                        if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
-                                                               page))
+                                                               &folio->page))
                                goto out;
                }
                args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,
@@ -277,7 +282,7 @@ alloc_new:
        }
 
        length = first_hole << blkbits;
-       if (bio_add_page(args->bio, page, length, 0) < length) {
+       if (!bio_add_folio(args->bio, folio, length, 0)) {
                args->bio = mpage_bio_submit(args->bio);
                goto alloc_new;
        }
@@ -295,10 +300,10 @@ out:
 confused:
        if (args->bio)
                args->bio = mpage_bio_submit(args->bio);
-       if (!PageUptodate(page))
-               block_read_full_folio(page_folio(page), args->get_block);
+       if (!folio_test_uptodate(folio))
+               block_read_full_folio(folio, args->get_block);
        else
-               unlock_page(page);
+               folio_unlock(folio);
        goto out;
 }
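bio_add_folio() differs from bio_add_page() in returning a bool rather than the number of bytes added, which is why the length comparison becomes a simple negation above. The retry idiom in isolation (sketch; demo name illustrative):

    #include <linux/bio.h>

    /* Returns the bio still being filled, or NULL after submitting a
     * full one so the caller can allocate a fresh bio and retry. */
    static struct bio *demo_add_or_submit(struct bio *bio,
                                          struct folio *folio, size_t len)
    {
            if (!bio_add_folio(bio, folio, len, 0)) {
                    submit_bio(bio);
                    return NULL;
            }
            return bio;
    }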
 
@@ -343,18 +348,17 @@ confused:
  */
 void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
 {
-       struct page *page;
+       struct folio *folio;
        struct mpage_readpage_args args = {
                .get_block = get_block,
                .is_readahead = true,
        };
 
-       while ((page = readahead_page(rac))) {
-               prefetchw(&page->flags);
-               args.page = page;
+       while ((folio = readahead_folio(rac))) {
+               prefetchw(&folio->flags);
+               args.folio = folio;
                args.nr_pages = readahead_count(rac);
                args.bio = do_mpage_readpage(&args);
-               put_page(page);
        }
        if (args.bio)
                mpage_bio_submit(args.bio);
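The dropped put_page() is not an accounting change: readahead_folio() releases the page-cache reference itself before returning, unlike readahead_page(), so the caller only starts I/O and leaves the unlock to the completion path. The loop shape as a sketch:

    #include <linux/pagemap.h>

    static void demo_readahead(struct readahead_control *rac)
    {
            struct folio *folio;

            while ((folio = readahead_folio(rac))) {
                    /* queue async read; I/O completion unlocks the
                     * folio -- no folio_put() here, readahead_folio()
                     * already dropped the reference */
            }
    }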
@@ -367,13 +371,11 @@ EXPORT_SYMBOL(mpage_readahead);
 int mpage_read_folio(struct folio *folio, get_block_t get_block)
 {
        struct mpage_readpage_args args = {
-               .page = &folio->page,
+               .folio = folio,
                .nr_pages = 1,
                .get_block = get_block,
        };
 
-       VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
-
        args.bio = do_mpage_readpage(&args);
        if (args.bio)
                mpage_bio_submit(args.bio);
@@ -402,7 +404,6 @@ struct mpage_data {
        struct bio *bio;
        sector_t last_block_in_bio;
        get_block_t *get_block;
-       unsigned use_writepage;
 };
 
 /*
@@ -622,15 +623,10 @@ confused:
        if (bio)
                bio = mpage_bio_submit(bio);
 
-       if (mpd->use_writepage) {
-               ret = mapping->a_ops->writepage(page, wbc);
-       } else {
-               ret = -EAGAIN;
-               goto out;
-       }
        /*
         * The caller has a ref on the inode, so *mapping is stable
         */
+       ret = block_write_full_page(page, mpd->get_block, wbc);
        mapping_set_error(mapping, ret);
 out:
        mpd->bio = bio;
@@ -642,8 +638,6 @@ out:
  * @mapping: address space structure to write
  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
  * @get_block: the filesystem's block mapper function.
- *             If this is NULL then use a_ops->writepage.  Otherwise, go
- *             direct-to-BIO.
  *
  * This is a library function, which implements the writepages()
  * address_space_operation.
@@ -660,42 +654,17 @@ int
 mpage_writepages(struct address_space *mapping,
                struct writeback_control *wbc, get_block_t get_block)
 {
+       struct mpage_data mpd = {
+               .get_block      = get_block,
+       };
        struct blk_plug plug;
        int ret;
 
        blk_start_plug(&plug);
-
-       if (!get_block)
-               ret = generic_writepages(mapping, wbc);
-       else {
-               struct mpage_data mpd = {
-                       .bio = NULL,
-                       .last_block_in_bio = 0,
-                       .get_block = get_block,
-                       .use_writepage = 1,
-               };
-
-               ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
-               if (mpd.bio)
-                       mpage_bio_submit(mpd.bio);
-       }
-       blk_finish_plug(&plug);
-       return ret;
-}
-EXPORT_SYMBOL(mpage_writepages);
-
-int mpage_writepage(struct page *page, get_block_t get_block,
-       struct writeback_control *wbc)
-{
-       struct mpage_data mpd = {
-               .bio = NULL,
-               .last_block_in_bio = 0,
-               .get_block = get_block,
-               .use_writepage = 0,
-       };
-       int ret = __mpage_writepage(page, wbc, &mpd);
+       ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
        if (mpd.bio)
                mpage_bio_submit(mpd.bio);
+       blk_finish_plug(&plug);
        return ret;
 }
-EXPORT_SYMBOL(mpage_writepage);
+EXPORT_SYMBOL(mpage_writepages);
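With the NULL-get_block case removed, mpage_writepages() always goes direct-to-BIO and every caller must pass a real block mapper (ntfs3 below now dispatches to generic_writepages() itself for resident files). A caller now looks like this, with myfs_get_block hypothetical:

    #include <linux/fs.h>
    #include <linux/writeback.h>
    #include <linux/mpage.h>

    static get_block_t myfs_get_block;	/* hypothetical block mapper */

    static int myfs_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
    {
            return mpage_writepages(mapping, wbc, myfs_get_block);
    }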
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 2d72b1b..549baed 100644
@@ -533,9 +533,7 @@ const struct address_space_operations nfs_file_aops = {
        .write_end = nfs_write_end,
        .invalidate_folio = nfs_invalidate_folio,
        .release_folio = nfs_release_folio,
-#ifdef CONFIG_MIGRATION
-       .migratepage = nfs_migrate_page,
-#endif
+       .migrate_folio = nfs_migrate_folio,
        .launder_folio = nfs_launder_folio,
        .is_dirty_writeback = nfs_check_dirty_writeback,
        .error_remove_page = generic_error_remove_page,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8f8cd6e..437ebe5 100644
@@ -578,8 +578,10 @@ void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
 #endif
 
 #ifdef CONFIG_MIGRATION
-extern int nfs_migrate_page(struct address_space *,
-               struct page *, struct page *, enum migrate_mode);
+int nfs_migrate_folio(struct address_space *, struct folio *dst,
+               struct folio *src, enum migrate_mode);
+#else
+#define nfs_migrate_folio NULL
 #endif
 
 static inline int
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 5a9b043..8ae2c8d 100644
@@ -120,12 +120,8 @@ static void nfs_readpage_release(struct nfs_page *req, int error)
        if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
                SetPageError(page);
        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-               struct address_space *mapping = page_file_mapping(page);
-
                if (PageUptodate(page))
                        nfs_fscache_write_page(inode, page);
-               else if (!PageError(page) && !PagePrivate(page))
-                       generic_error_remove_page(mapping, page);
                unlock_page(page);
        }
        nfs_release_request(req);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 1c70646..6956969 100644
@@ -2119,27 +2119,27 @@ out_error:
 }
 
 #ifdef CONFIG_MIGRATION
-int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
-               struct page *page, enum migrate_mode mode)
+int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
+               struct folio *src, enum migrate_mode mode)
 {
        /*
-        * If PagePrivate is set, then the page is currently associated with
+        * If the private flag is set, the folio is currently associated with
         * an in-progress read or write request. Don't try to migrate it.
         *
         * FIXME: we could do this in principle, but we'll need a way to ensure
         *        that we can safely release the inode reference while holding
-        *        the page lock.
+        *        the folio lock.
         */
-       if (PagePrivate(page))
+       if (folio_test_private(src))
                return -EBUSY;
 
-       if (PageFsCache(page)) {
+       if (folio_test_fscache(src)) {
                if (mode == MIGRATE_ASYNC)
                        return -EBUSY;
-               wait_on_page_fscache(page);
+               folio_wait_fscache(src);
        }
 
-       return migrate_page(mapping, newpage, page, mode);
+       return migrate_folio(mapping, dst, src, mode);
 }
 #endif
 
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index f8f4c2f..decd647 100644
@@ -194,7 +194,7 @@ static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
        if (!IS_ERR(page)) {
                kmap(page);
                if (unlikely(!PageChecked(page))) {
-                       if (PageError(page) || !nilfs_check_page(page))
+                       if (!nilfs_check_page(page))
                                goto fail;
                }
        }
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index a8e88cc..3267e96 100644
@@ -294,57 +294,57 @@ repeat:
 void nilfs_copy_back_pages(struct address_space *dmap,
                           struct address_space *smap)
 {
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        unsigned int i, n;
-       pgoff_t index = 0;
+       pgoff_t start = 0;
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
 repeat:
-       n = pagevec_lookup(&pvec, smap, &index);
+       n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
        if (!n)
                return;
 
-       for (i = 0; i < pagevec_count(&pvec); i++) {
-               struct page *page = pvec.pages[i], *dpage;
-               pgoff_t offset = page->index;
-
-               lock_page(page);
-               dpage = find_lock_page(dmap, offset);
-               if (dpage) {
-                       /* overwrite existing page in the destination cache */
-                       WARN_ON(PageDirty(dpage));
-                       nilfs_copy_page(dpage, page, 0);
-                       unlock_page(dpage);
-                       put_page(dpage);
-                       /* Do we not need to remove page from smap here? */
+       for (i = 0; i < folio_batch_count(&fbatch); i++) {
+               struct folio *folio = fbatch.folios[i], *dfolio;
+               pgoff_t index = folio->index;
+
+               folio_lock(folio);
+               dfolio = filemap_lock_folio(dmap, index);
+               if (dfolio) {
+                       /* overwrite existing folio in the destination cache */
+                       WARN_ON(folio_test_dirty(dfolio));
+                       nilfs_copy_page(&dfolio->page, &folio->page, 0);
+                       folio_unlock(dfolio);
+                       folio_put(dfolio);
+                       /* Do we not need to remove folio from smap here? */
                } else {
-                       struct page *p;
+                       struct folio *f;
 
-                       /* move the page to the destination cache */
+                       /* move the folio to the destination cache */
                        xa_lock_irq(&smap->i_pages);
-                       p = __xa_erase(&smap->i_pages, offset);
-                       WARN_ON(page != p);
+                       f = __xa_erase(&smap->i_pages, index);
+                       WARN_ON(folio != f);
                        smap->nrpages--;
                        xa_unlock_irq(&smap->i_pages);
 
                        xa_lock_irq(&dmap->i_pages);
-                       p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
-                       if (unlikely(p)) {
+                       f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
+                       if (unlikely(f)) {
                                /* Probably -ENOMEM */
-                               page->mapping = NULL;
-                               put_page(page);
+                               folio->mapping = NULL;
+                               folio_put(folio);
                        } else {
-                               page->mapping = dmap;
+                               folio->mapping = dmap;
                                dmap->nrpages++;
-                               if (PageDirty(page))
-                                       __xa_set_mark(&dmap->i_pages, offset,
+                               if (folio_test_dirty(folio))
+                                       __xa_set_mark(&dmap->i_pages, index,
                                                        PAGECACHE_TAG_DIRTY);
                        }
                        xa_unlock_irq(&dmap->i_pages);
                }
-               unlock_page(page);
+               folio_unlock(folio);
        }
-       pagevec_release(&pvec);
+       folio_batch_release(&fbatch);
        cond_resched();
 
        goto repeat;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index b5765fd..9364d35 100644
@@ -1659,7 +1659,7 @@ const struct address_space_operations ntfs_normal_aops = {
        .dirty_folio    = block_dirty_folio,
 #endif /* NTFS_RW */
        .bmap           = ntfs_bmap,
-       .migratepage    = buffer_migrate_page,
+       .migrate_folio  = buffer_migrate_folio,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
 };
@@ -1673,7 +1673,7 @@ const struct address_space_operations ntfs_compressed_aops = {
        .writepage      = ntfs_writepage,
        .dirty_folio    = block_dirty_folio,
 #endif /* NTFS_RW */
-       .migratepage    = buffer_migrate_page,
+       .migrate_folio  = buffer_migrate_folio,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
 };
@@ -1688,7 +1688,7 @@ const struct address_space_operations ntfs_mst_aops = {
        .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
        .dirty_folio    = filemap_dirty_folio,
 #endif /* NTFS_RW */
-       .migratepage    = buffer_migrate_page,
+       .migrate_folio  = buffer_migrate_folio,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
 };
diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h
index 934d5f7..0cac545 100644
@@ -74,13 +74,8 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
 {
        struct page *page = read_mapping_page(mapping, index, NULL);
 
-       if (!IS_ERR(page)) {
+       if (!IS_ERR(page))
                kmap(page);
-               if (!PageError(page))
-                       return page;
-               ntfs_unmap_page(page);
-               return ERR_PTR(-EIO);
-       }
        return page;
 }
 
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 46ed69b..58b660d 100644
@@ -219,11 +219,6 @@ do_non_resident_extend:
                        err = PTR_ERR(page);
                        goto init_err_out;
                }
-               if (unlikely(PageError(page))) {
-                       put_page(page);
-                       err = -EIO;
-                       goto init_err_out;
-               }
                /*
                 * Update the initialized size in the ntfs inode.  This is
                 * enough to make ntfs_writepage() work.
index d100a06..80104af 100644
@@ -851,12 +851,10 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
 static int ntfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
 {
-       struct inode *inode = mapping->host;
-       struct ntfs_inode *ni = ntfs_i(inode);
        /* Redirect call to 'ntfs_writepage' for resident files. */
-       get_block_t *get_block = is_resident(ni) ? NULL : &ntfs_get_block;
-
-       return mpage_writepages(mapping, wbc, get_block);
+       if (is_resident(ntfs_i(mapping->host)))
+               return generic_writepages(mapping, wbc);
+       return mpage_writepages(mapping, wbc, ntfs_get_block);
 }
 
 static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
index 3a8abf1..8dbdca0 100644
@@ -896,13 +896,8 @@ static inline struct page *ntfs_map_page(struct address_space *mapping,
 {
        struct page *page = read_mapping_page(mapping, index, NULL);
 
-       if (!IS_ERR(page)) {
+       if (!IS_ERR(page))
                kmap(page);
-               if (!PageError(page))
-                       return page;
-               ntfs_unmap_page(page);
-               return ERR_PTR(-EIO);
-       }
        return page;
 }
 
index 304ed2b..af4157f 100644
@@ -277,16 +277,14 @@ out:
 
 static int ocfs2_read_folio(struct file *file, struct folio *folio)
 {
-       struct page *page = &folio->page;
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = folio->mapping->host;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
-       loff_t start = (loff_t)page->index << PAGE_SHIFT;
+       loff_t start = folio_pos(folio);
        int ret, unlock = 1;
 
-       trace_ocfs2_readpage((unsigned long long)oi->ip_blkno,
-                            (page ? page->index : 0));
+       trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
 
-       ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
+       ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
        if (ret != 0) {
                if (ret == AOP_TRUNCATED_PAGE)
                        unlock = 0;
@@ -296,11 +294,11 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
 
        if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
                /*
-                * Unlock the page and cycle ip_alloc_sem so that we don't
+                * Unlock the folio and cycle ip_alloc_sem so that we don't
                 * busyloop waiting for ip_alloc_sem to unlock
                 */
                ret = AOP_TRUNCATED_PAGE;
-               unlock_page(page);
+               folio_unlock(folio);
                unlock = 0;
                down_read(&oi->ip_alloc_sem);
                up_read(&oi->ip_alloc_sem);
@@ -313,21 +311,21 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
         * block_read_full_folio->get_block freaks out if it is asked to read
         * beyond the end of a file, so we check here.  Callers
         * (generic_file_read, vm_ops->fault) are clever enough to check i_size
-        * and notice that the page they just read isn't needed.
+        * and notice that the folio they just read isn't needed.
         *
         * XXX sys_readahead() seems to get that wrong?
         */
        if (start >= i_size_read(inode)) {
-               zero_user(page, 0, PAGE_SIZE);
-               SetPageUptodate(page);
+               folio_zero_segment(folio, 0, folio_size(folio));
+               folio_mark_uptodate(folio);
                ret = 0;
                goto out_alloc;
        }
 
        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
-               ret = ocfs2_readpage_inline(inode, page);
+               ret = ocfs2_readpage_inline(inode, &folio->page);
        else
-               ret = block_read_full_folio(page_folio(page), ocfs2_get_block);
+               ret = block_read_full_folio(folio, ocfs2_get_block);
        unlock = 0;
 
 out_alloc:
@@ -336,7 +334,7 @@ out_inode_unlock:
        ocfs2_inode_unlock(inode, 0);
 out:
        if (unlock)
-               unlock_page(page);
+               folio_unlock(folio);
        return ret;
 }
 
@@ -2464,7 +2462,7 @@ const struct address_space_operations ocfs2_aops = {
        .direct_IO              = ocfs2_direct_IO,
        .invalidate_folio       = block_invalidate_folio,
        .release_folio          = ocfs2_release_folio,
-       .migratepage            = buffer_migrate_page,
+       .migrate_folio          = buffer_migrate_folio,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
 };
index e04358a..1358981 100644
@@ -3146,48 +3146,18 @@ int ocfs2_cow_sync_writeback(struct super_block *sb,
                             struct inode *inode,
                             u32 cpos, u32 num_clusters)
 {
-       int ret = 0;
-       loff_t offset, end, map_end;
-       pgoff_t page_index;
-       struct page *page;
+       int ret;
+       loff_t start, end;
 
        if (ocfs2_should_order_data(inode))
                return 0;
 
-       offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
-       end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
+       start = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
+       end = start + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits) - 1;
 
-       ret = filemap_fdatawrite_range(inode->i_mapping,
-                                      offset, end - 1);
-       if (ret < 0) {
+       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       if (ret < 0)
                mlog_errno(ret);
-               return ret;
-       }
-
-       while (offset < end) {
-               page_index = offset >> PAGE_SHIFT;
-               map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
-               if (map_end > end)
-                       map_end = end;
-
-               page = find_or_create_page(inode->i_mapping,
-                                          page_index, GFP_NOFS);
-               BUG_ON(!page);
-
-               wait_on_page_writeback(page);
-               if (PageError(page)) {
-                       ret = -EIO;
-                       mlog_errno(ret);
-               } else
-                       mark_page_accessed(page);
-
-               unlock_page(page);
-               put_page(page);
-               page = NULL;
-               offset = map_end;
-               if (ret)
-                       break;
-       }
 
        return ret;
 }
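
The removed loop wrote the range back and then waited on (and error-checked) each page by hand; filemap_write_and_wait_range() does both steps for an inclusive byte range and reports writeback errors itself, so the PageError() check disappears. A minimal sketch of the resulting pattern (demo_sync_range is an invented name):

#include <linux/fs.h>

static int demo_sync_range(struct inode *inode, loff_t start, loff_t end)
{
        /* @start and @end are byte offsets; @end is inclusive. */
        return filemap_write_and_wait_range(inode->i_mapping, start, end);
}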
index 5ce27dd..7a8c0c6 100644
@@ -307,7 +307,7 @@ static int orangefs_read_folio(struct file *file, struct folio *folio)
 
        ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
                        folio_size(folio), inode->i_size, NULL, NULL, file);
-       /* this will only zero remaining unread portions of the page data */
+       /* this will only zero remaining unread portions of the folio data */
        iov_iter_zero(~0U, &iter);
        /* takes care of potential aliasing */
        flush_dcache_folio(folio);
@@ -315,8 +315,6 @@ static int orangefs_read_folio(struct file *file, struct folio *folio)
                folio_set_error(folio);
        } else {
                folio_mark_uptodate(folio);
-               if (folio_test_error(folio))
-                       folio_clear_error(folio);
                ret = 0;
        }
        /* unlock the folio after the ->read_folio() routine completes */
index bd07383..4366413 100644
@@ -440,16 +440,9 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n)
         */
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL);
-       if (!IS_ERR(page)) {
+       if (!IS_ERR(page))
                kmap(page);
-               if (PageError(page))
-                       goto fail;
-       }
        return page;
-
-fail:
-       reiserfs_put_page(page);
-       return ERR_PTR(-EIO);
 }
 
 static inline __u32 xattr_hash(const char *msg, int len)
index 881a306..046a513 100644
@@ -149,16 +149,7 @@ static int generic_remap_check_len(struct inode *inode_in,
 /* Read a page's worth of file data into the page cache. */
 static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos)
 {
-       struct folio *folio;
-
-       folio = read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
-       if (IS_ERR(folio))
-               return folio;
-       if (!folio_test_uptodate(folio)) {
-               folio_put(folio);
-               return ERR_PTR(-EIO);
-       }
-       return folio;
+       return read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
 }
 
 /*
index a8e495d..7f0904b 100644
@@ -454,7 +454,7 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
        int expected = index == file_end ?
                        (i_size_read(inode) & (msblk->block_size - 1)) :
                         msblk->block_size;
-       int res;
+       int res = 0;
        void *pageaddr;
 
        TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
@@ -467,14 +467,15 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
        if (index < file_end || squashfs_i(inode)->fragment_block ==
                                        SQUASHFS_INVALID_BLK) {
                u64 block = 0;
-               int bsize = read_blocklist(inode, index, &block);
-               if (bsize < 0)
+
+               res = read_blocklist(inode, index, &block);
+               if (res < 0)
                        goto error_out;
 
-               if (bsize == 0)
+               if (res == 0)
                        res = squashfs_readpage_sparse(page, expected);
                else
-                       res = squashfs_readpage_block(page, block, bsize, expected);
+                       res = squashfs_readpage_block(page, block, res, expected);
        } else
                res = squashfs_readpage_fragment(page, expected);
 
@@ -488,11 +489,11 @@ out:
        memset(pageaddr, 0, PAGE_SIZE);
        kunmap_atomic(pageaddr);
        flush_dcache_page(page);
-       if (!PageError(page))
+       if (res == 0)
                SetPageUptodate(page);
        unlock_page(page);
 
-       return 0;
+       return res;
 }
 
 
index 04ced15..f2353dd 100644
@@ -1461,29 +1461,6 @@ static bool ubifs_dirty_folio(struct address_space *mapping,
        return ret;
 }
 
-#ifdef CONFIG_MIGRATION
-static int ubifs_migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode)
-{
-       int rc;
-
-       rc = migrate_page_move_mapping(mapping, newpage, page, 0);
-       if (rc != MIGRATEPAGE_SUCCESS)
-               return rc;
-
-       if (PagePrivate(page)) {
-               detach_page_private(page);
-               attach_page_private(newpage, (void *)1);
-       }
-
-       if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
-       else
-               migrate_page_states(newpage, page);
-       return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
 static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
 {
        struct inode *inode = folio->mapping->host;
@@ -1649,10 +1626,8 @@ const struct address_space_operations ubifs_file_address_operations = {
        .write_end      = ubifs_write_end,
        .invalidate_folio = ubifs_invalidate_folio,
        .dirty_folio    = ubifs_dirty_folio,
-#ifdef CONFIG_MIGRATION
-       .migratepage    = ubifs_migrate_page,
-#endif
-       .release_folio    = ubifs_release_folio,
+       .migrate_folio  = filemap_migrate_folio,
+       .release_folio  = ubifs_release_folio,
 };
 
 const struct inode_operations ubifs_file_inode_operations = {
index b721d0b..391efaf 100644
@@ -193,7 +193,7 @@ static struct page *ufs_get_page(struct inode *dir, unsigned long n)
        if (!IS_ERR(page)) {
                kmap(page);
                if (unlikely(!PageChecked(page))) {
-                       if (PageError(page) || !ufs_check_page(page))
+                       if (!ufs_check_page(page))
                                goto fail;
                }
        }
index 4fa633f..08ddf41 100644
@@ -264,17 +264,6 @@ struct page *ufs_get_locked_page(struct address_space *mapping,
                        put_page(page);
                        return NULL;
                }
-
-               if (!PageUptodate(page) || PageError(page)) {
-                       unlock_page(page);
-                       put_page(page);
-
-                       printk(KERN_ERR "ufs_change_blocknr: "
-                              "can not read page: ino %lu, index: %lu\n",
-                              inode->i_ino, index);
-
-                       return ERR_PTR(-EIO);
-               }
        }
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
index 8ec38b2..5d1a995 100644
@@ -570,7 +570,7 @@ const struct address_space_operations xfs_address_space_operations = {
        .invalidate_folio       = iomap_invalidate_folio,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = noop_direct_IO,
-       .migratepage            = iomap_migrate_page,
+       .migrate_folio          = filemap_migrate_folio,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
        .swap_activate          = xfs_iomap_swapfile_activate,
index 295ed36..c5fa8ad 100644
@@ -270,7 +270,7 @@ static const struct address_space_operations zonefs_file_aops = {
        .dirty_folio            = filemap_dirty_folio,
        .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
-       .migratepage            = iomap_migrate_page,
+       .migrate_folio          = filemap_migrate_folio,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
        .direct_IO              = noop_direct_IO,
index edb7f6d..5ca2d56 100644
@@ -57,7 +57,6 @@ struct balloon_dev_info {
        struct list_head pages;         /* Pages enqueued & handed to Host */
        int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
                        struct page *page, enum migrate_mode mode);
-       struct inode *inode;
 };
 
 extern struct page *balloon_page_alloc(void);
@@ -75,11 +74,10 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
        spin_lock_init(&balloon->pages_lock);
        INIT_LIST_HEAD(&balloon->pages);
        balloon->migratepage = NULL;
-       balloon->inode = NULL;
 }
 
 #ifdef CONFIG_BALLOON_COMPACTION
-extern const struct address_space_operations balloon_aops;
+extern const struct movable_operations balloon_mops;
 
 /*
  * balloon_page_insert - insert a page into the balloon's page list and make
@@ -94,7 +92,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
                                       struct page *page)
 {
        __SetPageOffline(page);
-       __SetPageMovable(page, balloon->inode->i_mapping);
+       __SetPageMovable(page, &balloon_mops);
        set_page_private(page, (unsigned long)balloon);
        list_add(&page->lru, &balloon->pages);
 }
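
With the inode field gone, a balloon driver only needs its balloon_dev_info; balloon_page_insert() tags the page with the shared balloon_mops. A minimal sketch of the enqueue side, assuming the usual pages_lock discipline (demo_balloon_enqueue is an invented name):

#include <linux/balloon_compaction.h>
#include <linux/spinlock.h>

static void demo_balloon_enqueue(struct balloon_dev_info *b_dev_info,
                                 struct page *page)
{
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        /* Marks the page offline + movable and links it on b_dev_info->pages. */
        balloon_page_insert(b_dev_info, page);
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}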
index bb68eb6..307445d 100644
@@ -259,14 +259,16 @@ static inline vm_fault_t block_page_mkwrite_return(int err)
 }
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_write_begin(struct address_space *, loff_t, unsigned len,
-                               struct page **, void **, get_block_t*);
-int nobh_write_end(struct file *, struct address_space *,
-                               loff_t, unsigned, unsigned,
-                               struct page *, void *);
-int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_writepage(struct page *page, get_block_t *get_block,
-                        struct writeback_control *wbc);
+
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_folio(struct address_space *,
+               struct folio *dst, struct folio *src, enum migrate_mode);
+extern int buffer_migrate_folio_norefs(struct address_space *,
+               struct folio *dst, struct folio *src, enum migrate_mode);
+#else
+#define buffer_migrate_folio NULL
+#define buffer_migrate_folio_norefs NULL
+#endif
 
 void buffer_init(void);
 
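With the declarations moved here, a buffer-head based filesystem wires up whichever variant matches its reference rules (see the kernel-doc added in mm/migrate.c further down). A sketch under invented demo_* names, with the read/write methods elided:

#include <linux/buffer_head.h>
#include <linux/fs.h>

static const struct address_space_operations demo_fs_aops = {
        /* ... read_folio, writepage, etc. elided ... */
        .migrate_folio  = buffer_migrate_folio,         /* bhs used only under folio lock */
};

static const struct address_space_operations demo_blockdev_aops = {
        /* ... */
        .migrate_folio  = buffer_migrate_folio_norefs,  /* bhs referenced directly */
};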
index 9f131e5..7d32b7e 100644
@@ -383,13 +383,11 @@ struct address_space_operations {
        void (*free_folio)(struct folio *folio);
        ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
        /*
-        * migrate the contents of a page to the specified target. If
+        * migrate the contents of a folio to the specified target. If
         * migrate_mode is MIGRATE_ASYNC, it must not block.
         */
-       int (*migratepage) (struct address_space *,
-                       struct page *, struct page *, enum migrate_mode);
-       bool (*isolate_page)(struct page *, isolate_mode_t);
-       void (*putback_page)(struct page *);
+       int (*migrate_folio)(struct address_space *, struct folio *dst,
+                       struct folio *src, enum migrate_mode);
        int (*launder_folio)(struct folio *);
        bool (*is_partially_uptodate) (struct folio *, size_t from,
                        size_t count);
@@ -3347,18 +3345,6 @@ extern int generic_check_addressable(unsigned, u64);
 
 extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
 
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct address_space *,
-                               struct page *, struct page *,
-                               enum migrate_mode);
-extern int buffer_migrate_page_norefs(struct address_space *,
-                               struct page *, struct page *,
-                               enum migrate_mode);
-#else
-#define buffer_migrate_page NULL
-#define buffer_migrate_page_norefs NULL
-#endif
-
 int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
                unsigned int ia_valid);
 int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
index e552097..758a112 100644
@@ -231,12 +231,6 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
 bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
-#ifdef CONFIG_MIGRATION
-int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
-               struct page *page, enum migrate_mode mode);
-#else
-#define iomap_migrate_page NULL
-#endif
 int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops);
 int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
index 069a89e..ae5bb67 100644
@@ -19,27 +19,59 @@ struct migration_target_control;
  */
 #define MIGRATEPAGE_SUCCESS            0
 
+/**
+ * struct movable_operations - Driver page migration
+ * @isolate_page:
+ * The VM calls this function to prepare the page to be moved.  The page
+ * is locked and the driver should not unlock it.  The driver should
+ * return ``true`` if the page is movable and ``false`` if it is not
+ * currently movable.  After this function returns, the VM uses the
+ * page->lru field, so the driver must preserve any information which
+ * is usually stored here.
+ *
+ * @migrate_page:
+ * After isolation, the VM calls this function with the isolated
+ * @src page.  The driver should copy the contents of the
+ * @src page to the @dst page and set up the fields of @dst page.
+ * Both pages are locked.
+ * If page migration is successful, the driver should call
+ * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
+ * If the driver cannot migrate the page at the moment, it can return
+ * -EAGAIN.  The VM interprets this as a temporary migration failure and
+ * will retry it later.  Any other error value is a permanent migration
+ * failure and migration will not be retried.
+ * The driver shouldn't touch the @src->lru field while in the
+ * migrate_page() function.  It may write to @dst->lru.
+ *
+ * @putback_page:
+ * If migration fails on the isolated page, the VM informs the driver
+ * that the page is no longer a candidate for migration by calling
+ * this function.  The driver should put the isolated page back into
+ * its own data structure.
+ */
+struct movable_operations {
+       bool (*isolate_page)(struct page *, isolate_mode_t);
+       int (*migrate_page)(struct page *dst, struct page *src,
+                       enum migrate_mode);
+       void (*putback_page)(struct page *);
+};
+
 /* Defined in mm/debug.c: */
 extern const char *migrate_reason_names[MR_TYPES];
 
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
-                       struct page *newpage, struct page *page,
-                       enum migrate_mode mode);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+               struct folio *src, enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
                unsigned long private, enum migrate_mode mode, int reason,
                unsigned int *ret_succeeded);
 extern struct page *alloc_migration_target(struct page *page, unsigned long private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 
-extern void migrate_page_states(struct page *newpage, struct page *page);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
-                                 struct page *newpage, struct page *page);
-extern int migrate_page_move_mapping(struct address_space *mapping,
-               struct page *newpage, struct page *page, int extra_count);
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+               struct folio *dst, struct folio *src);
 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
                                spinlock_t *ptl);
 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
@@ -60,15 +92,8 @@ static inline struct page *alloc_migration_target(struct page *page,
 static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
        { return -EBUSY; }
 
-static inline void migrate_page_states(struct page *newpage, struct page *page)
-{
-}
-
-static inline void migrate_page_copy(struct page *newpage,
-                                    struct page *page) {}
-
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
-                                 struct page *newpage, struct page *page)
+                                 struct folio *dst, struct folio *src)
 {
        return -ENOSYS;
 }
@@ -91,13 +116,13 @@ static inline int next_demotion_node(int node)
 #endif
 
 #ifdef CONFIG_COMPACTION
-extern int PageMovable(struct page *page);
-extern void __SetPageMovable(struct page *page, struct address_space *mapping);
-extern void __ClearPageMovable(struct page *page);
+bool PageMovable(struct page *page);
+void __SetPageMovable(struct page *page, const struct movable_operations *ops);
+void __ClearPageMovable(struct page *page);
 #else
-static inline int PageMovable(struct page *page) { return 0; }
+static inline bool PageMovable(struct page *page) { return false; }
 static inline void __SetPageMovable(struct page *page,
-                               struct address_space *mapping)
+               const struct movable_operations *ops)
 {
 }
 static inline void __ClearPageMovable(struct page *page)
@@ -110,6 +135,15 @@ static inline bool folio_test_movable(struct folio *folio)
        return PageMovable(&folio->page);
 }
 
+static inline
+const struct movable_operations *page_movable_ops(struct page *page)
+{
+       VM_BUG_ON(!__PageMovable(page));
+
+       return (const struct movable_operations *)
+               ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
+}
+
 #ifdef CONFIG_NUMA_BALANCING
 extern int migrate_misplaced_page(struct page *page,
                                  struct vm_area_struct *vma, int node);
index 43986f7..1bdc39d 100644
@@ -19,7 +19,5 @@ void mpage_readahead(struct readahead_control *, get_block_t get_block);
 int mpage_read_folio(struct folio *folio, get_block_t get_block);
 int mpage_writepages(struct address_space *mapping,
                struct writeback_control *wbc, get_block_t get_block);
-int mpage_writepage(struct page *page, get_block_t *get_block,
-               struct writeback_control *wbc);
 
 #endif
index 1b18dfa..f2402dd 100644
@@ -276,19 +276,18 @@ struct netfs_cache_ops {
 };
 
 struct readahead_control;
-extern void netfs_readahead(struct readahead_control *);
+void netfs_readahead(struct readahead_control *);
 int netfs_read_folio(struct file *, struct folio *);
-extern int netfs_write_begin(struct netfs_inode *,
-                            struct file *, struct address_space *,
-                            loff_t, unsigned int, struct folio **,
-                            void **);
-
-extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
-extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
-                                enum netfs_sreq_ref_trace what);
-extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
-                                bool was_async, enum netfs_sreq_ref_trace what);
-extern void netfs_stats_show(struct seq_file *);
+int netfs_write_begin(struct netfs_inode *, struct file *,
+               struct address_space *, loff_t pos, unsigned int len,
+               struct folio **, void **fsdata);
+
+void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+                         enum netfs_sreq_ref_trace what);
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+                         bool was_async, enum netfs_sreq_ref_trace what);
+void netfs_stats_show(struct seq_file *);
 
 /**
  * netfs_inode - Get the netfs inode context from the inode
index e66f7aa..3f5490f 100644
@@ -639,7 +639,7 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
  * structure which KSM associates with that merged page.  See ksm.h.
  *
  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then page->mapping points a struct address_space.
+ * page and then page->mapping points to a struct movable_operations.
  *
  * Please note that, confusingly, "page_mapping" refers to the inode
  * address_space which maps the page from disk; whereas "page_mapped"
index ce96866..cc9adba 100644
@@ -718,9 +718,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
        return head + (index & (thp_nr_pages(head) - 1));
 }
 
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
-                       pgoff_t end, unsigned int nr_pages,
-                       struct page **pages);
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+               pgoff_t end, struct folio_batch *fbatch);
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
@@ -1079,6 +1078,12 @@ static inline int __must_check write_one_page(struct page *page)
 int __set_page_dirty_nobuffers(struct page *page);
 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
 
+#ifdef CONFIG_MIGRATION
+int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
+               struct folio *src, enum migrate_mode mode);
+#else
+#define filemap_migrate_folio NULL
+#endif
 void page_endio(struct page *page, bool is_write, int err);
 
 void folio_end_private_2(struct folio *folio);
@@ -1098,8 +1103,6 @@ size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
 size_t fault_in_readable(const char __user *uaddr, size_t size);
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-               pgoff_t index, gfp_t gfp);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                pgoff_t index, gfp_t gfp);
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
@@ -1107,10 +1110,6 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 void filemap_remove_folio(struct folio *folio);
 void delete_from_page_cache(struct page *page);
 void __filemap_remove_folio(struct folio *folio, void *shadow);
-static inline void __delete_from_page_cache(struct page *page, void *shadow)
-{
-       __filemap_remove_folio(page_folio(page), shadow);
-}
 void replace_page_cache_page(struct page *old, struct page *new);
 void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct folio_batch *fbatch);
@@ -1119,22 +1118,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp);
 loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
                int whence);
 
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-               struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-       int error;
-
-       __SetPageLocked(page);
-       error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-       if (unlikely(error))
-               __ClearPageLocked(page);
-       return error;
-}
-
 /* Must be non-static for BPF error injection */
 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
                pgoff_t index, gfp_t gfp, void **shadowp);
index 67b1246..6649154 100644
@@ -27,16 +27,6 @@ struct pagevec {
 
 void __pagevec_release(struct pagevec *pvec);
 void __pagevec_lru_add(struct pagevec *pvec);
-unsigned pagevec_lookup_range(struct pagevec *pvec,
-                             struct address_space *mapping,
-                             pgoff_t *start, pgoff_t end);
-static inline unsigned pagevec_lookup(struct pagevec *pvec,
-                                     struct address_space *mapping,
-                                     pgoff_t *start)
-{
-       return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
-}
-
 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                xa_mark_t tag);
index 0c0fed1..8672a71 100644
@@ -438,7 +438,8 @@ static inline bool node_reclaim_enabled(void)
        return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
 }
 
-extern void check_move_unevictable_pages(struct pagevec *pvec);
+void check_move_unevictable_folios(struct folio_batch *fbatch);
+void check_move_unevictable_pages(struct pagevec *pvec);
 
 extern void kswapd_run(int nid);
 extern void kswapd_stop(int nid);
index f724129..6325d1d 100644
 
 /* Since UDF 2.01 is ISO 13346 based... */
 #define UDF_SUPER_MAGIC                0x15013346
-#define BALLOON_KVM_MAGIC      0x13661366
-#define ZSMALLOC_MAGIC         0x58295829
 #define DMA_BUF_MAGIC          0x444d4142      /* "DMAB" */
 #define DEVMEM_MAGIC           0x454d444d      /* "DMEM" */
-#define Z3FOLD_MAGIC           0x33
-#define PPC_CMM_MAGIC          0xc7571590
 #define SECRETMEM_MAGIC                0x5345434d      /* "SECM" */
 
 #endif /* __LINUX_MAGIC_H__ */
index 4b8eab4..22c96fe 100644
@@ -228,10 +228,8 @@ static void balloon_page_putback(struct page *page)
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 }
 
-
 /* move_to_new_page() counterpart for a ballooned page */
-static int balloon_page_migrate(struct address_space *mapping,
-               struct page *newpage, struct page *page,
+static int balloon_page_migrate(struct page *newpage, struct page *page,
                enum migrate_mode mode)
 {
        struct balloon_dev_info *balloon = balloon_page_device(page);
@@ -250,11 +248,11 @@ static int balloon_page_migrate(struct address_space *mapping,
        return balloon->migratepage(balloon, newpage, page, mode);
 }
 
-const struct address_space_operations balloon_aops = {
-       .migratepage = balloon_page_migrate,
+const struct movable_operations balloon_mops = {
+       .migrate_page = balloon_page_migrate,
        .isolate_page = balloon_page_isolate,
        .putback_page = balloon_page_putback,
 };
-EXPORT_SYMBOL_GPL(balloon_aops);
+EXPORT_SYMBOL_GPL(balloon_mops);
 
 #endif /* CONFIG_BALLOON_COMPACTION */
index 1f89b96..a2c53fc 100644
@@ -110,28 +110,27 @@ static void split_map_pages(struct list_head *list)
 }
 
 #ifdef CONFIG_COMPACTION
-
-int PageMovable(struct page *page)
+bool PageMovable(struct page *page)
 {
-       struct address_space *mapping;
+       const struct movable_operations *mops;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (!__PageMovable(page))
-               return 0;
+               return false;
 
-       mapping = page_mapping(page);
-       if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
-               return 1;
+       mops = page_movable_ops(page);
+       if (mops)
+               return true;
 
-       return 0;
+       return false;
 }
 EXPORT_SYMBOL(PageMovable);
 
-void __SetPageMovable(struct page *page, struct address_space *mapping)
+void __SetPageMovable(struct page *page, const struct movable_operations *mops)
 {
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
-       page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+       VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
+       page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
 }
 EXPORT_SYMBOL(__SetPageMovable);
 
@@ -139,12 +138,10 @@ void __ClearPageMovable(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        /*
-        * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
-        * flag so that VM can catch up released page by driver after isolation.
-        * With it, VM migration doesn't try to put it back.
+        * This page still has the type of a movable page, but it's
+        * actually not movable any more.
         */
-       page->mapping = (void *)((unsigned long)page->mapping &
-                               PAGE_MAPPING_MOVABLE);
+       page->mapping = (void *)PAGE_MAPPING_MOVABLE;
 }
 EXPORT_SYMBOL(__ClearPageMovable);
 
@@ -1034,7 +1031,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
                        /*
                         * Only pages without mappings or that have a
-                        * ->migratepage callback are possible to migrate
+                        * ->migrate_folio callback are possible to migrate
                         * without blocking. However, we can be racing with
                         * truncation so it's necessary to lock the page
                         * to stabilise the mapping as truncation holds
@@ -1045,7 +1042,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                goto isolate_fail_put;
 
                        mapping = page_mapping(page);
-                       migrate_dirty = !mapping || mapping->a_ops->migratepage;
+                       migrate_dirty = !mapping ||
+                                       mapping->a_ops->migrate_folio;
                        unlock_page(page);
                        if (!migrate_dirty)
                                goto isolate_fail_put;
index 254931a..0dec96e 100644
@@ -929,26 +929,6 @@ error:
 }
 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 
-/**
- * add_to_page_cache_locked - add a locked page to the pagecache
- * @page:      page to add
- * @mapping:   the page's address_space
- * @offset:    page index
- * @gfp_mask:  page allocation mode
- *
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU.  The caller must do that.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-               pgoff_t offset, gfp_t gfp_mask)
-{
-       return __filemap_add_folio(mapping, page_folio(page), offset,
-                                         gfp_mask, NULL);
-}
-EXPORT_SYMBOL(add_to_page_cache_locked);
-
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                                pgoff_t index, gfp_t gfp)
 {
@@ -2151,65 +2131,46 @@ put:
        return folio_batch_count(fbatch);
 }
 
-static inline
-bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
-{
-       if (!folio_test_large(folio) || folio_test_hugetlb(folio))
-               return false;
-       if (index >= max)
-               return false;
-       return index < folio->index + folio_nr_pages(folio) - 1;
-}
-
 /**
- * find_get_pages_range - gang pagecache lookup
+ * filemap_get_folios - Get a batch of folios
  * @mapping:   The address_space to search
  * @start:     The starting page index
  * @end:       The final page index (inclusive)
- * @nr_pages:  The maximum number of pages
- * @pages:     Where the resulting pages are placed
+ * @fbatch:    The batch to fill.
  *
- * find_get_pages_range() will search for and return a group of up to @nr_pages
- * pages in the mapping starting at index @start and up to index @end
- * (inclusive).  The pages are placed at @pages.  find_get_pages_range() takes
- * a reference against the returned pages.
+ * Search for and return a batch of folios in the mapping starting at
+ * index @start and up to index @end (inclusive).  The folios are returned
+ * in @fbatch with an elevated reference count.
  *
- * The search returns a group of mapping-contiguous pages with ascending
- * indexes.  There may be holes in the indices due to not-present pages.
- * We also update @start to index the next page for the traversal.
+ * The first folio may start before @start; if it does, it will contain
+ * @start.  The final folio may extend beyond @end; if it does, it will
+ * contain @end.  The folios have ascending indices.  There may be gaps
+ * between the folios if there are indices which have no folio in the
+ * page cache.  If folios are added to or removed from the page cache
+ * while this is running, they may or may not be found by this call.
  *
- * Return: the number of pages which were found. If this number is
- * smaller than @nr_pages, the end of specified range has been
- * reached.
+ * Return: The number of folios which were found.
+ * We also update @start to index the next folio for the traversal.
  */
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
-                             pgoff_t end, unsigned int nr_pages,
-                             struct page **pages)
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+               pgoff_t end, struct folio_batch *fbatch)
 {
        XA_STATE(xas, &mapping->i_pages, *start);
        struct folio *folio;
-       unsigned ret = 0;
-
-       if (unlikely(!nr_pages))
-               return 0;
 
        rcu_read_lock();
-       while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
+       while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
                /* Skip over shadow, swap and DAX entries */
                if (xa_is_value(folio))
                        continue;
+               if (!folio_batch_add(fbatch, folio)) {
+                       unsigned long nr = folio_nr_pages(folio);
 
-again:
-               pages[ret] = folio_file_page(folio, xas.xa_index);
-               if (++ret == nr_pages) {
-                       *start = xas.xa_index + 1;
+                       if (folio_test_hugetlb(folio))
+                               nr = 1;
+                       *start = folio->index + nr;
                        goto out;
                }
-               if (folio_more_pages(folio, xas.xa_index, end)) {
-                       xas.xa_index++;
-                       folio_ref_inc(folio);
-                       goto again;
-               }
        }
 
        /*
@@ -2225,7 +2186,18 @@ again:
 out:
        rcu_read_unlock();
 
-       return ret;
+       return folio_batch_count(fbatch);
+}
+EXPORT_SYMBOL(filemap_get_folios);
+
+static inline
+bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
+{
+       if (!folio_test_large(folio) || folio_test_hugetlb(folio))
+               return false;
+       if (index >= max)
+               return false;
+       return index < folio->index + folio_nr_pages(folio) - 1;
 }
 
 /**
@@ -2413,7 +2385,7 @@ retry:
        rcu_read_unlock();
 }
 
-static int filemap_read_folio(struct file *file, struct address_space *mapping,
+static int filemap_read_folio(struct file *file, filler_t filler,
                struct folio *folio)
 {
        int error;
@@ -2425,7 +2397,7 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
         */
        folio_clear_error(folio);
        /* Start the actual read. The read will unlock the page. */
-       error = mapping->a_ops->read_folio(file, folio);
+       error = filler(file, folio);
        if (error)
                return error;
 
@@ -2434,7 +2406,8 @@ static int filemap_read_folio(struct file *file, struct address_space *mapping,
                return error;
        if (folio_test_uptodate(folio))
                return 0;
-       shrink_readahead_size_eio(&file->f_ra);
+       if (file)
+               shrink_readahead_size_eio(&file->f_ra);
        return -EIO;
 }
 
@@ -2507,7 +2480,8 @@ static int filemap_update_page(struct kiocb *iocb,
        if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
                goto unlock;
 
-       error = filemap_read_folio(iocb->ki_filp, mapping, folio);
+       error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
+                       folio);
        goto unlock_mapping;
 unlock:
        folio_unlock(folio);
@@ -2550,7 +2524,7 @@ static int filemap_create_folio(struct file *file,
        if (error)
                goto error;
 
-       error = filemap_read_folio(file, mapping, folio);
+       error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
        if (error)
                goto error;
 
@@ -3234,7 +3208,7 @@ page_not_uptodate:
         * and we need to check for errors.
         */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
-       error = filemap_read_folio(file, mapping, folio);
+       error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
        if (fpin)
                goto out_retry;
        folio_put(folio);
@@ -3524,20 +3498,7 @@ repeat:
                        return ERR_PTR(err);
                }
 
-filler:
-               err = filler(file, folio);
-               if (err < 0) {
-                       folio_put(folio);
-                       return ERR_PTR(err);
-               }
-
-               folio_wait_locked(folio);
-               if (!folio_test_uptodate(folio)) {
-                       folio_put(folio);
-                       return ERR_PTR(-EIO);
-               }
-
-               goto out;
+               goto filler;
        }
        if (folio_test_uptodate(folio))
                goto out;
@@ -3560,14 +3521,14 @@ filler:
                goto out;
        }
 
-       /*
-        * A previous I/O error may have been due to temporary
-        * failures.
-        * Clear page error before actual read, PG_error will be
-        * set again if read page fails.
-        */
-       folio_clear_error(folio);
-       goto filler;
+filler:
+       err = filemap_read_folio(file, filler, folio);
+       if (err) {
+               folio_put(folio);
+               if (err == AOP_TRUNCATED_PAGE)
+                       goto repeat;
+               return ERR_PTR(err);
+       }
 
 out:
        folio_mark_accessed(folio);
index 20bc15b..458618c 100644
@@ -51,28 +51,6 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-#ifdef CONFIG_MIGRATION
-int migrate_page_move_mapping(struct address_space *mapping,
-               struct page *newpage, struct page *page, int extra_count)
-{
-       return folio_migrate_mapping(mapping, page_folio(newpage),
-                                       page_folio(page), extra_count);
-}
-EXPORT_SYMBOL(migrate_page_move_mapping);
-
-void migrate_page_states(struct page *newpage, struct page *page)
-{
-       folio_migrate_flags(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_states);
-
-void migrate_page_copy(struct page *newpage, struct page *page)
-{
-       folio_migrate_copy(page_folio(newpage), page_folio(page));
-}
-EXPORT_SYMBOL(migrate_page_copy);
-#endif
-
 bool set_page_writeback(struct page *page)
 {
        return folio_start_writeback(page_folio(page));
index 834f288..1596508 100644
@@ -18,6 +18,7 @@
 #include <linux/shrinker.h>
 #include <linux/mm_inline.h>
 #include <linux/swapops.h>
+#include <linux/backing-dev.h>
 #include <linux/dax.h>
 #include <linux/khugepaged.h>
 #include <linux/freezer.h>
@@ -2440,11 +2441,15 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                __split_huge_page_tail(head, i, lruvec, list);
                /* Some pages can be beyond EOF: drop them from page cache */
                if (head[i].index >= end) {
-                       ClearPageDirty(head + i);
-                       __delete_from_page_cache(head + i, NULL);
+                       struct folio *tail = page_folio(head + i);
+
                        if (shmem_mapping(head->mapping))
                                shmem_uncharge(head->mapping->host, 1);
-                       put_page(head + i);
+                       else if (folio_test_clear_dirty(tail))
+                               folio_account_cleaned(tail,
+                                       inode_to_wb(folio->mapping->host));
+                       __filemap_remove_folio(tail, NULL);
+                       folio_put(tail);
                } else if (!PageAnon(page)) {
                        __xa_store(&head->mapping->i_pages, head[i].index,
                                        head + i, 0);
index a18c071..aa39534 100644
@@ -5419,19 +5419,25 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                           pgoff_t idx)
 {
+       struct folio *folio = page_folio(page);
        struct inode *inode = mapping->host;
        struct hstate *h = hstate_inode(inode);
-       int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+       int err;
 
-       if (err)
+       __folio_set_locked(folio);
+       err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+
+       if (unlikely(err)) {
+               __folio_clear_locked(folio);
                return err;
+       }
        ClearHPageRestoreReserve(page);
 
        /*
-        * set page dirty so that it will not be removed from cache/file
+        * mark folio dirty so that it will not be removed from cache/file
         * by non-hugetlbfs specific code paths.
         */
-       set_page_dirty(page);
+       folio_mark_dirty(folio);
 
        spin_lock(&inode->i_lock);
        inode->i_blocks += blocks_per_huge_page(h);
index 54f78c9..e8f8c1a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -712,7 +712,7 @@ again:
         * however, it might mean that the page is under page_ref_freeze().
         * The __remove_mapping() case is easy, again the node is now stale;
         * the same is in reuse_ksm_page() case; but if page is swapcache
-        * in migrate_page_move_mapping(), it might still be our page,
+        * in folio_migrate_mapping(), it might still be our page,
         * in which case it's essential to keep the node.
         */
        while (!get_page_unless_zero(page)) {
index da39ec8..b864c2e 100644
@@ -1940,7 +1940,7 @@ try_again:
 
        /*
         * Now take care of user space mappings.
-        * Abort on fail: __delete_from_page_cache() assumes unmapped page.
+        * Abort on fail: __filemap_remove_folio() assumes unmapped page.
         */
        if (!hwpoison_user_mappings(p, pfn, flags, p)) {
                action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
index 6c1ea61..1b4b977 100644
@@ -59,7 +59,7 @@
 
 int isolate_movable_page(struct page *page, isolate_mode_t mode)
 {
-       struct address_space *mapping;
+       const struct movable_operations *mops;
 
        /*
         * Avoid burning cycles with pages that are yet under __free_pages(),
@@ -97,10 +97,10 @@ int isolate_movable_page(struct page *page, isolate_mode_t mode)
        if (!PageMovable(page) || PageIsolated(page))
                goto out_no_isolated;
 
-       mapping = page_mapping(page);
-       VM_BUG_ON_PAGE(!mapping, page);
+       mops = page_movable_ops(page);
+       VM_BUG_ON_PAGE(!mops, page);
 
-       if (!mapping->a_ops->isolate_page(page, mode))
+       if (!mops->isolate_page(page, mode))
                goto out_no_isolated;
 
        /* Driver shouldn't use PG_isolated bit of page->flags */
@@ -120,10 +120,9 @@ out:
 
 static void putback_movable_page(struct page *page)
 {
-       struct address_space *mapping;
+       const struct movable_operations *mops = page_movable_ops(page);
 
-       mapping = page_mapping(page);
-       mapping->a_ops->putback_page(page);
+       mops->putback_page(page);
        ClearPageIsolated(page);
 }
 
@@ -337,13 +336,18 @@ unlock:
 }
 #endif
 
-static int expected_page_refs(struct address_space *mapping, struct page *page)
+static int folio_expected_refs(struct address_space *mapping,
+               struct folio *folio)
 {
-       int expected_count = 1;
+       int refs = 1;
+       if (!mapping)
+               return refs;
 
-       if (mapping)
-               expected_count += compound_nr(page) + page_has_private(page);
-       return expected_count;
+       refs += folio_nr_pages(folio);
+       if (folio_test_private(folio))
+               refs++;
+
+       return refs;
 }
 
 /*
@@ -360,7 +364,7 @@ int folio_migrate_mapping(struct address_space *mapping,
        XA_STATE(xas, &mapping->i_pages, folio_index(folio));
        struct zone *oldzone, *newzone;
        int dirty;
-       int expected_count = expected_page_refs(mapping, &folio->page) + extra_count;
+       int expected_count = folio_expected_refs(mapping, folio) + extra_count;
        long nr = folio_nr_pages(folio);
 
        if (!mapping) {
@@ -470,26 +474,26 @@ EXPORT_SYMBOL(folio_migrate_mapping);
  * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
-                                  struct page *newpage, struct page *page)
+                                  struct folio *dst, struct folio *src)
 {
-       XA_STATE(xas, &mapping->i_pages, page_index(page));
+       XA_STATE(xas, &mapping->i_pages, folio_index(src));
        int expected_count;
 
        xas_lock_irq(&xas);
-       expected_count = 2 + page_has_private(page);
-       if (!page_ref_freeze(page, expected_count)) {
+       expected_count = 2 + folio_has_private(src);
+       if (!folio_ref_freeze(src, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }
 
-       newpage->index = page->index;
-       newpage->mapping = page->mapping;
+       dst->index = src->index;
+       dst->mapping = src->mapping;
 
-       get_page(newpage);
+       folio_get(dst);
 
-       xas_store(&xas, newpage);
+       xas_store(&xas, dst);
 
-       page_ref_unfreeze(page, expected_count - 1);
+       folio_ref_unfreeze(src, expected_count - 1);
 
        xas_unlock_irq(&xas);
 
@@ -589,34 +593,37 @@ EXPORT_SYMBOL(folio_migrate_copy);
  *                    Migration functions
  ***********************************************************/
 
-/*
- * Common logic to directly migrate a single LRU page suitable for
- * pages that do not use PagePrivate/PagePrivate2.
+/**
+ * migrate_folio() - Simple folio migration.
+ * @mapping: The address_space containing the folio.
+ * @dst: The folio to migrate the data to.
+ * @src: The folio containing the current data.
+ * @mode: How to migrate the page.
+ *
+ * Common logic to directly migrate a single LRU folio suitable for
+ * folios that do not use PagePrivate/PagePrivate2.
  *
- * Pages are locked upon entry and exit.
+ * Folios are locked upon entry and exit.
  */
-int migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page,
-               enum migrate_mode mode)
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+               struct folio *src, enum migrate_mode mode)
 {
-       struct folio *newfolio = page_folio(newpage);
-       struct folio *folio = page_folio(page);
        int rc;
 
-       BUG_ON(folio_test_writeback(folio));    /* Writeback must be complete */
+       BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */
 
-       rc = folio_migrate_mapping(mapping, newfolio, folio, 0);
+       rc = folio_migrate_mapping(mapping, dst, src, 0);
 
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
 
        if (mode != MIGRATE_SYNC_NO_COPY)
-               folio_migrate_copy(newfolio, folio);
+               folio_migrate_copy(dst, src);
        else
-               folio_migrate_flags(newfolio, folio);
+               folio_migrate_flags(dst, src);
        return MIGRATEPAGE_SUCCESS;
 }
-EXPORT_SYMBOL(migrate_page);
+EXPORT_SYMBOL(migrate_folio);
 
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
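
For an address_space whose folios carry no private data, the whole migration hookup after this series is one line in the aops, as the secretmem, hugetlb and aio conversions in this pull show. A sketch (demo_simple_aops is an invented name, other methods elided):

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations demo_simple_aops = {
        /* ... read_folio, writepage, etc. elided ... */
        .migrate_folio  = migrate_folio,        /* no PagePrivate/PagePrivate2 data */
};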
@@ -657,23 +664,23 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
        return true;
 }
 
-static int __buffer_migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode,
+static int __buffer_migrate_folio(struct address_space *mapping,
+               struct folio *dst, struct folio *src, enum migrate_mode mode,
                bool check_refs)
 {
        struct buffer_head *bh, *head;
        int rc;
        int expected_count;
 
-       if (!page_has_buffers(page))
-               return migrate_page(mapping, newpage, page, mode);
+       head = folio_buffers(src);
+       if (!head)
+               return migrate_folio(mapping, dst, src, mode);
 
        /* Check whether page does not have extra refs before we do more work */
-       expected_count = expected_page_refs(mapping, page);
-       if (page_count(page) != expected_count)
+       expected_count = folio_expected_refs(mapping, src);
+       if (folio_ref_count(src) != expected_count)
                return -EAGAIN;
 
-       head = page_buffers(page);
        if (!buffer_migrate_lock_buffers(head, mode))
                return -EAGAIN;
 
@@ -704,23 +711,22 @@ recheck_buffers:
                }
        }
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, 0);
+       rc = folio_migrate_mapping(mapping, dst, src, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                goto unlock_buffers;
 
-       attach_page_private(newpage, detach_page_private(page));
+       folio_attach_private(dst, folio_detach_private(src));
 
        bh = head;
        do {
-               set_bh_page(bh, newpage, bh_offset(bh));
+               set_bh_page(bh, &dst->page, bh_offset(bh));
                bh = bh->b_this_page;
-
        } while (bh != head);
 
        if (mode != MIGRATE_SYNC_NO_COPY)
-               migrate_page_copy(newpage, page);
+               folio_migrate_copy(dst, src);
        else
-               migrate_page_states(newpage, page);
+               folio_migrate_flags(dst, src);
 
        rc = MIGRATEPAGE_SUCCESS;
 unlock_buffers:
@@ -730,43 +736,79 @@ unlock_buffers:
        do {
                unlock_buffer(bh);
                bh = bh->b_this_page;
-
        } while (bh != head);
 
        return rc;
 }
 
-/*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist. For example attached buffer heads are accessed only under page lock.
+/**
+ * buffer_migrate_folio() - Migration function for folios with buffers.
+ * @mapping: The address space containing @src.
+ * @dst: The folio to migrate to.
+ * @src: The folio to migrate from.
+ * @mode: How to migrate the folio.
+ *
+ * This function can only be used if the underlying filesystem guarantees
+ * that no other references to @src exist. For example, attached buffer
+ * heads are accessed only under the folio lock.  If your filesystem cannot
+ * provide this guarantee, buffer_migrate_folio_norefs() may be more
+ * appropriate.
+ *
+ * Return: 0 on success or a negative errno on failure.
  */
-int buffer_migrate_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode)
+int buffer_migrate_folio(struct address_space *mapping,
+               struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
-       return __buffer_migrate_page(mapping, newpage, page, mode, false);
+       return __buffer_migrate_folio(mapping, dst, src, mode, false);
 }
-EXPORT_SYMBOL(buffer_migrate_page);
+EXPORT_SYMBOL(buffer_migrate_folio);
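
For a buffer-head filesystem whose attached buffers are only touched under the folio lock, the conversion is a one-line aops change; the ext2 and ext4 updates elsewhere in this series have this shape. A hedged sketch, with the struct name hypothetical:

static const struct address_space_operations example_bh_aops = {
	/* ...read/write callbacks unchanged... */
	.migrate_folio	= buffer_migrate_folio,
};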
 
-/*
- * Same as above except that this variant is more careful and checks that there
- * are also no buffer head references. This function is the right one for
- * mappings where buffer heads are directly looked up and referenced (such as
- * block device mappings).
+/**
+ * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
+ * @mapping: The address space containing @src.
+ * @dst: The folio to migrate to.
+ * @src: The folio to migrate from.
+ * @mode: How to migrate the folio.
+ *
+ * Like buffer_migrate_folio() except that this variant is more careful
+ * and checks that there are also no buffer head references. This function
+ * is the right one for mappings where buffer heads are directly looked
+ * up and referenced (such as block device mappings).
+ *
+ * Return: 0 on success or a negative errno on failure.
  */
-int buffer_migrate_page_norefs(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode)
+int buffer_migrate_folio_norefs(struct address_space *mapping,
+               struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
-       return __buffer_migrate_page(mapping, newpage, page, mode, true);
+       return __buffer_migrate_folio(mapping, dst, src, mode, true);
 }
 #endif
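
buffer_migrate_folio_norefs() is deliberately left unexported: its in-tree user is the block device mapping, where buffer heads are looked up and referenced directly. A sketch of that shape, assuming a mapping like the one def_blk_aops describes:

static const struct address_space_operations blkdev_like_aops = {
	/* buffer heads may be referenced directly, so check them too */
	.migrate_folio	= buffer_migrate_folio_norefs,
};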
 
+int filemap_migrate_folio(struct address_space *mapping,
+               struct folio *dst, struct folio *src, enum migrate_mode mode)
+{
+       int ret;
+
+       ret = folio_migrate_mapping(mapping, dst, src, 0);
+       if (ret != MIGRATEPAGE_SUCCESS)
+               return ret;
+
+       if (folio_get_private(src))
+               folio_attach_private(dst, folio_detach_private(src));
+
+       if (mode != MIGRATE_SYNC_NO_COPY)
+               folio_migrate_copy(dst, src);
+       else
+               folio_migrate_flags(dst, src);
+       return MIGRATEPAGE_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(filemap_migrate_folio);
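
filemap_migrate_folio() is for mappings whose folio private data only needs to travel with the folio; f2fs, ubifs and btrfs adopt it in this series. A filesystem that must refuse migration in some states can still delegate to it. A hedged sketch, where example_folio_busy() is a hypothetical driver-state check:

static int example_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (example_folio_busy(src))
		return -EAGAIN;	/* try again once the folio settles */
	return filemap_migrate_folio(mapping, dst, src, mode);
}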
+
 /*
- * Writeback a page to clean the dirty state
+ * Write back a folio to clean the dirty state
  */
-static int writeout(struct address_space *mapping, struct page *page)
+static int writeout(struct address_space *mapping, struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
@@ -780,25 +822,25 @@ static int writeout(struct address_space *mapping, struct page *page)
                /* No write method for the address space */
                return -EINVAL;
 
-       if (!clear_page_dirty_for_io(page))
+       if (!folio_clear_dirty_for_io(folio))
                /* Someone else already triggered a write */
                return -EAGAIN;
 
        /*
-        * A dirty page may imply that the underlying filesystem has
-        * the page on some queue. So the page must be clean for
-        * migration. Writeout may mean we loose the lock and the
-        * page state is no longer what we checked for earlier.
+        * A dirty folio may imply that the underlying filesystem has
+        * the folio on some queue. So the folio must be clean for
+        * migration. Writeout may mean we lose the lock and the
+        * folio state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(folio, folio, false);
 
-       rc = mapping->a_ops->writepage(page, &wbc);
+       rc = mapping->a_ops->writepage(&folio->page, &wbc);
 
        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
-               lock_page(page);
+               folio_lock(folio);
 
        return (rc < 0) ? -EIO : -EAGAIN;
 }
@@ -806,11 +848,11 @@ static int writeout(struct address_space *mapping, struct page *page)
 /*
  * Default handling if a filesystem does not provide a migration function.
  */
-static int fallback_migrate_page(struct address_space *mapping,
-       struct page *newpage, struct page *page, enum migrate_mode mode)
+static int fallback_migrate_folio(struct address_space *mapping,
+               struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
-       if (PageDirty(page)) {
-               /* Only writeback pages in full synchronous migration */
+       if (folio_test_dirty(src)) {
+               /* Only writeback folios in full synchronous migration */
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
@@ -818,18 +860,18 @@ static int fallback_migrate_page(struct address_space *mapping,
                default:
                        return -EBUSY;
                }
-               return writeout(mapping, page);
+               return writeout(mapping, src);
        }
 
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
-       if (page_has_private(page) &&
-           !try_to_release_page(page, GFP_KERNEL))
+       if (folio_test_private(src) &&
+           !filemap_release_folio(src, GFP_KERNEL))
                return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
 
-       return migrate_page(mapping, newpage, page, mode);
+       return migrate_folio(mapping, dst, src, mode);
 }
 
 /*
@@ -846,32 +888,32 @@ static int fallback_migrate_page(struct address_space *mapping,
 static int move_to_new_folio(struct folio *dst, struct folio *src,
                                enum migrate_mode mode)
 {
-       struct address_space *mapping;
        int rc = -EAGAIN;
        bool is_lru = !__PageMovable(&src->page);
 
        VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
        VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 
-       mapping = folio_mapping(src);
-
        if (likely(is_lru)) {
+               struct address_space *mapping = folio_mapping(src);
+
                if (!mapping)
-                       rc = migrate_page(mapping, &dst->page, &src->page, mode);
-               else if (mapping->a_ops->migratepage)
+                       rc = migrate_folio(mapping, dst, src, mode);
+               else if (mapping->a_ops->migrate_folio)
                        /*
-                        * Most pages have a mapping and most filesystems
-                        * provide a migratepage callback. Anonymous pages
+                        * Most folios have a mapping and most filesystems
+                        * provide a migrate_folio callback. Anonymous folios
                         * are part of swap space which also has its own
-                        * migratepage callback. This is the most common path
+                        * migrate_folio callback. This is the most common path
                         * for page migration.
                         */
-                       rc = mapping->a_ops->migratepage(mapping, &dst->page,
-                                                       &src->page, mode);
+                       rc = mapping->a_ops->migrate_folio(mapping, dst, src,
+                                                               mode);
                else
-                       rc = fallback_migrate_page(mapping, &dst->page,
-                                                       &src->page, mode);
+                       rc = fallback_migrate_folio(mapping, dst, src, mode);
        } else {
+               const struct movable_operations *mops;
+
                /*
                 * In case of non-lru page, it could be released after
                 * isolation step. In that case, we shouldn't try migration.
@@ -883,8 +925,8 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
                        goto out;
                }
 
-               rc = mapping->a_ops->migratepage(mapping, &dst->page,
-                                               &src->page, mode);
+               mops = page_movable_ops(&src->page);
+               rc = mops->migrate_page(&dst->page, &src->page, mode);
                WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
                                !folio_test_isolated(src));
        }
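
The non-LRU branch now dispatches through the movable_operations registered with __SetPageMovable() instead of going via an address_space; the z3fold and zsmalloc hunks below are converted to this scheme. A minimal driver-side sketch (all names hypothetical):

static bool example_isolate(struct page *page, isolate_mode_t mode)
{
	/* pin driver state so the page cannot be freed during migration */
	return true;
}

static int example_migrate(struct page *dst, struct page *src,
		enum migrate_mode mode)
{
	/* copy contents and metadata, then return MIGRATEPAGE_SUCCESS */
	return -EAGAIN;		/* this sketch simply refuses */
}

static void example_putback(struct page *page)
{
	/* undo example_isolate() */
}

static const struct movable_operations example_mops = {
	.isolate_page	= example_isolate,
	.migrate_page	= example_migrate,
	.putback_page	= example_putback,
};

static void example_make_movable(struct page *page)
{
	lock_page(page);
	__SetPageMovable(page, &example_mops);
	unlock_page(page);
}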
index 5052093..5dd97c3 100644 (file)
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -718,7 +718,8 @@ void migrate_vma_pages(struct migrate_vma *migrate)
                        continue;
                }
 
-               r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
+               r = migrate_folio(mapping, page_folio(newpage),
+                               page_folio(page), MIGRATE_SYNC_NO_COPY);
                if (r != MIGRATEPAGE_SUCCESS)
                        migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
        }
index 71fb78f..e3e9590 100644 (file)
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -144,14 +144,8 @@ static const struct file_operations secretmem_fops = {
        .mmap           = secretmem_mmap,
 };
 
-static bool secretmem_isolate_page(struct page *page, isolate_mode_t mode)
-{
-       return false;
-}
-
-static int secretmem_migratepage(struct address_space *mapping,
-                                struct page *newpage, struct page *page,
-                                enum migrate_mode mode)
+static int secretmem_migrate_folio(struct address_space *mapping,
+               struct folio *dst, struct folio *src, enum migrate_mode mode)
 {
        return -EBUSY;
 }
@@ -165,8 +159,7 @@ static void secretmem_free_folio(struct folio *folio)
 const struct address_space_operations secretmem_aops = {
        .dirty_folio    = noop_dirty_folio,
        .free_folio     = secretmem_free_folio,
-       .migratepage    = secretmem_migratepage,
-       .isolate_page   = secretmem_isolate_page,
+       .migrate_folio  = secretmem_migrate_folio,
 };
 
 static int secretmem_setattr(struct user_namespace *mnt_userns,
index b7f2d4a..e5e43b9 100644 (file)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -392,7 +392,7 @@ void shmem_uncharge(struct inode *inode, long pages)
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;
 
-       /* nrpages adjustment done by __delete_from_page_cache() or caller */
+       /* nrpages adjustment done by __filemap_remove_folio() or caller */
 
        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
@@ -693,7 +693,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+ * Like filemap_add_folio, but error if expected item has gone.
  */
 static int shmem_add_to_page_cache(struct folio *folio,
                                   struct address_space *mapping,
@@ -867,18 +867,17 @@ unsigned long shmem_swap_usage(struct vm_area_struct *vma)
  */
 void shmem_unlock_mapping(struct address_space *mapping)
 {
-       struct pagevec pvec;
+       struct folio_batch fbatch;
        pgoff_t index = 0;
 
-       pagevec_init(&pvec);
+       folio_batch_init(&fbatch);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
-       while (!mapping_unevictable(mapping)) {
-               if (!pagevec_lookup(&pvec, mapping, &index))
-                       break;
-               check_move_unevictable_pages(&pvec);
-               pagevec_release(&pvec);
+       while (!mapping_unevictable(mapping) &&
+              filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
+               check_move_unevictable_folios(&fbatch);
+               folio_batch_release(&fbatch);
                cond_resched();
        }
 }
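
The rewritten loop is the canonical filemap_get_folios() pattern: fill a folio_batch, act on it, release it, and stop once the lookup returns 0. The same skeleton standalone, with the per-folio work purely illustrative:

static void example_walk_mapping(struct address_space *mapping)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_mark_accessed(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}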
@@ -3799,7 +3798,7 @@ const struct address_space_operations shmem_aops = {
        .write_end      = shmem_write_end,
 #endif
 #ifdef CONFIG_MIGRATION
-       .migratepage    = migrate_page,
+       .migrate_folio  = migrate_folio,
 #endif
        .error_remove_page = shmem_error_remove_page,
 };
index 034bb24..275a4ea 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1086,35 +1086,6 @@ void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
        fbatch->nr = j;
 }
 
-/**
- * pagevec_lookup_range - gang pagecache lookup
- * @pvec:      Where the resulting pages are placed
- * @mapping:   The address_space to search
- * @start:     The starting page index
- * @end:       The final page index
- *
- * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
- * pages in the mapping starting from index @start and upto index @end
- * (inclusive).  The pages are placed in @pvec.  pagevec_lookup() takes a
- * reference against the pages in @pvec.
- *
- * The search returns a group of mapping-contiguous pages with ascending
- * indexes.  There may be holes in the indices due to not-present pages. We
- * also update @start to index the next page for the traversal.
- *
- * pagevec_lookup_range() returns the number of pages which were found. If this
- * number is smaller than PAGEVEC_SIZE, the end of specified range has been
- * reached.
- */
-unsigned pagevec_lookup_range(struct pagevec *pvec,
-               struct address_space *mapping, pgoff_t *start, pgoff_t end)
-{
-       pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
-                                       pvec->pages);
-       return pagevec_count(pvec);
-}
-EXPORT_SYMBOL(pagevec_lookup_range);
-
 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                xa_mark_t tag)
index 778d57d..0a2021f 100644 (file)
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -33,7 +33,7 @@ static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .dirty_folio    = noop_dirty_folio,
 #ifdef CONFIG_MIGRATION
-       .migratepage    = migrate_page,
+       .migrate_folio  = migrate_folio,
 #endif
 };
 
@@ -95,7 +95,7 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
 }
 
 /*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles filemap_add_folio on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
 int add_to_swap_cache(struct page *page, swp_entry_t entry,
index ab50d0d..0b0708b 100644 (file)
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -443,7 +443,7 @@ EXPORT_SYMBOL(truncate_inode_pages_range);
  * mapping->invalidate_lock.
  *
  * Note: When this function returns, there can be a page in the process of
- * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
+ * deletion (inside __filemap_remove_folio()) in the specified range.  Thus
  * mapping->nrpages can be non-zero when this function returns even after
  * truncation of the whole mapping.
  */
index 0837570..53af0e7 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -804,10 +804,10 @@ struct address_space *folio_mapping(struct folio *folio)
                return swap_address_space(folio_swap_entry(folio));
 
        mapping = folio->mapping;
-       if ((unsigned long)mapping & PAGE_MAPPING_ANON)
+       if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
                return NULL;
 
-       return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
+       return mapping;
 }
 EXPORT_SYMBOL(folio_mapping);
 
index f7d9a68..04f8671 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4790,45 +4790,57 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
+void check_move_unevictable_pages(struct pagevec *pvec)
+{
+       struct folio_batch fbatch;
+       unsigned i;
+
+       folio_batch_init(&fbatch);
+       for (i = 0; i < pvec->nr; i++) {
+               struct page *page = pvec->pages[i];
+
+               if (PageTransTail(page))
+                       continue;
+               folio_batch_add(&fbatch, page_folio(page));
+       }
+       check_move_unevictable_folios(&fbatch);
+}
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
+
 /**
- * check_move_unevictable_pages - check pages for evictability and move to
- * appropriate zone lru list
- * @pvec: pagevec with lru pages to check
+ * check_move_unevictable_folios - Move evictable folios to appropriate zone
+ * lru list
+ * @fbatch: Batch of lru folios to check.
  *
- * Checks pages for evictability, if an evictable page is in the unevictable
+ * Checks folios for evictability; if an evictable folio is in the unevictable
  * lru list, moves it to the appropriate evictable lru list. This function
- * should be only used for lru pages.
+ * should only be used for lru folios.
  */
-void check_move_unevictable_pages(struct pagevec *pvec)
+void check_move_unevictable_folios(struct folio_batch *fbatch)
 {
        struct lruvec *lruvec = NULL;
        int pgscanned = 0;
        int pgrescued = 0;
        int i;
 
-       for (i = 0; i < pvec->nr; i++) {
-               struct page *page = pvec->pages[i];
-               struct folio *folio = page_folio(page);
-               int nr_pages;
-
-               if (PageTransTail(page))
-                       continue;
+       for (i = 0; i < fbatch->nr; i++) {
+               struct folio *folio = fbatch->folios[i];
+               int nr_pages = folio_nr_pages(folio);
 
-               nr_pages = thp_nr_pages(page);
                pgscanned += nr_pages;
 
-               /* block memcg migration during page moving between lru */
-               if (!TestClearPageLRU(page))
+               /* block memcg migration while the folio moves between lrus */
+               if (!folio_test_clear_lru(folio))
                        continue;
 
                lruvec = folio_lruvec_relock_irq(folio, lruvec);
-               if (page_evictable(page) && PageUnevictable(page)) {
-                       del_page_from_lru_list(page, lruvec);
-                       ClearPageUnevictable(page);
-                       add_page_to_lru_list(page, lruvec);
+               if (folio_evictable(folio) && folio_test_unevictable(folio)) {
+                       lruvec_del_folio(lruvec, folio);
+                       folio_clear_unevictable(folio);
+                       lruvec_add_folio(lruvec, folio);
                        pgrescued += nr_pages;
                }
-               SetPageLRU(page);
+               folio_set_lru(folio);
        }
 
        if (lruvec) {
@@ -4839,4 +4851,4 @@ void check_move_unevictable_pages(struct pagevec *pvec)
                count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
        }
 }
-EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
+EXPORT_SYMBOL_GPL(check_move_unevictable_folios);
index f41f8b0..cf71da1 100644 (file)
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
 #include <linux/node.h>
 #include <linux/compaction.h>
 #include <linux/percpu.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/fs.h>
 #include <linux/preempt.h>
 #include <linux/workqueue.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/zpool.h>
-#include <linux/magic.h>
 #include <linux/kmemleak.h>
 
 /*
@@ -149,7 +145,6 @@ struct z3fold_header {
  * @compact_wq:        workqueue for page layout background optimization
  * @release_wq:        workqueue for safe page release
  * @work:      work_struct for safe page release
- * @inode:     inode for z3fold pseudo filesystem
  *
  * This structure is allocated at pool creation time and maintains metadata
  * pertaining to a particular z3fold pool.
@@ -169,7 +164,6 @@ struct z3fold_pool {
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
-       struct inode *inode;
 };
 
 /*
@@ -334,54 +328,6 @@ static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
        }
 }
 
-static int z3fold_init_fs_context(struct fs_context *fc)
-{
-       return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type z3fold_fs = {
-       .name           = "z3fold",
-       .init_fs_context = z3fold_init_fs_context,
-       .kill_sb        = kill_anon_super,
-};
-
-static struct vfsmount *z3fold_mnt;
-static int __init z3fold_mount(void)
-{
-       int ret = 0;
-
-       z3fold_mnt = kern_mount(&z3fold_fs);
-       if (IS_ERR(z3fold_mnt))
-               ret = PTR_ERR(z3fold_mnt);
-
-       return ret;
-}
-
-static void z3fold_unmount(void)
-{
-       kern_unmount(z3fold_mnt);
-}
-
-static const struct address_space_operations z3fold_aops;
-static int z3fold_register_migration(struct z3fold_pool *pool)
-{
-       pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
-       if (IS_ERR(pool->inode)) {
-               pool->inode = NULL;
-               return 1;
-       }
-
-       pool->inode->i_mapping->private_data = pool;
-       pool->inode->i_mapping->a_ops = &z3fold_aops;
-       return 0;
-}
-
-static void z3fold_unregister_migration(struct z3fold_pool *pool)
-{
-       if (pool->inode)
-               iput(pool->inode);
-}
-
 /* Initializes the z3fold header of a newly allocated z3fold page */
 static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
                                        struct z3fold_pool *pool, gfp_t gfp)
@@ -1002,14 +948,10 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
-       if (z3fold_register_migration(pool))
-               goto out_rwq;
        INIT_WORK(&pool->work, free_pages_work);
        pool->ops = ops;
        return pool;
 
-out_rwq:
-       destroy_workqueue(pool->release_wq);
 out_wq:
        destroy_workqueue(pool->compact_wq);
 out_unbuddied:
@@ -1043,11 +985,12 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool)
 
        destroy_workqueue(pool->compact_wq);
        destroy_workqueue(pool->release_wq);
-       z3fold_unregister_migration(pool);
        free_percpu(pool->unbuddied);
        kfree(pool);
 }
 
+static const struct movable_operations z3fold_mops;
+
 /**
  * z3fold_alloc() - allocates a region of a given size
  * @pool:      z3fold pool from which to allocate
@@ -1117,11 +1060,11 @@ retry:
        }
        if (can_sleep) {
                lock_page(page);
-               __SetPageMovable(page, pool->inode->i_mapping);
+               __SetPageMovable(page, &z3fold_mops);
                unlock_page(page);
        } else {
                WARN_ON(!trylock_page(page));
-               __SetPageMovable(page, pool->inode->i_mapping);
+               __SetPageMovable(page, &z3fold_mops);
                unlock_page(page);
        }
        z3fold_page_lock(zhdr);
@@ -1554,12 +1497,11 @@ out:
        return false;
 }
 
-static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
-                              struct page *page, enum migrate_mode mode)
+static int z3fold_page_migrate(struct page *newpage, struct page *page,
+               enum migrate_mode mode)
 {
        struct z3fold_header *zhdr, *new_zhdr;
        struct z3fold_pool *pool;
-       struct address_space *new_mapping;
 
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        VM_BUG_ON_PAGE(!PageIsolated(page), page);
@@ -1592,7 +1534,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
         * so we only have to reinitialize it.
         */
        INIT_LIST_HEAD(&new_zhdr->buddy);
-       new_mapping = page_mapping(page);
        __ClearPageMovable(page);
 
        get_page(newpage);
@@ -1608,7 +1549,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
        spin_lock(&pool->lock);
        list_add(&newpage->lru, &pool->lru);
        spin_unlock(&pool->lock);
-       __SetPageMovable(newpage, new_mapping);
+       __SetPageMovable(newpage, &z3fold_mops);
        z3fold_page_unlock(new_zhdr);
 
        queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
@@ -1642,9 +1583,9 @@ static void z3fold_page_putback(struct page *page)
        z3fold_page_unlock(zhdr);
 }
 
-static const struct address_space_operations z3fold_aops = {
+static const struct movable_operations z3fold_mops = {
        .isolate_page = z3fold_page_isolate,
-       .migratepage = z3fold_page_migrate,
+       .migrate_page = z3fold_page_migrate,
        .putback_page = z3fold_page_putback,
 };
 
@@ -1746,17 +1687,11 @@ MODULE_ALIAS("zpool-z3fold");
 
 static int __init init_z3fold(void)
 {
-       int ret;
-
        /*
         * Make sure the z3fold header is not larger than the page size and
         * there is remaining space for its buddy.
         */
        BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
-       ret = z3fold_mount();
-       if (ret)
-               return ret;
-
        zpool_register_driver(&z3fold_zpool_driver);
 
        return 0;
@@ -1764,7 +1699,6 @@ static int __init init_z3fold(void)
 
 static void __exit exit_z3fold(void)
 {
-       z3fold_unmount();
        zpool_unregister_driver(&z3fold_zpool_driver);
 }
 
index 5d5fc04..71d6edc 100644 (file)
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -41,7 +41,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/magic.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
 #include <linux/highmem.h>
@@ -59,8 +58,6 @@
 #include <linux/debugfs.h>
 #include <linux/zsmalloc.h>
 #include <linux/zpool.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
 #include <linux/migrate.h>
 #include <linux/wait.h>
 #include <linux/pagemap.h>
@@ -177,10 +174,6 @@ struct zs_size_stat {
 static struct dentry *zs_stat_root;
 #endif
 
-#ifdef CONFIG_COMPACTION
-static struct vfsmount *zsmalloc_mnt;
-#endif
-
 /*
  * We assign a page to ZS_ALMOST_EMPTY fullness group when:
  *     n <= N / f, where
@@ -252,7 +245,6 @@ struct zs_pool {
        struct dentry *stat_dentry;
 #endif
 #ifdef CONFIG_COMPACTION
-       struct inode *inode;
        struct work_struct free_work;
 #endif
        /* protect page/zspage migration */
@@ -271,6 +263,7 @@ struct zspage {
        unsigned int freeobj;
        struct page *first_page;
        struct list_head list; /* fullness list */
+       struct zs_pool *pool;
 #ifdef CONFIG_COMPACTION
        rwlock_t lock;
 #endif
@@ -295,8 +288,6 @@ static bool ZsHugePage(struct zspage *zspage)
 }
 
 #ifdef CONFIG_COMPACTION
-static int zs_register_migration(struct zs_pool *pool);
-static void zs_unregister_migration(struct zs_pool *pool);
 static void migrate_lock_init(struct zspage *zspage);
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
@@ -307,10 +298,6 @@ static void kick_deferred_free(struct zs_pool *pool);
 static void init_deferred_free(struct zs_pool *pool);
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
 #else
-static int zsmalloc_mount(void) { return 0; }
-static void zsmalloc_unmount(void) {}
-static int zs_register_migration(struct zs_pool *pool) { return 0; }
-static void zs_unregister_migration(struct zs_pool *pool) {}
 static void migrate_lock_init(struct zspage *zspage) {}
 static void migrate_read_lock(struct zspage *zspage) {}
 static void migrate_read_unlock(struct zspage *zspage) {}
@@ -1083,6 +1070,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 
        create_page_chain(class, zspage, pages);
        init_zspage(class, zspage);
+       zspage->pool = pool;
 
        return zspage;
 }
@@ -1754,33 +1742,6 @@ static void lock_zspage(struct zspage *zspage)
        migrate_read_unlock(zspage);
 }
 
-static int zs_init_fs_context(struct fs_context *fc)
-{
-       return init_pseudo(fc, ZSMALLOC_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type zsmalloc_fs = {
-       .name           = "zsmalloc",
-       .init_fs_context = zs_init_fs_context,
-       .kill_sb        = kill_anon_super,
-};
-
-static int zsmalloc_mount(void)
-{
-       int ret = 0;
-
-       zsmalloc_mnt = kern_mount(&zsmalloc_fs);
-       if (IS_ERR(zsmalloc_mnt))
-               ret = PTR_ERR(zsmalloc_mnt);
-
-       return ret;
-}
-
-static void zsmalloc_unmount(void)
-{
-       kern_unmount(zsmalloc_mnt);
-}
-
 static void migrate_lock_init(struct zspage *zspage)
 {
        rwlock_init(&zspage->lock);
@@ -1823,6 +1784,8 @@ static void dec_zspage_isolation(struct zspage *zspage)
        zspage->isolated--;
 }
 
+static const struct movable_operations zsmalloc_mops;
+
 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
                                struct page *newpage, struct page *oldpage)
 {
@@ -1843,7 +1806,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
        set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
        if (unlikely(ZsHugePage(zspage)))
                newpage->index = oldpage->index;
-       __SetPageMovable(newpage, page_mapping(oldpage));
+       __SetPageMovable(newpage, &zsmalloc_mops);
 }
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
@@ -1865,8 +1828,8 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
        return true;
 }
 
-static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
-               struct page *page, enum migrate_mode mode)
+static int zs_page_migrate(struct page *newpage, struct page *page,
+               enum migrate_mode mode)
 {
        struct zs_pool *pool;
        struct size_class *class;
@@ -1889,14 +1852,15 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
-       pool = mapping->private_data;
+       /* The page is locked, so this pointer must remain valid */
+       zspage = get_zspage(page);
+       pool = zspage->pool;
 
        /*
         * The pool migrate_lock protects the race between zpage migration
         * and zs_free.
         */
        write_lock(&pool->migrate_lock);
-       zspage = get_zspage(page);
        class = zspage_class(pool, zspage);
 
        /*
@@ -1964,31 +1928,12 @@ static void zs_page_putback(struct page *page)
        migrate_write_unlock(zspage);
 }
 
-static const struct address_space_operations zsmalloc_aops = {
+static const struct movable_operations zsmalloc_mops = {
        .isolate_page = zs_page_isolate,
-       .migratepage = zs_page_migrate,
+       .migrate_page = zs_page_migrate,
        .putback_page = zs_page_putback,
 };
 
-static int zs_register_migration(struct zs_pool *pool)
-{
-       pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
-       if (IS_ERR(pool->inode)) {
-               pool->inode = NULL;
-               return 1;
-       }
-
-       pool->inode->i_mapping->private_data = pool;
-       pool->inode->i_mapping->a_ops = &zsmalloc_aops;
-       return 0;
-}
-
-static void zs_unregister_migration(struct zs_pool *pool)
-{
-       flush_work(&pool->free_work);
-       iput(pool->inode);
-}
-
 /*
  * Caller should hold page_lock of all pages in the zspage
  * In here, we cannot use zspage meta data.
@@ -2032,6 +1977,11 @@ static void kick_deferred_free(struct zs_pool *pool)
        schedule_work(&pool->free_work);
 }
 
+static void zs_flush_migration(struct zs_pool *pool)
+{
+       flush_work(&pool->free_work);
+}
+
 static void init_deferred_free(struct zs_pool *pool)
 {
        INIT_WORK(&pool->free_work, async_free_zspage);
@@ -2043,10 +1993,12 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
 
        do {
                WARN_ON(!trylock_page(page));
-               __SetPageMovable(page, pool->inode->i_mapping);
+               __SetPageMovable(page, &zsmalloc_mops);
                unlock_page(page);
        } while ((page = get_next_page(page)) != NULL);
 }
+#else
+static inline void zs_flush_migration(struct zs_pool *pool) { }
 #endif
 
 /*
@@ -2324,9 +2276,6 @@ struct zs_pool *zs_create_pool(const char *name)
        /* debug only, don't abort if it fails */
        zs_pool_stat_create(pool, name);
 
-       if (zs_register_migration(pool))
-               goto err;
-
        /*
         * Not critical since shrinker is only used to trigger internal
         * defragmentation of the pool which is pretty optional thing.  If
@@ -2348,7 +2297,7 @@ void zs_destroy_pool(struct zs_pool *pool)
        int i;
 
        zs_unregister_shrinker(pool);
-       zs_unregister_migration(pool);
+       zs_flush_migration(pool);
        zs_pool_stat_destroy(pool);
 
        for (i = 0; i < ZS_SIZE_CLASSES; i++) {
@@ -2380,14 +2329,10 @@ static int __init zs_init(void)
 {
        int ret;
 
-       ret = zsmalloc_mount();
-       if (ret)
-               goto out;
-
        ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
                                zs_cpu_prepare, zs_cpu_dead);
        if (ret)
-               goto hp_setup_fail;
+               goto out;
 
 #ifdef CONFIG_ZPOOL
        zpool_register_driver(&zs_zpool_driver);
@@ -2397,8 +2342,6 @@ static int __init zs_init(void)
 
        return 0;
 
-hp_setup_fail:
-       zsmalloc_unmount();
 out:
        return ret;
 }
@@ -2408,7 +2351,6 @@ static void __exit zs_exit(void)
 #ifdef CONFIG_ZPOOL
        zpool_unregister_driver(&zs_zpool_driver);
 #endif
-       zsmalloc_unmount();
        cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
 
        zs_stat_exit();