fsdax: use a saner calling convention for copy_cow_page_dax
author Christoph Hellwig <hch@lst.de>
Mon, 29 Nov 2021 10:21:47 +0000 (11:21 +0100)
committer Dan Williams <dan.j.williams@intel.com>
Sat, 4 Dec 2021 16:58:52 +0000 (08:58 -0800)
Just pass the vm_fault and iomap_iter structures, and figure out the rest
locally.  Note that this requires moving dax_iomap_sector up in the file.
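
As the diff below shows, the helper's interface collapses from five
arguments to two; the sector, block device, dax device, destination page,
and user address are all derived locally from the two structures:

    /* old calling convention */
    static int copy_cow_page_dax(struct block_device *bdev,
                                 struct dax_device *dax_dev,
                                 sector_t sector, struct page *to,
                                 unsigned long vaddr);

    /* new calling convention */
    static int copy_cow_page_dax(struct vm_fault *vmf,
                                 const struct iomap_iter *iter);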

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-14-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
fs/dax.c

index 73bd143..e51b412 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -709,26 +709,31 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
        return __dax_invalidate_entry(mapping, index, false);
 }
 
-static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
-                            sector_t sector, struct page *to, unsigned long vaddr)
+static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
 {
+       return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
+}
+
+static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
+{
+       sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
        void *vto, *kaddr;
        pgoff_t pgoff;
        long rc;
        int id;
 
-       rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
+       rc = bdev_dax_pgoff(iter->iomap.bdev, sector, PAGE_SIZE, &pgoff);
        if (rc)
                return rc;
 
        id = dax_read_lock();
-       rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+       rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
        if (rc < 0) {
                dax_read_unlock(id);
                return rc;
        }
-       vto = kmap_atomic(to);
-       copy_user_page(vto, kaddr, vaddr, to);
+       vto = kmap_atomic(vmf->cow_page);
+       copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
        kunmap_atomic(vto);
        dax_read_unlock(id);
        return 0;
@@ -1005,11 +1010,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
 
-static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
-{
-       return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
-}
-
 static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
                         pfn_t *pfnp)
 {
@@ -1332,19 +1332,16 @@ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
 static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
                const struct iomap_iter *iter)
 {
-       sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
-       unsigned long vaddr = vmf->address;
        vm_fault_t ret;
        int error = 0;
 
        switch (iter->iomap.type) {
        case IOMAP_HOLE:
        case IOMAP_UNWRITTEN:
-               clear_user_highpage(vmf->cow_page, vaddr);
+               clear_user_highpage(vmf->cow_page, vmf->address);
                break;
        case IOMAP_MAPPED:
-               error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev,
-                                         sector, vmf->cow_page, vaddr);
+               error = copy_cow_page_dax(vmf, iter);
                break;
        default:
                WARN_ON_ONCE(1);
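
For readers tracing the moved helper: dax_iomap_sector() converts a
page-aligned file position into a 512-byte device sector. A minimal worked
example of the arithmetic, with made-up extent values (assuming 4 KiB
pages):

    /*
     * Hypothetical values for illustration only:
     *   iomap->offset = 0x200000   file offset where the extent begins
     *   iomap->addr   = 0x800000   device byte address of the extent
     *   pos           = 0x201234   faulting file position
     *
     * pos & PAGE_MASK rounds down to the page boundary: 0x201000.
     * Device byte address: 0x800000 + 0x201000 - 0x200000 = 0x801000.
     * >> 9 converts bytes to 512-byte sectors: 0x801000 >> 9 = 0x4008.
     */
    sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);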