drm/ttm: use an operation ctx for ttm_tt_populate in ttm_bo_driver (v2)
authorRoger He <Hongbo.He@amd.com>
Thu, 21 Dec 2017 09:42:50 +0000 (17:42 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 28 Dec 2017 14:48:19 +0000 (09:48 -0500)
Forward the operation context to ttm_tt_populate as well;
the ultimate goal is to enable swapout for reserved BOs.

v2: squash in fix for vboxvideo

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
21 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_agp_backend.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_ttm.c
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/staging/vboxvideo/vbox_ttm.c
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_page_alloc.h

index f1b7d98..044f5b5 100644 (file)
@@ -990,7 +990,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
        return &gtt->ttm.ttm;
 }
 
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+                       struct ttm_operation_ctx *ctx)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1018,11 +1019,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
-               return ttm_dma_populate(&gtt->ttm, adev->dev);
+               return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
        }
 #endif
 
-       return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
+       return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
 }
 
 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
index 572d2d2..7b784d9 100644 (file)
@@ -216,9 +216,10 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
        return tt;
 }
 
-static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+static int ast_ttm_tt_populate(struct ttm_tt *ttm,
+                       struct ttm_operation_ctx *ctx)
 {
-       return ttm_pool_populate(ttm);
+       return ttm_pool_populate(ttm, ctx);
 }
 
 static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
index 5a08224..a8e31ea 100644 (file)
@@ -216,9 +216,10 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
        return tt;
 }
 
-static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm,
+               struct ttm_operation_ctx *ctx)
 {
-       return ttm_pool_populate(ttm);
+       return ttm_pool_populate(ttm, ctx);
 }
 
 static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
index ab4ee59..8516e00 100644 (file)
@@ -223,9 +223,10 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
        return tt;
 }
 
-static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm,
+               struct ttm_operation_ctx *ctx)
 {
-       return ttm_pool_populate(ttm);
+       return ttm_pool_populate(ttm, ctx);
 }
 
 static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
index 2d61869..c97009b 100644 (file)
@@ -216,9 +216,10 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
        return tt;
 }
 
-static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm,
+                       struct ttm_operation_ctx *ctx)
 {
-       return ttm_pool_populate(ttm);
+       return ttm_pool_populate(ttm, ctx);
 }
 
 static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
index b0084bd..a7df632 100644 (file)
@@ -1547,7 +1547,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 }
 
 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
        struct ttm_dma_tt *ttm_dma = (void *)ttm;
        struct nouveau_drm *drm;
@@ -1572,17 +1572,17 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
-               return ttm_agp_tt_populate(ttm);
+               return ttm_agp_tt_populate(ttm, ctx);
        }
 #endif
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        if (swiotlb_nr_tbl()) {
-               return ttm_dma_populate((void *)ttm, dev);
+               return ttm_dma_populate((void *)ttm, dev, ctx);
        }
 #endif
 
-       r = ttm_pool_populate(ttm);
+       r = ttm_pool_populate(ttm, ctx);
        if (r) {
                return r;
        }
index 145175b..59cd74c 100644 (file)
@@ -291,14 +291,15 @@ static struct ttm_backend_func qxl_backend_func = {
        .destroy = &qxl_ttm_backend_destroy,
 };
 
-static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm,
+                       struct ttm_operation_ctx *ctx)
 {
        int r;
 
        if (ttm->state != tt_unpopulated)
                return 0;
 
-       r = ttm_pool_populate(ttm);
+       r = ttm_pool_populate(ttm, ctx);
        if (r)
                return r;
 
index 7b915aa..afc5eed 100644 (file)
@@ -721,7 +721,8 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
        return (struct radeon_ttm_tt *)ttm;
 }
 
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
+                       struct ttm_operation_ctx *ctx)
 {
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
        struct radeon_device *rdev;
@@ -750,17 +751,17 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
        rdev = radeon_get_rdev(ttm->bdev);
 #if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
-               return ttm_agp_tt_populate(ttm);
+               return ttm_agp_tt_populate(ttm, ctx);
        }
 #endif
 
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) {
-               return ttm_dma_populate(&gtt->ttm, rdev->dev);
+               return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
        }
 #endif
 
-       return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
+       return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
 }
 
 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
index 028ab60..3e795a0 100644 (file)
@@ -133,12 +133,12 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_agp_tt_create);
 
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
        if (ttm->state != tt_unpopulated)
                return 0;
 
-       return ttm_pool_populate(ttm);
+       return ttm_pool_populate(ttm, ctx);
 }
 EXPORT_SYMBOL(ttm_agp_tt_populate);
 
index 6e353df..b7eb507 100644 (file)
@@ -376,7 +376,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         * TTM might be null for moves within the same region.
         */
        if (ttm && ttm->state == tt_unpopulated) {
-               ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+               ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
                if (ret)
                        goto out1;
        }
@@ -545,14 +545,19 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                           unsigned long num_pages,
                           struct ttm_bo_kmap_obj *map)
 {
-       struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+       struct ttm_mem_reg *mem = &bo->mem;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+       };
        struct ttm_tt *ttm = bo->ttm;
+       pgprot_t prot;
        int ret;
 
        BUG_ON(!ttm);
 
        if (ttm->state == tt_unpopulated) {
-               ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+               ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
                if (ret)
                        return ret;
        }
index 292d157..8e68e70 100644 (file)
@@ -226,12 +226,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);
        } else {
+               struct ttm_operation_ctx ctx = {
+                       .interruptible = false,
+                       .no_wait_gpu = false
+               };
+
                ttm = bo->ttm;
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);
 
                /* Allocate all page at once, most common usage */
-               if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+               if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
                        retval = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
index 8f93ff3..f1a3d55 100644 (file)
@@ -1058,13 +1058,9 @@ void ttm_page_alloc_fini(void)
        _manager = NULL;
 }
 
-int ttm_pool_populate(struct ttm_tt *ttm)
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
        unsigned i;
        int ret;
 
@@ -1080,7 +1076,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 
        for (i = 0; i < ttm->num_pages; ++i) {
                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                               PAGE_SIZE, &ctx);
+                                               PAGE_SIZE, ctx);
                if (unlikely(ret != 0)) {
                        ttm_pool_unpopulate(ttm);
                        return -ENOMEM;
@@ -1117,12 +1113,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+                                       struct ttm_operation_ctx *ctx)
 {
        unsigned i, j;
        int r;
 
-       r = ttm_pool_populate(&tt->ttm);
+       r = ttm_pool_populate(&tt->ttm, ctx);
        if (r)
                return r;
 
index 8aac86a..3ac5391 100644 (file)
@@ -923,14 +923,11 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+                       struct ttm_operation_ctx *ctx)
 {
        struct ttm_tt *ttm = &ttm_dma->ttm;
        struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = false,
-               .no_wait_gpu = false
-       };
        unsigned long num_pages = ttm->num_pages;
        struct dma_pool *pool;
        enum pool_type type;
@@ -966,7 +963,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
                        break;
 
                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                               pool->size, &ctx);
+                                               pool->size, ctx);
                if (unlikely(ret != 0)) {
                        ttm_dma_unpopulate(ttm_dma, dev);
                        return -ENOMEM;
@@ -1002,7 +999,7 @@ skip_huge:
                }
 
                ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-                                               pool->size, &ctx);
+                                               pool->size, ctx);
                if (unlikely(ret != 0)) {
                        ttm_dma_unpopulate(ttm_dma, dev);
                        return -ENOMEM;
index 8ebc8d3..b48d7a0 100644 (file)
@@ -263,6 +263,10 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
 
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+       };
        int ret = 0;
 
        if (!ttm)
@@ -271,7 +275,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
        if (ttm->state == tt_bound)
                return 0;
 
-       ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+       ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
        if (ret)
                return ret;
 
index 6f66b73..0b90cdb 100644 (file)
@@ -124,13 +124,17 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
        int ret;
        struct page **pages = bo->tbo.ttm->pages;
        int nr_pages = bo->tbo.num_pages;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+       };
 
        /* wtf swapping */
        if (bo->pages)
                return 0;
 
        if (bo->tbo.ttm->state == tt_unpopulated)
-               bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+               bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
        bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!bo->pages)
                goto out;
index 43483e9..36655b7 100644 (file)
@@ -324,12 +324,13 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
        .destroy = &virtio_gpu_ttm_backend_destroy,
 };
 
-static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm,
+               struct ttm_operation_ctx *ctx)
 {
        if (ttm->state != tt_unpopulated)
                return 0;
 
-       return ttm_pool_populate(ttm);
+       return ttm_pool_populate(ttm, ctx);
 }
 
 static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
index cb386eb..22231bc 100644 (file)
@@ -635,16 +635,12 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 }
 
 
-static int vmw_ttm_populate(struct ttm_tt *ttm)
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
        int ret;
 
        if (ttm->state != tt_unpopulated)
@@ -653,15 +649,16 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-               ret = ttm_mem_global_alloc(glob, size, &ctx);
+               ret = ttm_mem_global_alloc(glob, size, ctx);
                if (unlikely(ret != 0))
                        return ret;
 
-               ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+               ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+                                       ctx);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
-               ret = ttm_pool_populate(ttm);
+               ret = ttm_pool_populate(ttm, ctx);
 
        return ret;
 }
index b17f08f..736ca47 100644 (file)
@@ -240,6 +240,10 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
        unsigned long offset;
        unsigned long bo_size;
        struct vmw_otable *otables = batch->otables;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+       };
        SVGAOTableType i;
        int ret;
 
@@ -264,7 +268,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 
        ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
        BUG_ON(ret != 0);
-       ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
+       ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(batch->otable_bo);
@@ -430,6 +434,11 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
                               struct vmw_mob *mob)
 {
        int ret;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = false,
+               .no_wait_gpu = false
+       };
+
        BUG_ON(mob->pt_bo != NULL);
 
        ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
@@ -442,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
        ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
 
        BUG_ON(ret != 0);
-       ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+       ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
        if (unlikely(ret != 0))
                goto out_unreserve;
        ret = vmw_bo_map_dma(mob->pt_bo);
index 231c89e..55f14c9 100644 (file)
@@ -213,9 +213,10 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
        return tt;
 }
 
-static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
+                               struct ttm_operation_ctx *ctx)
 {
-       return ttm_pool_populate(ttm);
+       return ttm_pool_populate(ttm, ctx);
 }
 
 static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
index 934fecf..84860ec 100644 (file)
@@ -352,7 +352,8 @@ struct ttm_bo_driver {
         * Returns:
         * -ENOMEM: Out of memory.
         */
-       int (*ttm_tt_populate)(struct ttm_tt *ttm);
+       int (*ttm_tt_populate)(struct ttm_tt *ttm,
+                       struct ttm_operation_ctx *ctx);
 
        /**
         * ttm_tt_unpopulate
@@ -1077,7 +1078,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
                                 struct agp_bridge_data *bridge,
                                 unsigned long size, uint32_t page_flags,
                                 struct page *dummy_read_page);
-int ttm_agp_tt_populate(struct ttm_tt *ttm);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif
 
index 5938113..4d9b019 100644 (file)
@@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void);
  *
  * Add backing pages to all of @ttm
  */
-int ttm_pool_populate(struct ttm_tt *ttm);
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_pool_unpopulate:
@@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
 /**
  * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
  */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+                               struct ttm_operation_ctx *ctx);
 
 /**
  * Unpopulates and DMA unmaps pages as part of a
@@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void);
  */
 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+                       struct ttm_operation_ctx *ctx);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
 #else
@@ -106,7 +108,8 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
        return 0;
 }
 static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
-                                  struct device *dev)
+                               struct device *dev,
+                               struct ttm_operation_ctx *ctx)
 {
        return -ENOMEM;
 }