We actually don't bind here, but rather allocate GART space if necessary.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
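
The call sites below all follow the same pattern: make sure GART space is allocated for the BO before using its GPU offset. A minimal sketch of that pattern, assuming only the helpers visible in the hunks (amdgpu_ttm_alloc_gart() and amdgpu_bo_gpu_offset()); the wrapper name, the out parameter and the error message are illustrative and not part of this patch:

#include "amdgpu.h"	/* driver-local header providing the types used below */

/* Illustrative sketch only: caller-side pattern after the rename. */
static int example_get_gart_addr(struct amdgpu_device *adev,
				 struct amdgpu_bo *bo, uint64_t *gpu_addr)
{
	int r;

	/* We don't bind here, but rather allocate GART space if necessary
	 * (hence the rename from amdgpu_ttm_bind).
	 */
	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p GART allocation failed\n", bo);
		return r;
	}

	/* The GPU offset is only usable once GART space exists. */
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	return 0;
}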
 	if (!r && p->uf_entry.robj) {
 		struct amdgpu_bo *uf = p->uf_entry.robj;
-		r = amdgpu_ttm_bind(&uf->tbo);
+		r = amdgpu_ttm_alloc_gart(&uf->tbo);
 		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
 	}
 	return r;
 }
-	return amdgpu_ttm_bind(&(*bo)->tbo);
+	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
 }
 		goto error;
 	}
-	r = amdgpu_ttm_bind(&bo->tbo);
+	r = amdgpu_ttm_alloc_gart(&bo->tbo);
 	if (unlikely(r)) {
 		dev_err(adev->dev, "%p bind failed\n", bo);
 		goto error;
 	return r;
 }
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo)
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct ttm_mem_reg tmp;
 	}
 	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		r = amdgpu_ttm_bind(&bo->tbo);
+		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 		if (r)
 			return r;
 	}
 			struct dma_fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);