uint64_t userptr;
struct task_struct *usertask;
uint32_t userflags;
+ bool bound;
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
struct hmm_range *range;
#endif

static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
uint64_t flags;
int r = 0;
+ if (!bo_mem)
+ return -EINVAL;
+
+ if (gtt->bound)
+ return 0;
+
if (gtt->userptr) {
r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
DRM_ERROR("failed to pin userptr\n");
return r;
}
}
flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
+ gtt->bound = true;
return r;
}

static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
+ if (!gtt->bound)
+ return;
+
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
/* unbind the pages from the GART */
r = amdgpu_gart_unbind(amdgpu_ttm_adev(bdev), gtt->offset, ttm->num_pages);
if (r)
DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
+ gtt->bound = false;
}
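
The amdgpu hunks above establish the pattern the rest of the series repeats: the bound flag lives in the driver's private tt struct, bind() short-circuits once bound, unbind() short-circuits when not, and the flag only flips on paths that actually touched the hardware. A minimal sketch of that contract, with the hypothetical foo_* names standing in for the per-driver types (amdgpu_ttm_tt, radeon_ttm_tt, vmw_ttm_tt):

/* Sketch only; foo_* is a hypothetical backend, not a real driver. */
struct foo_ttm_tt {
	struct ttm_tt ttm;
	bool bound;		/* replaces the core's bo->ttm_bound */
};

static int foo_ttm_backend_bind(struct foo_ttm_tt *gtt)
{
	if (gtt->bound)		/* reentrant bind is now a no-op */
		return 0;
	/* ... program the GART/GMR/MOB mapping here ... */
	gtt->bound = true;
	return 0;
}

static void foo_ttm_backend_unbind(struct foo_ttm_tt *gtt)
{
	if (!gtt->bound)	/* unbind without a bind is a no-op */
		return;
	/* ... tear the mapping down here ... */
	gtt->bound = false;
}

The guard matters because destroy() now calls unbind() unconditionally, so teardown paths may reach unbind on a tt that was never bound.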
static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;

+ amdgpu_ttm_backend_unbind(bdev, ttm);
ttm_dma_tt_fini(&gtt->ttm);
kfree(gtt);
}

static int
nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
-
+#endif
+ if (!reg)
+ return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge)
return ttm_agp_bind(ttm, reg);
#endif
return nouveau_sgdma_bind(bdev, ttm, reg);
}

int
nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *mem = nouveau_mem(reg);
int ret;
+ if (nvbe->mem)
+ return 0;
+
ret = nouveau_mem_host(reg, &nvbe->ttm);
if (ret)
return ret;
void
nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- nouveau_mem_fini(nvbe->mem);
+ if (nvbe->mem) {
+ nouveau_mem_fini(nvbe->mem);
+ nvbe->mem = NULL;
+ }
}
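
Nouveau is the one driver that needs no new flag: nvbe->mem already encodes the bound state, so the bind above returns early while it is set and the unbind now clears it after freeing. A hypothetical double-teardown shows why the NULL assignment matters:

/* Hypothetical sequence; safe only because unbind() NULLs nvbe->mem. */
nouveau_sgdma_unbind(bdev, ttm);	/* frees the mem, clears the pointer */
nouveau_sgdma_unbind(bdev, ttm);	/* sees NULL, avoids a double free */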
struct ttm_tt *
uint32_t flags);
extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
+bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);

static bool radeon_mn_invalidate(struct mmu_interval_notifier *mn,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);
struct ttm_operation_ctx ctx = { false, false };
long r;
- if (!bo->tbo.ttm || !ttm_bo_tt_is_bound(&bo->tbo))
+ if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm))
return true;
if (!mmu_notifier_range_blockable(range))
uint64_t userptr;
struct mm_struct *usermm;
uint32_t userflags;
+ bool bound;
};
/* free the sg table and pages again */
sg_free_table(ttm->sg);
}
+static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ return gtt->bound;
+}
+
static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
RADEON_GART_PAGE_WRITE;
int r;
+ if (gtt->bound)
+ return 0;
+
if (gtt->userptr) {
radeon_ttm_tt_pin_userptr(bdev, ttm);
flags &= ~RADEON_GART_PAGE_WRITE;
}
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
r = radeon_gart_bind(radeon_get_rdev(bdev), gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
return r;
}
+ gtt->bound = true;
return 0;
}

static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
struct radeon_device *rdev = radeon_get_rdev(bdev);
+ if (!gtt->bound)
+ return;
+
radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
if (gtt->userptr)
radeon_ttm_tt_unpin_userptr(bdev, ttm);
+ gtt->bound = false;
}
static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;

+ radeon_ttm_backend_unbind(bdev, ttm);
ttm_dma_tt_fini(&gtt->ttm);
kfree(gtt);
}
+bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+ struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+ if (rdev->flags & RADEON_IS_AGP)
+ return ttm_agp_is_bound(ttm);
+#endif
+ return radeon_ttm_backend_is_bound(ttm);
+}
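
radeon is the only driver that exports its query, because the MMU-notifier path in radeon_mn.c (above) asks about bind state from outside radeon_ttm.c. Whatever backend radeon_ttm_tt_bind() below selects, is_bound() must interrogate the same one, which is why both branch on RADEON_IS_AGP identically. The invariant, written as a hypothetical self-check that is not in the source:

/* Hypothetical assertion: a successful bind must read back as bound. */
r = radeon_ttm_tt_bind(bdev, ttm, bo_mem);
WARN_ON(r == 0 && !radeon_ttm_tt_is_bound(bdev, ttm));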
+
static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
+#if IS_ENABLED(CONFIG_AGP)
struct radeon_device *rdev = radeon_get_rdev(bdev);
+#endif
+ if (!bo_mem)
+ return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP)
return ttm_agp_bind(ttm, bo_mem);

int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
struct agp_memory *mem;
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
unsigned i;
+ if (agp_be->mem)
+ return 0;
+
mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
if (unlikely(mem == NULL))
return -ENOMEM;
}
EXPORT_SYMBOL(ttm_agp_unbind);
+bool ttm_agp_is_bound(struct ttm_tt *ttm)
+{
+ struct ttm_agp_backend *agp_be;
+
+ if (!ttm)
+ return false;
+
+ agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ return (agp_be->mem != NULL);
+}
+EXPORT_SYMBOL(ttm_agp_is_bound);
+
void ttm_agp_destroy(struct ttm_tt *ttm)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
{
- int ret;
-
- if (!bo->ttm)
- return -EINVAL;
-
- if (ttm_bo_tt_is_bound(bo))
- return 0;
-
- ret = bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
- if (unlikely(ret != 0))
- return ret;
-
- ttm_bo_tt_set_bound(bo);
- return 0;
+ return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
}
EXPORT_SYMBOL(ttm_bo_tt_bind);
void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
{
- if (ttm_bo_tt_is_bound(bo)) {
- bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
- ttm_bo_tt_set_unbound(bo);
- }
+ bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
}
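
With the core bookkeeping gone, ttm_bo_tt_bind() and ttm_bo_tt_unbind() become plain pass-throughs: idempotency is now the driver's promise rather than the core's, and the old !bo->ttm check in bind is likewise left to callers. A sketch of what a caller may assume, given the driver-side guards above:

/* Assumes a driver implementing the bound-flag guards shown earlier. */
ret = ttm_bo_tt_bind(bo, mem);	/* driver binds and sets its flag */
ret = ttm_bo_tt_bind(bo, mem);	/* driver sees bound, returns 0 */
ttm_bo_tt_unbind(bo);		/* driver unbinds and clears the flag */
ttm_bo_tt_unbind(bo);		/* driver sees !bound, returns */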
if (man->use_tt) {
ghost_obj->ttm = NULL;
- ttm_bo_tt_set_unbound(ghost_obj);
} else {
bo->ttm = NULL;
- ttm_bo_tt_set_unbound(bo);
}
dma_resv_unlock(&ghost_obj->base._resv);
if (to->use_tt) {
ghost_obj->ttm = NULL;
- ttm_bo_tt_set_unbound(ghost_obj);
} else {
bo->ttm = NULL;
- ttm_bo_tt_set_unbound(bo);
}
dma_resv_unlock(&ghost_obj->base._resv);
memset(&bo->mem, 0, sizeof(bo->mem));
bo->mem.mem_type = TTM_PL_SYSTEM;
bo->ttm = NULL;
- ttm_bo_tt_set_unbound(bo);
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
struct vmw_sg_table vsgt;
uint64_t sg_alloc_size;
bool mapped;
+ bool bound;
};
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

static int vmw_ttm_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
- int ret;
+ int ret = 0;
+
+ if (!bo_mem)
+ return -EINVAL;
+
+ if (vmw_be->bound)
+ return 0;
ret = vmw_ttm_map_dma(vmw_be);
if (unlikely(ret != 0))
return ret;

vmw_be->gmr_id = bo_mem->start;
vmw_be->mem_type = bo_mem->mem_type;

switch (bo_mem->mem_type) {
case VMW_PL_GMR:
- return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+ ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
ttm->num_pages, vmw_be->gmr_id);
+ break;
case VMW_PL_MOB:
if (unlikely(vmw_be->mob == NULL)) {
vmw_be->mob = vmw_mob_create(ttm->num_pages);
if (unlikely(vmw_be->mob == NULL))
return -ENOMEM;
}
- return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+ ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id);
+ break;
default:
BUG();
}
- return 0;
+ vmw_be->bound = true;
+ return ret;
}
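
vmwgfx previously returned straight out of each switch case; funnelling every case through ret and break creates the single exit where the flag is set exactly once. Note that vmw_be->bound is set even when ret is non-zero, which means a bind that failed partway still gets a full unbind and DMA unmap on teardown. The skeleton of the rewrite, with bind_gmr()/bind_mob() as hypothetical stand-ins for the vmw_gmr_bind()/vmw_mob_bind() calls:

/* Skeleton only; bind_gmr()/bind_mob() are hypothetical helpers. */
switch (bo_mem->mem_type) {
case VMW_PL_GMR:
	ret = bind_gmr(vmw_be);
	break;
case VMW_PL_MOB:
	ret = bind_mob(vmw_be);
	break;
default:
	BUG();
}
vmw_be->bound = true;	/* one exit, one place to flip the flag */
return ret;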
static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ if (!vmw_be->bound)
+ return;
+
switch (vmw_be->mem_type) {
case VMW_PL_GMR:
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
break;
case VMW_PL_MOB:
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
break;
default:
BUG();
}

if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
vmw_ttm_unmap_dma(vmw_be);
+ vmw_be->bound = false;
}
struct ttm_resource mem;
struct file *persistent_swap_storage;
struct ttm_tt *ttm;
- bool ttm_bound;
bool evicted;
bool deleted;
*/
void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);
-static inline bool ttm_bo_tt_is_bound(struct ttm_buffer_object *bo)
-{
- return bo->ttm_bound;
-}
-
-static inline void ttm_bo_tt_set_unbound(struct ttm_buffer_object *bo)
-{
- bo->ttm_bound = false;
-}
-
-static inline void ttm_bo_tt_set_bound(struct ttm_buffer_object *bo)
-{
- bo->ttm_bound = true;
-}
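
bo->ttm_bound and its three inline accessors leave the core API entirely; anything that still needs the answer must ask the driver, as radeon_mn.c does above. For code built on the old helpers the migration is mechanical (radeon shown, since it is the only driver exporting a query; the others keep their flag private):

/* Before this series, the core tracked the state: */
bound = ttm_bo_tt_is_bound(&bo->tbo);
/* After it, only the driver knows: */
bound = radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm);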
/**
* ttm_bo_tt_destroy.
*/
int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
void ttm_agp_unbind(struct ttm_tt *ttm);
void ttm_agp_destroy(struct ttm_tt *ttm);
+bool ttm_agp_is_bound(struct ttm_tt *ttm);
#endif
#endif