}
/**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	/* pin buffer into GTT */
+	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	amdgpu_bo_unpin(bo);
+}
+
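These two callbacks are reached through dma_buf_pin() and dma_buf_unpin() in the dma-buf core, which expect the buffer's reservation lock to be held. As a rough, hypothetical sketch (not part of this patch; "attach" stands in for a dynamic dma_buf_attachment set up elsewhere), an importer that needs the buffer to stay in place would use them like this:

	/* Hypothetical importer-side sketch, not part of the patch:
	 * pin the exporter's BO so it stays put (e.g. for scanout),
	 * then release it again.
	 */
	int r;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	r = dma_buf_pin(attach);		/* -> amdgpu_dma_buf_pin() */
	if (!r) {
		/* ... buffer can now be used without it being moved ... */
		dma_buf_unpin(attach);		/* -> amdgpu_dma_buf_unpin() */
	}
	dma_resv_unlock(attach->dmabuf->resv);
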
+/**
* amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
* @dir: DMA direction
	struct sg_table *sgt;
	long r;

-	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
-	if (r)
-		return ERR_PTR(r);
+	if (!bo->pin_count) {
+		/* move buffer into GTT */
+		struct ttm_operation_ctx ctx = { false, false };
+
+		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		if (r)
+			return ERR_PTR(r);
+
+	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+		     AMDGPU_GEM_DOMAIN_GTT)) {
+		return ERR_PTR(-EBUSY);
+	}

	sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
	if (IS_ERR(sgt))
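The map path now only migrates the BO into GTT while nothing has it pinned; a BO that is already pinned outside of GTT (for example pinned to VRAM for scanout) cannot be mapped and yields -EBUSY. The `{ false, false }` initializer sets the first two ttm_operation_ctx members, interruptible and no_wait_gpu. A hypothetical, self-contained restatement of that logic (the helper name is invented, not part of the patch) could look like this:

/* Hypothetical helper, not in the patch: validate a BO into GTT for a
 * DMA-buf mapping unless it is already pinned.  The caller must hold
 * the BO's reservation lock.
 */
static int amdgpu_dma_buf_validate_gtt(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,		/* same as { false, false } above */
		.no_wait_gpu = false,
	};

	if (bo->pin_count)
		/* a pinned BO can't be moved; only usable if already in GTT */
		return (amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
			AMDGPU_GEM_DOMAIN_GTT) ? 0 : -EBUSY;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
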
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
-	struct drm_gem_object *obj = attach->dmabuf->priv;
-	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
-	amdgpu_bo_unpin(bo);
}
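With the pin reference no longer owned by the mapping, unmapping and unpinning become two separate steps on the importer side. A hedged teardown sketch (again hypothetical; attach and sgt are placeholders for an attachment and its mapping):

	/* Hypothetical teardown sketch, not part of the patch: drop the
	 * mapping first, then the pin, so the BO becomes movable again.
	 */
	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_unpin(attach);			/* -> amdgpu_dma_buf_unpin() */
	dma_resv_unlock(attach->dmabuf->resv);
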
/**
	.dynamic_mapping = true,
	.attach = amdgpu_dma_buf_attach,
	.detach = amdgpu_dma_buf_detach,
+	.pin = amdgpu_dma_buf_pin,
+	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
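
For reference, the two new hooks slot into struct dma_buf_ops with the same signatures the functions above implement; a simplified excerpt (the struct name here is shortened for illustration, the real definition lives in include/linux/dma-buf.h):

/* Simplified illustration; the real struct dma_buf_ops in
 * include/linux/dma-buf.h has many more members.
 */
struct dma_buf_ops_excerpt {
	int (*pin)(struct dma_buf_attachment *attach);
	void (*unpin)(struct dma_buf_attachment *attach);
	/* attach, detach, map_dma_buf, unmap_dma_buf, release, ... omitted */
};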