extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
+extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+ unsigned long bo_size,
+ struct ttm_buffer_object **bo_p);
+
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
unsigned long p_offs);
unsigned long offset;
unsigned long bo_size;
struct vmw_otable *otables = batch->otables;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
SVGAOTableType i;
int ret;
bo_size += otables[i].size;
}
- ret = ttm_bo_create(&dev_priv->bdev, bo_size,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, &batch->otable_bo);
-
- if (unlikely(ret != 0))
- goto out_no_bo;
-
- ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
- BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
- if (unlikely(ret != 0))
- goto out_unreserve;
- ret = vmw_bo_map_dma(batch->otable_bo);
+ ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
if (unlikely(ret != 0))
- goto out_unreserve;
-
- ttm_bo_unreserve(batch->otable_bo);
+ return ret;
offset = 0;
for (i = 0; i < batch->num_otables; ++i) {
return 0;
-out_unreserve:
- ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
for (i = 0; i < batch->num_otables; ++i) {
if (batch->otables[i].enabled)
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
-out_no_bo:
return ret;
}
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob)
{
- int ret;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
-
BUG_ON(mob->pt_bo != NULL);
- ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, &mob->pt_bo);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
-
- BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
- if (unlikely(ret != 0))
- goto out_unreserve;
- ret = vmw_bo_map_dma(mob->pt_bo);
- if (unlikely(ret != 0))
- goto out_unreserve;
-
- ttm_bo_unreserve(mob->pt_bo);
-
- return 0;
-
-out_unreserve:
- ttm_bo_unreserve(mob->pt_bo);
- ttm_bo_put(mob->pt_bo);
- mob->pt_bo = NULL;
-
- return ret;
+ return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE,
+ &mob->pt_bo);
}
/**
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
+
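+/**
+ * vmw_bo_create_and_populate - Create and populate a DMA-mapped buffer object
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @bo_size: Size of the buffer object to create.
+ * @bo_p: On success, set to point to the created buffer object.
+ *
+ * Creates a buffer object in the non-evictable system placement,
+ * populates its pages and sets up the DMA mappings. On success the
+ * object is returned unreserved, with its single reference held by
+ * the caller.
+ *
+ * Return: Zero on success, negative error code on failure.
+ */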
+int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+ unsigned long bo_size,
+ struct ttm_buffer_object **bo_p)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ struct ttm_buffer_object *bo;
+ int ret;
+
+ ret = ttm_bo_create(&dev_priv->bdev, bo_size,
+ ttm_bo_type_device,
+ &vmw_sys_ne_placement,
+ 0, false, &bo);
+
+ if (unlikely(ret != 0))
+ return ret;
+
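+ /*
+ * The bo is not yet visible to anyone else, so the no-wait
+ * reservation of a freshly created object cannot contend and
+ * must succeed.
+ */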
+ ret = ttm_bo_reserve(bo, false, true, NULL);
+ BUG_ON(ret != 0);
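+ /* Populate the TTM with system pages, then set up the DMA mappings. */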
+ ret = vmw_bo_driver.ttm_tt_populate(bo->ttm, &ctx);
+ if (likely(ret == 0))
+ ret = vmw_bo_map_dma(bo);
+
+ ttm_bo_unreserve(bo);
+
+ if (likely(ret == 0))
+ *bo_p = bo;
+ else
+ ttm_bo_put(bo); /* Don't leak the bo on populate/map failure. */
+ return ret;
+}