	if (ret)
		return ret;
-	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
+	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;

			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			&fence);
	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
+	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
				      evict, no_wait_reserve, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
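
Both driver conversions above (the nouveau and radeon blit-move paths) follow the same pattern: queue the copy, hand the resulting fence to TTM as an opaque sync object, then drop the local fence reference. The removed third argument was NULL in both callers, so nothing is lost. Below is a minimal sketch of that pattern; the mydrv_* names are hypothetical, only ttm_bo_move_accel_cleanup() and the TTM types are real:

/* Hypothetical driver hooks, declared only so the sketch is complete. */
struct mydrv_fence;
int mydrv_copy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
	       struct mydrv_fence **fence);
void mydrv_fence_unref(struct mydrv_fence **fence);

static int mydrv_move_blit(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_reserve,
			   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct mydrv_fence *fence = NULL;
	int ret;

	/* Queue the accelerated copy and get back a fence for it. */
	ret = mydrv_copy(bo, new_mem, &fence);
	if (ret)
		return ret;

	/* TTM takes its own reference on the sync object, so the local
	 * reference can be dropped right after the call, exactly as the
	 * nouveau and radeon hunks above do. */
	ret = ttm_bo_move_accel_cleanup(bo, (void *)fence, evict,
					no_wait_reserve, no_wait_gpu,
					new_mem);
	mydrv_fence_unref(&fence);
	return ret;
}
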
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
-			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)

*
* @bo: A pointer to a struct ttm_buffer_object.
* @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
* @evict: This is an evict move. Don't return until the buffer is idle.
* @no_wait_reserve: Return immediately if other buffers are busy.
* @no_wait_gpu: Return immediately if the GPU is busy.
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
-				     void *sync_obj_arg,
				     bool evict, bool no_wait_reserve,
				     bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
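
The parameter being removed was dead weight: both callers converted above passed NULL for it, and anything the sync object idle / wait functions need is reachable through the sync object pointer itself. Schematically, with hypothetical mydrv_* names (the hook shape mirrors TTM's sync_obj callbacks as an assumption; it is not quoted from this patch):

/* Hypothetical: a driver fence is self-describing, so waiting on it
 * needs no side-band argument. That is what made sync_obj_arg removable. */
struct mydrv_fence;
int mydrv_fence_wait(struct mydrv_fence *fence, bool lazy, bool interruptible);

static int mydrv_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	struct mydrv_fence *fence = sync_obj;

	return mydrv_fence_wait(fence, lazy, interruptible);
}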