        start_jiffies = jiffies;
        for (i = 0; i < n; i++) {
                struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-                 r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
+                 r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
+                                        false);
                if (r)
                        goto exit_do_move;
                r = fence_wait(fence, false);
        amdgpu_bo_kunmap(gtt_obj[i]);
        r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-                                size, NULL, &fence);
+                                size, NULL, &fence, false);
        if (r) {
                DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
        amdgpu_bo_kunmap(vram_obj);
        r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-                                size, NULL, &fence);
+                                size, NULL, &fence, false);
        if (r) {
                DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
        r = amdgpu_copy_buffer(ring, old_start, new_start,
                               new_mem->num_pages * PAGE_SIZE, /* bytes */
-                                bo->resv, &fence);
+                                bo->resv, &fence, false);
        if (r)
                return r;
                       uint64_t dst_offset,
                       uint32_t byte_count,
                       struct reservation_object *resv,
-                        struct fence **fence)
+                        struct fence **fence, bool direct_submit)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
-         r = amdgpu_job_submit(job, ring, &adev->mman.entity,
-                               AMDGPU_FENCE_OWNER_UNDEFINED, fence);
-         if (r)
-                 goto error_free;
+         if (direct_submit) {
+                 r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
+                                        NULL, NULL, fence);
+                 job->fence = fence_get(*fence);
+                 if (r)
+                         DRM_ERROR("Error scheduling IBs (%d)\n", r);
+                 amdgpu_job_free(job);
+         } else {
+                 r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+                                       AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+                 if (r)
+                         goto error_free;
+         }
-         return 0;
+         return r;
error_free:
        amdgpu_job_free(job);
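
With the flag in place, a caller that cannot (or does not want to) go through the GPU scheduler can pass true to push the copy IB straight to the SDMA ring via amdgpu_ib_schedule(), while existing callers keep passing false as in the hunks above. The helper below is only an illustrative sketch, not part of the patch; its name is made up, and it assumes the caller already has valid GPU addresses. It reuses adev->mman.buffer_funcs_ring, fence_wait() and fence_put() exactly as the test and benchmark code does.

/* Hypothetical helper (not in the patch): copy via the new direct
 * submission path and wait for completion. */
static int example_direct_copy(struct amdgpu_device *adev, uint64_t src,
                               uint64_t dst, uint32_t bytes)
{
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
        struct fence *fence = NULL;
        int r;

        /* true selects direct submission, bypassing adev->mman.entity */
        r = amdgpu_copy_buffer(ring, src, dst, bytes, NULL, &fence, true);
        if (r)
                return r;

        /* the direct path still hands back a fence to wait on */
        r = fence_wait(fence, false);
        fence_put(fence);
        return r;
}
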
                       uint64_t dst_offset,
                       uint32_t byte_count,
                       struct reservation_object *resv,
-                        struct fence **fence);
+                        struct fence **fence, bool direct_submit);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                       uint32_t src_data,
                       struct reservation_object *resv,