@@ ... @@ amdgpu_ib_schedule
 {
         struct amdgpu_device *adev = ring->adev;
         struct amdgpu_ib *ib = &ibs[0];
+        struct dma_fence *tmp;
         bool skip_preamble, need_ctx_switch;
         unsigned patch_offset = ~0;
         struct amdgpu_vm *vm;
@@ ... @@ amdgpu_ib_schedule
                 dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                 return r;
         }
 
-        if (ring->funcs->emit_pipeline_sync && job && job->need_pipeline_sync)
+
+        if (ring->funcs->emit_pipeline_sync && job &&
+            (tmp = amdgpu_sync_get_fence(&job->sched_sync))) {
+                job->need_pipeline_sync = true;
                 amdgpu_ring_emit_pipeline_sync(ring);
+                dma_fence_put(tmp);
+        }
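With this hunk the pipeline sync is no longer driven by a flag computed earlier: amdgpu_sync_get_fence(&job->sched_sync) itself is the test. It returns (and removes) a fence that the dependency callback below stashed when the scheduler optimized a dependency away; if sched_sync is empty it returns NULL and the sync packet is skipped. The returned reference is dropped with dma_fence_put() once amdgpu_ring_emit_pipeline_sync() has run.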
 
         if (vm) {
                 r = amdgpu_vm_flush(ring, job);
@@ ... @@ amdgpu_job_alloc
         (*job)->need_pipeline_sync = false;
 
         amdgpu_sync_create(&(*job)->sync);
+        amdgpu_sync_create(&(*job)->sched_sync);
 
         return 0;
 }
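amdgpu_job_alloc() now initializes the new container alongside the existing job->sync, so every job starts out with an empty sched_sync.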
@@ ... @@ amdgpu_job_free_cb
         dma_fence_put(job->fence);
         amdgpu_sync_free(&job->sync);
+        amdgpu_sync_free(&job->sched_sync);
         kfree(job);
 }
@@ ... @@ amdgpu_job_free
         dma_fence_put(job->fence);
         amdgpu_sync_free(&job->sync);
+        amdgpu_sync_free(&job->sched_sync);
         kfree(job);
 }
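Both teardown paths, the scheduler's free callback and the direct amdgpu_job_free(), release sched_sync symmetrically with job->sync, so a fence still parked there at job destruction is not leaked.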
@@ ... @@ amdgpu_job_dependency
         struct amdgpu_vm *vm = job->vm;
         struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
+        int r;
 
         while (fence == NULL && vm && !job->vm_id) {
                 struct amdgpu_ring *ring = job->ring;
-                int r;
 
                 r = amdgpu_vm_grab_id(vm, ring, &job->sync,
                                       &job->base.s_fence->finished,
                                       job);
                 if (r)
                         DRM_ERROR("Error getting VM ID (%d)\n", r);
 
                 fence = amdgpu_sync_get_fence(&job->sync);
         }
 
-        if (amd_sched_dependency_optimized(fence, sched_job->s_entity))
-                job->need_pipeline_sync = true;
-
+        if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
+                r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
+                if (r)
+                        DRM_ERROR("Error adding fence to sync (%d)\n", r);
+        }
         return fence;
 }
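The dependency callback is where sched_sync gets filled: when amd_sched_dependency_optimized() reports that the scheduler will not explicitly wait on the returned fence (it comes from the same scheduler entity), the fence is recorded in job->sched_sync, and the submission path above converts that record into an explicit pipeline sync on the ring. A minimal userspace sketch of this handshake; struct sync and sync_create()/sync_add()/sync_get() are invented stand-ins for the amdgpu_sync_* helpers, not the kernel API:

/*
 * Illustration only, not kernel code: dependencies the scheduler
 * optimized away are recorded in a second container, and the
 * submission path emits a pipeline sync only if that container is
 * non-empty, consuming one entry per check.
 */
#include <stdbool.h>
#include <stdio.h>

struct sync { int pending; };            /* stand-in for struct amdgpu_sync */

static void sync_create(struct sync *s) { s->pending = 0; }
static void sync_add(struct sync *s)    { s->pending++; }

/* Like amdgpu_sync_get_fence(): consumes one recorded entry, if any. */
static bool sync_get(struct sync *s)
{
        if (s->pending == 0)
                return false;
        s->pending--;
        return true;
}

int main(void)
{
        struct sync sched_sync;
        bool optimized = true;          /* pretend the dependency was optimized */

        sync_create(&sched_sync);       /* as in amdgpu_job_alloc() */

        if (optimized)                  /* as in amdgpu_job_dependency() */
                sync_add(&sched_sync);

        /* as in amdgpu_ib_schedule(): sync only when something was recorded */
        if (sync_get(&sched_sync))
                puts("emit pipeline sync");
        else
                puts("skip pipeline sync");

        return 0;
}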