amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
- amd_sched_entity_push_job(&job->base);
+ amd_sched_entity_push_job(&job->base, entity);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring, job->base.s_priority);
- amd_sched_entity_push_job(&job->base);
+ amd_sched_entity_push_job(&job->base, entity);
return 0;
}
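
With the s_entity field gone, every submit path has to hand the entity to amd_sched_entity_push_job() itself. A minimal sketch of the resulting pattern, modeled on the amdgpu_job_submit() hunk above (example_submit is a hypothetical name, error handling trimmed):

static int example_submit(struct amdgpu_job *job,
			  struct amd_sched_entity *entity,
			  void *owner, struct dma_fence **f)
{
	int r;

	/* the entity is bound to the job's fence at init time ... */
	r = amd_sched_job_init(&job->base, &job->ring->sched, entity, owner);
	if (r)
		return r;

	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	/* ... and must now be passed again explicitly when pushing */
	amd_sched_entity_push_job(&job->base, entity);

	return 0;
}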
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
+ struct amd_sched_entity *s_entity)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
int r;
- if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
+ if (amd_sched_dependency_optimized(fence, s_entity)) {
r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
if (r)
DRM_ERROR("Error adding fence to sync (%d)\n", r);
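
For reference, the optimization being tested here: a dependency fence that is itself a scheduler fence destined for the same scheduler as s_entity needs no CPU-side wait, since the ring already executes jobs in order; it is merely recorded in job->sched_sync. A hedged sketch of that check (dependency_is_optimizable is a hypothetical stand-in approximating amd_sched_dependency_optimized(); field names follow this scheduler):

static bool dependency_is_optimizable(struct dma_fence *fence,
				      struct amd_sched_entity *s_entity)
{
	struct amd_sched_fence *s_fence;

	/* signaled or missing fences need no handling at all */
	if (!fence || dma_fence_is_signaled(fence))
		return false;
	/* fences from the entity itself are ordered anyway */
	if (fence->context == s_entity->fence_context)
		return false;
	/* a scheduler fence for the same scheduler keeps ring order */
	s_fence = to_amd_sched_fence(fence);
	return s_fence && s_fence->sched == s_entity->sched;
}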
#define TRACE_INCLUDE_FILE gpu_sched_trace
TRACE_EVENT(amd_sched_job,
- TP_PROTO(struct amd_sched_job *sched_job),
- TP_ARGS(sched_job),
+ TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity),
+ TP_ARGS(sched_job, entity),
TP_STRUCT__entry(
	__field(struct amd_sched_entity *, entity)
	__field(struct dma_fence *, fence)
	__field(const char *, name)
	__field(uint64_t, id)
	__field(u32, job_count)
	__field(int, hw_job_count)
	),
TP_fast_assign(
- __entry->entity = sched_job->s_entity;
+ __entry->entity = entity;
__entry->id = sched_job->id;
__entry->fence = &sched_job->s_fence->finished;
__entry->name = sched_job->sched->name;
- __entry->job_count = spsc_queue_count(
- &sched_job->s_entity->job_queue);
+ __entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->hw_rq_count);
),
if (!sched_job)
return NULL;
- while ((entity->dependency = sched->ops->dependency(sched_job)))
+ while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
if (amd_sched_entity_add_dependency_cb(entity))
return NULL;
- sched_job->s_entity = NULL;
spsc_queue_pop(&entity->job_queue);
return sched_job;
}
*
* Note: there is no return value; the job is queued unconditionally.
*/
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
+ struct amd_sched_entity *entity)
{
struct amd_gpu_scheduler *sched = sched_job->sched;
- struct amd_sched_entity *entity = sched_job->s_entity;
bool first = false;
- trace_amd_sched_job(sched_job);
+ trace_amd_sched_job(sched_job, entity);
spin_lock(&entity->queue_lock);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
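
The hunk ends at the push itself; the point of the first flag is that only a push into an empty queue has to make the entity runnable and wake the scheduler, while later jobs are consumed by the already running worker. A sketch of that tail, assuming the usual pattern in this file:

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}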
job->sched->ops->timedout_job(job);
}
-static void amd_sched_set_guilty(struct amd_sched_job *s_job)
+static void amd_sched_set_guilty(struct amd_sched_job *s_job,
+ struct amd_sched_entity *s_entity)
{
if (atomic_inc_return(&s_job->karma) > s_job->sched->hang_limit)
- if (s_job->s_entity->guilty)
- atomic_set(s_job->s_entity->guilty, 1);
+ if (s_entity->guilty)
+ atomic_set(s_entity->guilty, 1);
}
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
if (bad->s_fence->scheduled.context == entity->fence_context) {
found = true;
- amd_sched_set_guilty(bad);
+ amd_sched_set_guilty(bad, entity);
break;
}
}
void *owner)
{
job->sched = sched;
- job->s_entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
job->s_fence = amd_sched_fence_create(entity, owner);
if (!job->s_fence)
struct amd_sched_job {
struct spsc_node queue_node;
struct amd_gpu_scheduler *sched;
- struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
struct dma_fence_cb finish_cb;
struct work_struct finish_work;
* these functions should be implemented on the driver side
*/
struct amd_sched_backend_ops {
- struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct dma_fence *(*dependency)(struct amd_sched_job *sched_job,
+ struct amd_sched_entity *s_entity);
struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
void (*timedout_job)(struct amd_sched_job *sched_job);
void (*free_job)(struct amd_sched_job *sched_job);
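
Seen from a driver, only the dependency() hook changes shape. A minimal sketch of an ops table against the new interface (the example_* callbacks are hypothetical stand-ins; amdgpu wires these slots to amdgpu_job_dependency and friends):

static struct dma_fence *example_dependency(struct amd_sched_job *sched_job,
					    struct amd_sched_entity *s_entity)
{
	return NULL;	/* no outstanding dependencies */
}

static struct dma_fence *example_run_job(struct amd_sched_job *sched_job)
{
	return NULL;	/* submit to hardware, return the hw fence */
}

static void example_timedout_job(struct amd_sched_job *sched_job)
{
	/* reset/recovery logic would live here */
}

static void example_free_job(struct amd_sched_job *sched_job)
{
	/* release job resources */
}

static const struct amd_sched_backend_ops example_sched_ops = {
	.dependency	= example_dependency,
	.run_job	= example_run_job,
	.timedout_job	= example_timedout_job,
	.free_job	= example_free_job,
};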
uint32_t jobs, atomic_t *guilty);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity);
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
+ struct amd_sched_entity *entity);
void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
struct amd_sched_rq *rq);