static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
- void *job)
+ struct amd_sched_job *job)
{
int r = 0;
- struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
+ struct amdgpu_cs_parser *sched_job;
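+ /* job is the scheduler-owned wrapper; the actual amdgpu_cs_parser
+  * rides in job->data, so validate both before dereferencing */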
+ if (!job || !job->data) {
+ DRM_ERROR("job is null\n");
+ return -EINVAL;
+ }
+
+ sched_job = (struct amdgpu_cs_parser *)job->data;
if (sched_job->prepare_job) {
r = sched_job->prepare_job(sched_job);
if (r) {
struct amdgpu_cs_parser *sched_job;
struct amdgpu_fence *fence;
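+ /* same convention as prepare_job: the CS parser to submit is carried in job->data */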
- if (!job || !job->job) {
+ if (!job || !job->data) {
DRM_ERROR("job is null\n");
return NULL;
}
- sched_job = (struct amdgpu_cs_parser *)job->job;
+ sched_job = (struct amdgpu_cs_parser *)job->data;
mutex_lock(&sched_job->job_lock);
r = amdgpu_ib_schedule(sched_job->adev,
sched_job->num_ibs,
return NULL;
}
-static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
+static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
+ struct amd_sched_job *job)
{
- struct amdgpu_cs_parser *sched_job = NULL;
- struct amdgpu_fence *fence = NULL;
- struct amdgpu_ring *ring = NULL;
- struct amdgpu_device *adev = NULL;
+ struct amdgpu_cs_parser *sched_job;
- if (!job)
- return;
- sched_job = (struct amdgpu_cs_parser *)job;
- fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
- if (!fence)
+ if (!job || !job->data) {
+ DRM_ERROR("job is null\n");
return;
- ring = fence->ring;
- adev = ring->adev;
-
+ }
+ sched_job = (struct amdgpu_cs_parser *)job->data;
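+ /* completion path: hand the finished submission back to the parser's own work item */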
schedule_work(&sched_job->job_work);
}
*/
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *c_entity,
- void *job)
+ void *data)
{
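+ /* wrap the driver-private data in a scheduler-owned amd_sched_job so
+  * the core can queue and track it without knowing the payload type */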
+ struct amd_sched_job *job = kzalloc(sizeof(struct amd_sched_job),
+ GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+ job->sched = sched;
+ job->s_entity = c_entity;
+ job->data = data;
while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
&c_entity->queue_lock) != sizeof(void *)) {
/**
atomic64_dec(&sched->hw_rq_count);
spin_unlock_irqrestore(&sched->queue_lock, flags);
- sched->ops->process_job(sched, sched_job->job);
+ sched->ops->process_job(sched, sched_job);
kfree(sched_job);
wake_up_interruptible(&sched->wait_queue);
}
static int amd_sched_main(void *param)
{
int r;
- void *job;
+ struct amd_sched_job *job;
struct sched_param sparam = {.sched_priority = 1};
struct amd_sched_entity *c_entity = NULL;
struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_job *sched_job = NULL;
struct fence *fence;
wait_event_interruptible(sched->wait_queue,
r = sched->ops->prepare_job(sched, c_entity, job);
if (!r) {
unsigned long flags;
- sched_job = kzalloc(sizeof(struct amd_sched_job),
- GFP_KERNEL);
- if (!sched_job) {
- WARN(true, "No memory to allocate\n");
- continue;
- }
- sched_job->job = job;
- sched_job->sched = sched;
spin_lock_irqsave(&sched->queue_lock, flags);
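+ /* keep the job on active_hw_rq until its hardware fence signals */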
- list_add_tail(&sched_job->list, &sched->active_hw_rq);
+ list_add_tail(&job->list, &sched->active_hw_rq);
atomic64_inc(&sched->hw_rq_count);
spin_unlock_irqrestore(&sched->queue_lock, flags);
}
mutex_lock(&sched->sched_lock);
- fence = sched->ops->run_job(sched, c_entity, sched_job);
+ fence = sched->ops->run_job(sched, c_entity, job);
if (fence) {
- r = fence_add_callback(fence, &sched_job->cb,
+ r = fence_add_callback(fence, &job->cb,
amd_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &sched_job->cb);
+ amd_sched_process_job(fence, &job->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n", r);
fence_put(fence);
struct list_head list;
struct fence_cb cb;
struct amd_gpu_scheduler *sched;
- void *job;
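+ /* entity the job was pushed to and the driver-private payload */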
+ struct amd_sched_entity *s_entity;
+ void *data;
};
/**
struct amd_sched_backend_ops {
int (*prepare_job)(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *c_entity,
- void *job);
+ struct amd_sched_job *job);
struct fence *(*run_job)(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *c_entity,
struct amd_sched_job *job);
- void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
+ void (*process_job)(struct amd_gpu_scheduler *sched,
+ struct amd_sched_job *job);
};
/**
uint32_t granularity,
uint32_t preemption,
uint32_t hw_submission);
-
int amd_sched_destroy(struct amd_gpu_scheduler *sched);
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *c_entity,
- void *job);
+ void *data);
int amd_sched_wait_emit(struct amd_sched_entity *c_entity,
uint64_t seq,