drm/amd: abstract kernel rq and normal rq to priority of run queue
Author:     Chunming Zhou <David1.Zhou@amd.com>
AuthorDate: Thu, 5 Nov 2015 07:23:09 +0000 (15:23 +0800)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Wed, 2 Dec 2015 20:54:33 +0000 (15:54 -0500)
Replace the scheduler's dedicated kernel and normal run queues with an
array of run queues indexed by the new enum amd_sched_priority. This
allows us to set priorities in the scheduler: contexts pick a priority
level instead of passing a bool, and entities are served in descending
priority order, kernel first.
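The shape of the change, as a minimal user-space model (an illustrative
sketch only; struct rq, struct scheduler and select_rq are simplified
stand-ins for the kernel's amd_sched_rq, amd_gpu_scheduler and
amd_sched_select_entity):

    #include <stdio.h>

    enum amd_sched_priority {
            AMD_SCHED_PRIORITY_KERNEL = 0,
            AMD_SCHED_PRIORITY_NORMAL,
            AMD_SCHED_MAX_PRIORITY
    };

    /* Toy run queue: only tracks whether any entity is waiting. */
    struct rq {
            int has_entity;
    };

    struct scheduler {
            struct rq sched_rq[AMD_SCHED_MAX_PRIORITY];
    };

    /* Mirrors the new selection loop: scan from index 0, so a lower
     * index means a higher priority and the kernel queue always wins. */
    static struct rq *select_rq(struct scheduler *s)
    {
            int i;

            for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                    if (s->sched_rq[i].has_entity)
                            return &s->sched_rq[i];
            return NULL;
    }

    int main(void)
    {
            struct scheduler s = {
                    .sched_rq[AMD_SCHED_PRIORITY_NORMAL] = { .has_entity = 1 },
            };
            struct rq *rq = select_rq(&s);

            /* Only the normal queue holds work, so index 1 is picked. */
            printf("selected rq index: %ld\n",
                   rq ? (long)(rq - s.sched_rq) : -1L);
            return 0;
    }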

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1799973..5f97503 100644
@@ -1044,7 +1044,7 @@ struct amdgpu_ctx_mgr {
        struct idr              ctx_handles;
 };
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
                    struct amdgpu_ctx *ctx);
 void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index fec65f0..c1f2308 100644
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
                    struct amdgpu_ctx *ctx)
 {
        unsigned i, j;
@@ -42,10 +42,9 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
                /* create context entity for each ring */
                for (i = 0; i < adev->num_rings; i++) {
                        struct amd_sched_rq *rq;
-                       if (kernel)
-                               rq = &adev->rings[i]->sched.kernel_rq;
-                       else
-                               rq = &adev->rings[i]->sched.sched_rq;
+                       if (pri >= AMD_SCHED_MAX_PRIORITY)
+                               return -EINVAL;
+                       rq = &adev->rings[i]->sched.sched_rq[pri];
                        r = amd_sched_entity_init(&adev->rings[i]->sched,
                                                  &ctx->rings[i].entity,
                                                  rq, amdgpu_sched_jobs);
@@ -103,7 +102,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                return r;
        }
        *id = (uint32_t)r;
-       r = amdgpu_ctx_init(adev, false, ctx);
+       r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
        mutex_unlock(&mgr->lock);
 
        return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 58cb698..8477596 100644
@@ -1528,7 +1528,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                return r;
        }
 
-       r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+       r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
        if (r) {
                dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
                return r;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 651129f..e13b7a0 100644
@@ -348,14 +348,17 @@ static struct amd_sched_entity *
 amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
        struct amd_sched_entity *entity;
+       int i;
 
        if (!amd_sched_ready(sched))
                return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue */
-       entity = amd_sched_rq_select_entity(&sched->kernel_rq);
-       if (entity == NULL)
-               entity = amd_sched_rq_select_entity(&sched->sched_rq);
+       for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
+               entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+               if (entity)
+                       break;
+       }
 
        return entity;
 }
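The scan above preserves the old ordering only because
AMD_SCHED_PRIORITY_KERNEL is 0: visiting sched_rq[] from index 0 serves
the kernel queue first, exactly as the removed two-step
kernel_rq/sched_rq fallback did. A compile-time guard would make that
assumption explicit (a sketch, not part of this patch; it would sit
somewhere like the top of amd_sched_init()):

    #include <linux/bug.h>

    /* The priority scan in amd_sched_select_entity() assumes the
     * kernel queue sits at index 0 of sched_rq[]. */
    BUILD_BUG_ON(AMD_SCHED_PRIORITY_KERNEL != 0);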
@@ -477,12 +480,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
 {
+       int i;
        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
-       amd_sched_rq_init(&sched->sched_rq);
-       amd_sched_rq_init(&sched->kernel_rq);
+       for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
+               amd_sched_rq_init(&sched->sched_rq[i]);
 
        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index a0f0ae5..9403145 100644
@@ -104,6 +104,12 @@ struct amd_sched_backend_ops {
        struct fence *(*run_job)(struct amd_sched_job *sched_job);
 };
 
+enum amd_sched_priority {
+       AMD_SCHED_PRIORITY_KERNEL = 0,
+       AMD_SCHED_PRIORITY_NORMAL,
+       AMD_SCHED_MAX_PRIORITY
+};
+
 /**
  * One scheduler is implemented for each hardware ring
 */
@@ -112,8 +118,7 @@ struct amd_gpu_scheduler {
        uint32_t                        hw_submission_limit;
        long                            timeout;
        const char                      *name;
-       struct amd_sched_rq             sched_rq;
-       struct amd_sched_rq             kernel_rq;
+       struct amd_sched_rq             sched_rq[AMD_SCHED_MAX_PRIORITY];
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
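Since both the sched_rq[] array and the selection loop are sized by
AMD_SCHED_MAX_PRIORITY, a new level slots in with a one-line enum
change. A hypothetical extension for illustration only
(AMD_SCHED_PRIORITY_HIGH is not part of this patch):

    enum amd_sched_priority {
            AMD_SCHED_PRIORITY_KERNEL = 0,
            AMD_SCHED_PRIORITY_HIGH,    /* hypothetical: above normal */
            AMD_SCHED_PRIORITY_NORMAL,
            AMD_SCHED_MAX_PRIORITY
    };

amd_sched_init() would then initialize one extra queue and
amd_sched_select_entity() would scan it right after the kernel queue,
with no further scheduler changes.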