drm/scheduler: modify args of drm_sched_entity_init
authorNayan Deshmukh <nayan26deshmukh@gmail.com>
Fri, 13 Jul 2018 09:51:14 +0000 (15:21 +0530)
committerAlex Deucher <alexander.deucher@amd.com>
Fri, 13 Jul 2018 19:46:05 +0000 (14:46 -0500)
replace the run queue with a list of run queues and remove the
sched arg, as that is part of the run queue itself

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/scheduler/gpu_scheduler.c
drivers/gpu/drm/v3d/v3d_drv.c
include/drm/gpu_scheduler.h

index 0120b24..83e3b32 100644 (file)
@@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
                if (ring == &adev->gfx.kiq.ring)
                        continue;
 
-               r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-                                         rq, &ctx->guilty);
+               r = drm_sched_entity_init(&ctx->rings[i].entity,
+                                         &rq, 1, &ctx->guilty);
                if (r)
                        goto failed;
        }
index 6a3fead..11a1248 100644 (file)
@@ -1918,8 +1918,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
                ring = adev->mman.buffer_funcs_ring;
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-               r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
                if (r) {
                        DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
                                  r);
index 3e70eb6..a6c2cac 100644 (file)
@@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
                ring = &adev->uvd.inst[j].ring;
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->uvd.inst[j].entity, &rq,
+                                         1, NULL);
                if (r != 0) {
                        DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
                        return r;
index 6ae1ad7..ffb0fcc 100644 (file)
@@ -190,8 +190,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
        ring = &adev->vce.ring[0];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-       r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-                                 rq, NULL);
+       r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCE run queue.\n");
                return r;
index 0fd0a71..484e2c1 100644 (file)
@@ -2564,8 +2564,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        ring_instance %= adev->vm_manager.vm_pte_num_rings;
        ring = adev->vm_manager.vm_pte_rings[ring_instance];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-       r = drm_sched_entity_init(&ring->sched, &vm->entity,
-                                 rq, NULL);
+       r = drm_sched_entity_init(&vm->entity, &rq, 1, NULL);
        if (r)
                return r;
 
index 2623f24..1c118c0 100644 (file)
@@ -430,8 +430,8 @@ static int uvd_v6_0_sw_init(void *handle)
                struct drm_sched_rq *rq;
                ring = &adev->uvd.inst->ring_enc[0];
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
+                                         &rq, 1, NULL);
                if (r) {
                        DRM_ERROR("Failed setting up UVD ENC run queue.\n");
                        return r;
index ce360ad..d48bc33 100644 (file)
@@ -432,8 +432,8 @@ static int uvd_v7_0_sw_init(void *handle)
        for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
                ring = &adev->uvd.inst[j].ring_enc[0];
                rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
-                                         rq, NULL);
+               r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
+                                         &rq, 1, NULL);
                if (r) {
                        DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
                        return r;
index 45bfdf4..36414ba 100644 (file)
@@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 
        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *gpu = priv->gpu[i];
+               struct drm_sched_rq *rq;
 
                if (gpu) {
-                       drm_sched_entity_init(&gpu->sched,
-                               &ctx->sched_entity[i],
-                               &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-                               NULL);
+                       rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+                       drm_sched_entity_init(&ctx->sched_entity[i],
+                                             &rq, 1, NULL);
                        }
        }
 
index 429b132..16bf446 100644 (file)
@@ -162,26 +162,30 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * drm_sched_entity_init - Init a context entity used by scheduler when
  * submit to HW ring.
  *
- * @sched: scheduler instance
  * @entity: scheduler entity to init
- * @rq: the run queue this entity belongs
+ * @rq_list: the list of run queues on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queues in rq_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
+ * Note: the rq_list should have at least one element to schedule
+ *       the entity
+ *
  * Returns 0 on success or a negative error code on failure.
 */
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-                         struct drm_sched_entity *entity,
-                         struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+                         struct drm_sched_rq **rq_list,
+                         unsigned int num_rq_list,
                          atomic_t *guilty)
 {
-       if (!(sched && entity && rq))
+       if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
                return -EINVAL;
 
        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
-       entity->rq = rq;
-       entity->sched = sched;
+       entity->rq = rq_list[0];
+       entity->sched = rq_list[0]->sched;
        entity->guilty = guilty;
        entity->last_scheduled = NULL;
 
index 567f7d4..1dceba2 100644 (file)
@@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv;
+       struct drm_sched_rq *rq;
        int i;
 
        v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
        v3d_priv->v3d = v3d;
 
        for (i = 0; i < V3D_MAX_QUEUES; i++) {
-               drm_sched_entity_init(&v3d->queue[i].sched,
-                                     &v3d_priv->sched_entity[i],
-                                     &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-                                     NULL);
+               rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+               drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
        }
 
        file->driver_priv = v3d_priv;
index 43e93d6..2205e89 100644 (file)
@@ -282,9 +282,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-                         struct drm_sched_entity *entity,
-                         struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+                         struct drm_sched_rq **rq_list,
+                         unsigned int num_rq_list,
                          atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity, long timeout);