drm/amdgpu: Rename DRM schedulers in amdgpu TTM
Author: Mukul Joshi <mukul.joshi@amd.com>
Tue, 23 May 2023 15:55:54 +0000 (11:55 -0400)
Committer: Alex Deucher <alexander.deucher@amd.com>
Thu, 15 Jun 2023 14:42:33 +0000 (10:42 -0400)
Rename mman.entity to mman.high_pr to make the distinction
clearer that this is a high priority scheduler. Similarly,
rename the recently added mman.delayed to mman.low_pr to
make it clear it is a low priority scheduler.
No functional change in this patch.

Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

index d2d0d27f9053962f216f8006ae7f3c72d48822a0..0534ab7168094c273cd1deca72adfd32c87c0804 100644 (file)
@@ -228,7 +228,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
-       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4 + num_bytes,
                                     AMDGPU_IB_POOL_DELAYED, &job);
@@ -1456,7 +1456,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
                memcpy(adev->mman.sdma_access_ptr, buf, len);
 
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
                                     &job);
@@ -2032,7 +2032,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
                ring = adev->mman.buffer_funcs_ring;
                sched = &ring->sched;
-               r = drm_sched_entity_init(&adev->mman.entity,
+               r = drm_sched_entity_init(&adev->mman.high_pr,
                                          DRM_SCHED_PRIORITY_KERNEL, &sched,
                                          1, NULL);
                if (r) {
@@ -2041,7 +2041,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                        return;
                }
 
-               r = drm_sched_entity_init(&adev->mman.delayed,
+               r = drm_sched_entity_init(&adev->mman.low_pr,
                                          DRM_SCHED_PRIORITY_NORMAL, &sched,
                                          1, NULL);
                if (r) {
@@ -2050,8 +2050,8 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
                        goto error_free_entity;
                }
        } else {
-               drm_sched_entity_destroy(&adev->mman.entity);
-               drm_sched_entity_destroy(&adev->mman.delayed);
+               drm_sched_entity_destroy(&adev->mman.high_pr);
+               drm_sched_entity_destroy(&adev->mman.low_pr);
                dma_fence_put(man->move);
                man->move = NULL;
        }
@@ -2067,7 +2067,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
        return;
 
 error_free_entity:
-       drm_sched_entity_destroy(&adev->mman.entity);
+       drm_sched_entity_destroy(&adev->mman.high_pr);
 }
 
 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
@@ -2082,8 +2082,8 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
                AMDGPU_IB_POOL_DIRECT :
                AMDGPU_IB_POOL_DELAYED;
        int r;
-       struct drm_sched_entity *entity = delayed ? &adev->mman.delayed :
-                                                   &adev->mman.entity;
+       struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
+                                                   &adev->mman.high_pr;
        r = amdgpu_job_alloc_with_ib(adev, entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4, pool, job);
index e82b1edee7a4e90bf568239432d7101e8cb67927..6d0d66e40db934abefa8c25e973ddcd283138c07 100644 (file)
@@ -59,10 +59,10 @@ struct amdgpu_mman {
        bool                                    buffer_funcs_enabled;
 
        struct mutex                            gtt_window_lock;
-       /* Scheduler entity for buffer moves */
-       struct drm_sched_entity                 entity;
-       /* Scheduler entity for VRAM clearing */
-       struct drm_sched_entity                 delayed;
+       /* High priority scheduler entity for buffer moves */
+       struct drm_sched_entity                 high_pr;
+       /* Low priority scheduler entity for VRAM clearing */
+       struct drm_sched_entity                 low_pr;
 
        struct amdgpu_vram_mgr vram_mgr;
        struct amdgpu_gtt_mgr gtt_mgr;
index b2e42f1b0f12ddb48dcc4908852c6933e89370ff..0c8a479895761e46bc2563da43a9fe318a9b5b4d 100644 (file)
@@ -382,7 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
         * translation. Avoid this by doing the invalidation from the SDMA
         * itself.
         */
-       r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+       r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
                                     &job);
index 58d95fb995959a4afcfac08ebd89f2fb73765f6f..709ac885ca6d7b725266632c6a6ab9d2c2dc893d 100644 (file)
@@ -64,7 +64,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = npages * 8;
 
-       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+       r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4 + num_bytes,
                                     AMDGPU_IB_POOL_DELAYED,