drm/amdgpu: switch sdma buffer function tear down to a helper
author: Alex Deucher <alexander.deucher@amd.com>
Thu, 6 Oct 2022 19:31:40 +0000 (15:31 -0400)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 10 Oct 2022 21:32:56 +0000 (17:32 -0400)
Switch all of the SDMA implementations to use the helper to
tear down the ttm buffer manager.

Tested-by: Bokun Zhang <Bokun.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c

index 43cf863..ea5278f 100644 (file)
@@ -285,3 +285,24 @@ out:
        }
        return err;
 }
+
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
+{
+       struct amdgpu_ring *sdma;
+       int i;
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (adev->sdma.has_page_queue) {
+                       sdma = &adev->sdma.instance[i].page;
+                       if (adev->mman.buffer_funcs_ring == sdma) {
+                               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+                               break;
+                       }
+               }
+               sdma = &adev->sdma.instance[i].ring;
+               if (adev->mman.buffer_funcs_ring == sdma) {
+                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+                       break;
+               }
+       }
+}
index d2d8827..7d99205 100644 (file)
@@ -128,4 +128,6 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
         char *fw_name, u32 instance, bool duplicate);
 void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
         bool duplicate);
+void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+
 #endif
index 5647f13..cbca986 100644 (file)
@@ -309,14 +309,10 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
index 6bdffdc..c52d246 100644 (file)
@@ -342,14 +342,10 @@ static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
index 2584fa3..486d9b5 100644 (file)
@@ -516,14 +516,10 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
index 7241a9f..7b4195f 100644 (file)
@@ -915,18 +915,12 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
-       int i, unset = 0;
+       int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               sdma[i] = &adev->sdma.instance[i].ring;
-
-               if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       unset = 1;
-               }
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
@@ -957,20 +951,12 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
  */
 static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
        int i;
-       bool unset = false;
 
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               sdma[i] = &adev->sdma.instance[i].page;
-
-               if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
-                       (!unset)) {
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
-                       unset = true;
-               }
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                                        RB_ENABLE, 0);
index c05c3ee..783048e 100644 (file)
@@ -584,14 +584,10 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
index 3eaf1a5..c2ee53c 100644 (file)
@@ -414,18 +414,10 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
-       struct amdgpu_ring *sdma2 = &adev->sdma.instance[2].ring;
-       struct amdgpu_ring *sdma3 = &adev->sdma.instance[3].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1) ||
-           (adev->mman.buffer_funcs_ring == sdma2) ||
-           (adev->mman.buffer_funcs_ring == sdma3))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
index 0150f66..a648348 100644 (file)
@@ -398,14 +398,10 @@ static void sdma_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  */
 static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
 {
-       struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
-       struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
        u32 rb_cntl, ib_cntl;
        int i;
 
-       if ((adev->mman.buffer_funcs_ring == sdma0) ||
-           (adev->mman.buffer_funcs_ring == sdma1))
-               amdgpu_ttm_set_buffer_funcs_status(adev, false);
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
@@ -415,9 +411,6 @@ static void sdma_v6_0_gfx_stop(struct amdgpu_device *adev)
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
                WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
        }
-
-       sdma0->sched.ready = false;
-       sdma1->sched.ready = false;
 }
 
 /**
index f675111..4d5e718 100644 (file)
@@ -116,15 +116,14 @@ static void si_dma_stop(struct amdgpu_device *adev)
        u32 rb_cntl;
        unsigned i;
 
+       amdgpu_sdma_unset_buffer_funcs_helper(adev);
+
        for (i = 0; i < adev->sdma.num_instances; i++) {
                ring = &adev->sdma.instance[i].ring;
                /* dma0 */
                rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
                rb_cntl &= ~DMA_RB_ENABLE;
                WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
-
-               if (adev->mman.buffer_funcs_ring == ring)
-                       amdgpu_ttm_set_buffer_funcs_status(adev, false);
        }
 }