drm/amdgpu/sdma_v4_0: turn off SDMA ring buffer in the s2idle suspend
author     Prike Liang <Prike.Liang@amd.com>
           Thu, 1 Dec 2022 03:17:31 +0000 (11:17 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Wed, 14 Dec 2022 10:37:20 +0000 (11:37 +0100)
commit bc21fe9a5844c5bc8f7ec319b11d2671a94eb867 upstream.

The SDMA s0ix save process requires turning off the SDMA ring buffer to
avoid in-flight SDMA requests; otherwise, a page request from an
in-flight SDMA ring access during the SDMA restore phase will trigger an
SDMA page fault.

Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2248
Cc: stable@vger.kernel.org # 6.0,5.15+
Fixes: f8f4e2a51834 ("drm/amdgpu: skipping SDMA hw_init and hw_fini for S0ix.")
Signed-off-by: Prike Liang <Prike.Liang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Tested-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 9014f71..f14f7bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -978,13 +978,13 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 
 
 /**
- * sdma_v4_0_gfx_stop - stop the gfx async dma engines
+ * sdma_v4_0_gfx_enable - enable the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
- *
- * Stop the gfx async dma ring buffers (VEGA10).
+ * @enable: enable SDMA RB/IB
+ * control the gfx async dma ring buffers (VEGA10).
  */
-static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
        struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
        u32 rb_cntl, ib_cntl;
@@ -999,10 +999,10 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
                }
 
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
-               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
 }
@@ -1129,7 +1129,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
        int i;
 
        if (!enable) {
-               sdma_v4_0_gfx_stop(adev);
+               sdma_v4_0_gfx_enable(adev, enable);
                sdma_v4_0_rlc_stop(adev);
                if (adev->sdma.has_page_queue)
                        sdma_v4_0_page_stop(adev);
@@ -2063,8 +2063,10 @@ static int sdma_v4_0_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SMU saves SDMA state for us */
-       if (adev->in_s0ix)
+       if (adev->in_s0ix) {
+               sdma_v4_0_gfx_enable(adev, false);
                return 0;
+       }
 
        return sdma_v4_0_hw_fini(adev);
 }
@@ -2074,8 +2076,12 @@ static int sdma_v4_0_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SMU restores SDMA state for us */
-       if (adev->in_s0ix)
+       if (adev->in_s0ix) {
+               sdma_v4_0_enable(adev, true);
+               sdma_v4_0_gfx_enable(adev, true);
+               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                return 0;
+       }
 
        return sdma_v4_0_hw_init(adev);
 }
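
Because the hunks above interleave removed and added lines, the resulting
helper can be hard to read in diff form. Below is a condensed sketch of
sdma_v4_0_gfx_enable() as it reads after the patch, reconstructed from the
hunks; the loop over adev->sdma.num_instances is an assumption based on the
surrounding upstream code that the diff elides, not something this patch
shows.

/* Sketch only: reconstructed from the hunks above.  The instance loop is
 * assumed from the elided upstream context. */
static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	u32 rb_cntl, ib_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		/* RB_ENABLE=0 parks the ring buffer so no in-flight
		 * request survives into the s0ix restore phase. */
		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RB_ENABLE, enable ? 1 : 0);
		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);

		/* Likewise gate indirect buffer (IB) execution. */
		ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL,
					IB_ENABLE, enable ? 1 : 0);
		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
	}
}

With this helper, the s0ix paths stay lightweight: suspend only parks the
rings (the SMU saves the rest of the SDMA state), while resume re-enables
the engines via sdma_v4_0_enable(), re-opens the rings via
sdma_v4_0_gfx_enable(), and re-registers SDMA for TTM buffer moves with
amdgpu_ttm_set_buffer_funcs_status(), all without a full
hw_fini()/hw_init() cycle.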