drm/amdgpu: fix SDMA suspend/resume on SR-IOV
Author: Alex Deucher <alexander.deucher@amd.com>
Thu, 6 Oct 2022 19:53:10 +0000 (15:53 -0400)
Committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 10 Oct 2022 21:32:56 +0000 (17:32 -0400)
Update all SDMA versions that support SR-IOV to properly
tear down the ttm buffer functions on suspend.

Tested-by: Bokun Zhang <Bokun.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c

index 7b4195f..298fa11 100644 (file)
@@ -1940,8 +1940,11 @@ static int sdma_v4_0_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
+               /* disable the scheduler for SDMA */
+               amdgpu_sdma_unset_buffer_funcs_helper(adev);
                return 0;
+       }
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
index 783048e..d4d9f19 100644 (file)
@@ -1456,8 +1456,11 @@ static int sdma_v5_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
+               /* disable the scheduler for SDMA */
+               amdgpu_sdma_unset_buffer_funcs_helper(adev);
                return 0;
+       }
 
        sdma_v5_0_ctx_switch_enable(adev, false);
        sdma_v5_0_enable(adev, false);
index c2ee53c..809eca5 100644 (file)
@@ -1349,19 +1349,15 @@ static int sdma_v5_2_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       /*
-        * Under SRIOV, the VF cannot single-mindedly stop SDMA engine
-        * However, we still need to clean up the DRM entity
-        * Therefore, we will re-enable SDMA afterwards.
-        */
-       sdma_v5_2_ctx_switch_enable(adev, false);
-       sdma_v5_2_enable(adev, false);
-
        if (amdgpu_sriov_vf(adev)) {
-               sdma_v5_2_enable(adev, true);
-               sdma_v5_2_ctx_switch_enable(adev, true);
+               /* disable the scheduler for SDMA */
+               amdgpu_sdma_unset_buffer_funcs_helper(adev);
+               return 0;
        }
 
+       sdma_v5_2_ctx_switch_enable(adev, false);
+       sdma_v5_2_enable(adev, false);
+
        return 0;
 }
 
index a648348..da3beb0 100644 (file)
@@ -1311,8 +1311,11 @@ static int sdma_v6_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
+               /* disable the scheduler for SDMA */
+               amdgpu_sdma_unset_buffer_funcs_helper(adev);
                return 0;
+       }
 
        sdma_v6_0_ctx_switch_enable(adev, false);
        sdma_v6_0_enable(adev, false);