struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;

- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }

for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
               AMDGPU_SDMA_IRQ_INSTANCE0 + i);

static int sdma_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }

sdma_v5_0_ctx_switch_enable(adev, false);
sdma_v5_0_enable(adev, false);

static int sdma_v5_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

- /*
- * Under SRIOV, the VF cannot single-mindedly stop SDMA engine
- * However, we still need to clean up the DRM entity
- * Therefore, we will re-enable SDMA afterwards.
- */
- sdma_v5_2_ctx_switch_enable(adev, false);
- sdma_v5_2_enable(adev, false);
-
if (amdgpu_sriov_vf(adev)) {
- sdma_v5_2_enable(adev, true);
- sdma_v5_2_ctx_switch_enable(adev, true);
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
+ return 0;
}

+ sdma_v5_2_ctx_switch_enable(adev, false);
+ sdma_v5_2_enable(adev, false);
+
return 0;
}
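
The sdma_v5_2 hunk changes more than the early return: the old SR-IOV path stopped the engine and immediately re-enabled it just so teardown could proceed, and that dance is dropped entirely. For readability, the function reads roughly as follows once the hunk is applied (the enclosing signature, static int sdma_v5_2_hw_fini(void *handle), is inferred from the sdma_v5_2_* calls rather than quoted from the patch):

/* signature inferred from the hunk body; not shown in the excerpt above */
static int sdma_v5_2_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_sriov_vf(adev)) {
                /* disable the scheduler for SDMA */
                amdgpu_sdma_unset_buffer_funcs_helper(adev);
                return 0;
        }

        sdma_v5_2_ctx_switch_enable(adev, false);
        sdma_v5_2_enable(adev, false);

        return 0;
}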

static int sdma_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ /* disable the scheduler for SDMA */
+ amdgpu_sdma_unset_buffer_funcs_helper(adev);
return 0;
+ }

sdma_v6_0_ctx_switch_enable(adev, false);
sdma_v6_0_enable(adev, false);
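
All four hunks (they sit in the hw_fini paths of the SDMA 4.0, 5.0, 5.2 and 6.0 IP blocks, judging by the sdma_vX_Y_* calls and the ecc_irq loop) now funnel the SR-IOV case into the shared amdgpu_sdma_unset_buffer_funcs_helper(). The helper's body is not part of this excerpt; as a sketch of what it has to do (walk the SDMA instances and, if TTM's buffer-move ring points at one of them, switch the buffer funcs off), something like the following would suffice, though the exact implementation in amdgpu_sdma.c may differ in detail:

/* Sketch only: the in-tree amdgpu_sdma_unset_buffer_funcs_helper() may
 * differ in detail.  The VF never touches SDMA registers here; it only
 * detaches TTM's buffer funcs so the scheduler stops submitting buffer
 * moves to the SDMA rings.
 */
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
{
        struct amdgpu_ring *sdma;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (adev->sdma.has_page_queue) {
                        sdma = &adev->sdma.instance[i].page;
                        if (adev->mman.buffer_funcs_ring == sdma) {
                                amdgpu_ttm_set_buffer_funcs_status(adev, false);
                                break;
                        }
                }
                sdma = &adev->sdma.instance[i].ring;
                if (adev->mman.buffer_funcs_ring == sdma) {
                        amdgpu_ttm_set_buffer_funcs_status(adev, false);
                        break;
                }
        }
}

The point of the change is that under SR-IOV the host owns the SDMA engine, so the guest's teardown only needs to stop feeding it work rather than halting the engine itself.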