 }
 
 /**
- * sdma_v6_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v6_0_ctxempty_int_enable - enable or disable context empty interrupts
  *
  * @adev: amdgpu_device pointer
- * @enable: enable/disable the DMA MEs context switch.
+ * @enable: enable/disable context switching due to queue-empty conditions
  *
- * Halt or unhalt the async dma engines context switch.
+ * Enable or disable the queue-empty context switch for the async DMA engines.
  */
-static void sdma_v6_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+static void sdma_v6_0_ctxempty_int_enable(struct amdgpu_device *adev, bool enable)
 {
+	u32 f32_cntl;
+	int i;
+
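+	/* Skip on SR-IOV VFs: SDMA0_CNTL programming is presumed handled by the host. */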
+	if (!amdgpu_sriov_vf(adev)) {
+		for (i = 0; i < adev->sdma.num_instances; i++) {
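+			/* read-modify-write so only the CTXEMPTY_INT_ENABLE field changes */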
+			f32_cntl = RREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL));
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+						 CTXEMPTY_INT_ENABLE, enable ? 1 : 0);
+			WREG32(sdma_v6_0_get_reg_offset(adev, i, regSDMA0_CNTL), f32_cntl);
+		}
+	}
 }
 
 /**
@@ ... @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
 		ring->sched.ready = true;
 
-		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
-			sdma_v6_0_ctx_switch_enable(adev, true);
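+		/* ctx_switch_enable() had an empty body, so the VF path only needs the unhalt */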
+		if (amdgpu_sriov_vf(adev))
 			sdma_v6_0_enable(adev, true);
-		}
 
 		r = amdgpu_ring_test_helper(ring);
 		if (r) {
@@ ... @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (amdgpu_sriov_vf(adev)) {
-		sdma_v6_0_ctx_switch_enable(adev, false);
 		sdma_v6_0_enable(adev, false);
 
 		/* set RB registers */
@@ ... @@ static int sdma_v6_0_start(struct amdgpu_device *adev)
 	/* unhalt the MEs */
 	sdma_v6_0_enable(adev, true);
-	/* enable sdma ring preemption */
-	sdma_v6_0_ctx_switch_enable(adev, true);
+	/* enable context-empty interrupts */
+	sdma_v6_0_ctxempty_int_enable(adev, true);
 
 	/* start the gfx rings and rlc compute queues */
 	r = sdma_v6_0_gfx_resume(adev);
 
 	return 0;
 }
@@ ... @@
-	sdma_v6_0_ctx_switch_enable(adev, false);
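+	/* quiesce context-empty interrupts before halting the engines */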
+	sdma_v6_0_ctxempty_int_enable(adev, false);
 	sdma_v6_0_enable(adev, false);
 
 	return 0;