drm/amdgpu: add start DPG mode for VCN3.0
authorBoyuan Zhang <boyuan.zhang@amd.com>
Fri, 27 Mar 2020 17:30:53 +0000 (13:30 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 1 Jul 2020 05:59:12 +0000 (01:59 -0400)
Add vcn_v3_0_start_dpg_mode to set up and start the VCN block in DPG mode for VCN3.0

V2: Separate from previous patch-0002, and update description.

Signed-off-by: Boyuan Zhang <boyuan.zhang@amd.com>
Reviewed-by: James Zhu <james.zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c

index 86db365..5578af5 100644 (file)
@@ -818,6 +818,142 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
        WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data);
 }
 
+static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+{
+       struct amdgpu_ring *ring;
+       uint32_t rb_bufsz, tmp;
+
+       /* disable register anti-hang mechanism */
+       WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
+               ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
+       /* enable dynamic power gating mode */
+       tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
+       tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
+       tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
+       WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
+
+       if (indirect)
+               adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
+
+       /* enable clock gating (direct or via DPG SRAM, per 'indirect') */
+       vcn_v3_0_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+
+       /* enable VCPU clock, keep VCPU block in reset for now */
+       tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+       tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+       tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+       /* disable master interrupt */
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect);
+
+       /* setup mmUVD_LMI_CTRL */
+       tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+               UVD_LMI_CTRL__REQ_MODE_MASK |
+               UVD_LMI_CTRL__CRC_RESET_MASK |
+               UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
+               UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+               UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+               (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+               0x00100000L);
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect);
+
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_MPC_CNTL),
+               0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
+
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_MPC_SET_MUXA0),
+               ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
+                (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
+                (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
+                (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
+
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_MPC_SET_MUXB0),
+                ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
+                (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
+                (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
+                (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
+
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_MPC_SET_MUX),
+               ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
+                (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
+                (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
+
+       vcn_v3_0_mc_resume_dpg_mode(adev, inst_idx, indirect);
+
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+
+       /* enable LMI MC and UMC channels */
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect);
+
+       /* unblock VCPU register access */
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+
+       /* release VCPU from reset (BLK_RST cleared), keep clock enabled */
+       tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
+       tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+       /* enable master interrupt */
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_MASTINT_EN),
+               UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
+
+       /* add nop (redundant VCPU_CNTL write) to work around PSP size check */
+       WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
+               VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+
+       if (indirect)
+               psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+                       (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+                               (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
+
+       ring = &adev->vcn.inst[inst_idx].ring_dec;
+       /* force RBC into idle state */
+       rb_bufsz = order_base_2(ring->ring_size);
+       tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+       tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+       /* set the write pointer delay */
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+
+       /* set the write-back address for the read pointer */
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+               (upper_32_bits(ring->gpu_addr) >> 2));
+
+       /* program the RB_BASE for ring buffer */
+       WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+               lower_32_bits(ring->gpu_addr));
+       WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+               upper_32_bits(ring->gpu_addr));
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+
+       WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
+
+       ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
+       WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
+               lower_32_bits(ring->wptr));
+
+       return 0;
+}
+
 static int vcn_v3_0_start(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
@@ -831,6 +967,11 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
 
+               if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG){
+                       r = vcn_v3_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+                       continue;
+               }
+
                /* disable VCN power gating */
                vcn_v3_0_disable_static_power_gating(adev, i);