} \
} while (0)
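+/* present_flag_0 bit telling the FW that the rb rptr/wptr fields below are valid (DPG pointer workaround) */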
+#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
int inst_idx, struct dpg_pause_state *new_state);
};
+struct amdgpu_fw_shared_rb_ptrs_struct {
+ /* Workaround for DPG read/write pointer issues. */
+ uint32_t rptr;
+ uint32_t wptr;
+};
+
struct amdgpu_fw_shared_multi_queue {
uint8_t decode_queue_mode;
uint8_t encode_generalpurpose_queue_mode;
struct amdgpu_fw_shared {
uint32_t present_flag_0;
- uint8_t pad[53];
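+ /* rb and pad1 replace 9 bytes of pad (44 + 8 + 1 = 53) so the following fields keep their offsets */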
+ uint8_t pad[44];
+ struct amdgpu_fw_shared_rb_ptrs_struct rb;
+ uint8_t pad1[1];
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
-} __attribute__((__packed__));
+};
struct amdgpu_vcn_decode_buffer {
uint32_t valid_buf_flag;
fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
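+ /* Advertise SW ring, multi-queue and RB pointer save/restore support to the VCN firmware */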
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
- cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
+ cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
+ cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
}
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ /* Reset FW shared memory RBC WPTR/RPTR */
+ fw_shared->rb.rptr = 0;
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+
+ /* Reset done, FW can check the RB ring */
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
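+ /* SCRATCH2 mirrors the decode ring wptr for the DPG workaround; clear it along with the RPTR */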
+ WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
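+ /* Mirror the initial wptr into FW shared memory */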
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+ /* Restore wptr/rptr from the pointers saved in FW shared memory */
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ volatile struct amdgpu_fw_shared *fw_shared;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ /* Whenever RBC_RB_WPTR is updated, save the wptr in fw_shared rb.wptr and SCRATCH2 */
+ fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+ WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
+ lower_32_bits(ring->wptr));
+ }
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);