return r;
}
-static uint32_t * amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
- uint32_t ib_pack_in_dw, bool enc)
+static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
+ uint32_t ib_pack_in_dw, bool enc)
{
uint32_t *ib_checksum;
}
static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
- uint32_t ib_pack_in_dw)
+ uint32_t ib_pack_in_dw)
{
uint32_t i;
uint32_t checksum = 0;
/* single queue headers */
if (sq) {
ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
- + 4 + 2; /* engine info + decoding ib in dw */
+ + 4 + 2; /* engine info + decoding ib in dw */
ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
}
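
For readers following the size computation in this hunk, here is a minimal sketch of the same arithmetic, assuming a 33-dword decode-buffer layout (the real struct amdgpu_vcn_decode_buffer lives in amdgpu_vcn.h and may differ), and reading the "+ 4 + 2" comment as 4 dwords of engine info plus 2 dwords of decode-IB header:

#include <linux/types.h>

/* Illustration only -- this field layout is an assumption, not taken from
 * the patch. */
struct amdgpu_vcn_decode_buffer_example {
	uint32_t valid_buf_flag;
	uint32_t msg_buffer_address_hi;
	uint32_t msg_buffer_address_lo;
	uint32_t pad[30];	/* 33 dwords total with this layout */
};

/* Same arithmetic as the hunk above: decode buffer in dwords, plus 4 dwords
 * of engine info and 2 dwords of decode-IB header, i.e. 33 + 6 = 39 dwords
 * packed into the unified ring IB for this example layout. */
static const uint32_t example_ib_pack_in_dw =
	sizeof(struct amdgpu_vcn_decode_buffer_example) / sizeof(uint32_t) + 4 + 2;
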
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
- unsigned ib_size_dw = 16;
+ unsigned int ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
- unsigned ib_size_dw = 16;
+ unsigned int ib_size_dw = 16;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
sprintf(ring->name, "vcn_unified_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
- AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
+ AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
if (r)
return r;
WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
- tmp= RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
+ tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
- UVD_VCPU_CNTL__BLK_RST_MASK,
- ~UVD_VCPU_CNTL__BLK_RST_MASK);
+ UVD_VCPU_CNTL__BLK_RST_MASK,
+ ~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
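
As a side note on the error path touched by this last hunk: the two WREG32_P() calls are a plain assert / settle / de-assert of the VCPU block reset. A minimal sketch of that pattern as a stand-alone helper (hypothetical name, for illustration only; the register names and the 10 ms delay are taken from the hunk above, and the patch itself keeps these calls open-coded):

/* Hypothetical helper, not part of the patch. */
static void example_vcn_vcpu_block_reset(struct amdgpu_device *adev, int inst)
{
	/* Assert the VCPU block reset bit. */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst, regUVD_VCPU_CNTL),
		 UVD_VCPU_CNTL__BLK_RST_MASK,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);

	/* Give the block time to settle in reset. */
	mdelay(10);

	/* De-assert the reset so the VCPU can start fetching again. */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst, regUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__BLK_RST_MASK);
}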