{
	struct amdgpu_device *adev = ring->adev;

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
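Throughout this change the pattern is the same: a VCE ring is no longer identified by comparing its address against adev->vce.ring[0]; the callbacks read the instance index stored in ring->me instead. A minimal sketch of the resulting VCE 2.0 read-pointer callback, assuming the conventional amdgpu name and signature (vce_v2_0_ring_get_rptr returning uint64_t; neither is shown in these hunks):

static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* ring->me is the ring's index, assigned once in set_ring_funcs below */
	if (ring->me == 0)
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}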
{
	struct amdgpu_device *adev = ring->adev;

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
{
	struct amdgpu_device *adev = ring->adev;

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
{
	int i;

-	for (i = 0; i < adev->vce.num_rings; i++)
+	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
+		adev->vce.ring[i].me = i;
+	}
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
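Branching on ring->me only works because the index is populated when the ring function tables are assigned, which is what the set_ring_funcs hunk above adds. Sketch of the resulting VCE 2.0 setup routine (the name vce_v2_0_set_ring_funcs is assumed; the hunk shows only its body):

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
		/* record the array index so callbacks need no pointer comparison */
		adev->vce.ring[i].me = i;
	}
}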
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		v = RREG32(mmVCE_RB_RPTR);
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_RPTR2);
	else
		v = RREG32(mmVCE_RB_RPTR3);
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		v = RREG32(mmVCE_RB_WPTR);
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
		v = RREG32(mmVCE_RB_WPTR2);
	else
		v = RREG32(mmVCE_RB_WPTR3);
	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
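VCE 3.0 exposes up to three rings, so each chain gains a third register (mmVCE_RB_RPTR3/WPTR3), and the surrounding context first selects the physical VCE instance via mmGRBM_GFX_INDEX. Since ring->me is a plain integer, the three-way pick could equally be written as a switch; a hypothetical alternative sketch (not the form the patch uses, and the helper name is invented):

static u32 vce_v3_0_read_rptr_reg(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* same register selection as the if/else chain above */
	switch (ring->me) {
	case 0:
		return RREG32(mmVCE_RB_RPTR);
	case 1:
		return RREG32(mmVCE_RB_RPTR2);
	default:
		return RREG32(mmVCE_RB_RPTR3);
	}
}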
	int i;

	if (adev->asic_type >= CHIP_STONEY) {
-		for (i = 0; i < adev->vce.num_rings; i++)
+		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_vm_funcs;
+			adev->vce.ring[i].me = i;
+		}
		DRM_INFO("VCE enabled in VM mode\n");
	} else {
-		for (i = 0; i < adev->vce.num_rings; i++)
+		for (i = 0; i < adev->vce.num_rings; i++) {
			adev->vce.ring[i].funcs = &vce_v3_0_ring_phys_funcs;
+			adev->vce.ring[i].me = i;
+		}
		DRM_INFO("VCE enabled in physical mode\n");
	}
}
{
	struct amdgpu_device *adev = ring->adev;

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
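VCE 4.0 is a SOC15 IP, hence the SOC15_REG_OFFSET() addressing, and a doorbell-backed ring publishes its write pointer through the writeback buffer, so only the MMIO fallback needs the instance index. A sketch of the resulting callback, assuming the conventional name and uint64_t return type:

static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* doorbell rings mirror the wptr into the writeback slot */
	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring->me == 0)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
	else if (ring->me == 1)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
}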
		return;
	}

-	if (ring == &adev->vce.ring[0])
+	if (ring->me == 0)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
			lower_32_bits(ring->wptr));
-	else if (ring == &adev->vce.ring[1])
+	else if (ring->me == 1)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
			lower_32_bits(ring->wptr));
	else
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
			lower_32_bits(ring->wptr));
{
	int i;

-	for (i = 0; i < adev->vce.num_rings; i++)
+	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
+		adev->vce.ring[i].me = i;
+	}
	DRM_INFO("VCE enabled in VM mode\n");
}
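All three generations now end their setup with the same loop shape: assign the funcs table, then record the index. If the repetition mattered, it could be factored into a shared helper; a hypothetical sketch (this helper does not exist in the driver):

static void vce_assign_ring_funcs(struct amdgpu_device *adev,
				  const struct amdgpu_ring_funcs *funcs)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = funcs;
		adev->vce.ring[i].me = i;
	}
}

The VCE 3.0 VM/physical branches would then reduce to one call each, e.g. vce_assign_ring_funcs(adev, &vce_v3_0_ring_vm_funcs).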