	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
+			return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
	void (*reset)(struct amdgpu_device *adev);
	void (*start)(struct amdgpu_device *adev);
	void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
-	void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
-	u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
+	void (*sriov_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
+	u32 (*sriov_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
	bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
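To see how the renamed hooks fit together, here is a stand-alone sketch (not driver code) of the pattern the interface above encodes: a per-ASIC table supplies sriov_wreg/sriov_rreg plus a range predicate, and a generic write helper falls back to a direct MMIO write when SR-IOV routing does not apply. All names below (struct dev, dev_wreg, toy_sriov_wreg, and so on) are invented for illustration, and the dispatch is simplified relative to the kernel code.

/* Illustrative sketch only -- not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct amdgpu_rlc_funcs after the rename. */
struct rlc_funcs {
	void (*sriov_wreg)(void *dev, uint32_t offset, uint32_t v,
			   uint32_t acc_flags, uint32_t hwip);
	uint32_t (*sriov_rreg)(void *dev, uint32_t offset,
			       uint32_t acc_flags, uint32_t hwip);
	bool (*is_rlcg_access_range)(void *dev, uint32_t reg);
};

struct dev {
	const struct rlc_funcs *funcs;	/* NULL when no RLCG routing exists */
	bool is_vf;			/* stand-in for amdgpu_sriov_vf() */
	uint32_t mmio[64];		/* fake register file */
};

/* Generic write helper: route protected registers through the SR-IOV
 * callback, everything else straight to "MMIO" (simplified dispatch). */
static void dev_wreg(struct dev *d, uint32_t reg, uint32_t v)
{
	if (d->is_vf && d->funcs && d->funcs->is_rlcg_access_range &&
	    d->funcs->is_rlcg_access_range(d, reg)) {
		d->funcs->sriov_wreg(d, reg, v, 0, 0);
		return;
	}
	d->mmio[reg] = v;
}

/* Toy VF-side write, standing in for a per-ASIC sriov_wreg implementation. */
static void toy_sriov_wreg(void *dev, uint32_t offset, uint32_t v,
			   uint32_t acc_flags, uint32_t hwip)
{
	struct dev *d = dev;

	(void)acc_flags;
	(void)hwip;
	printf("sriov_wreg: reg %u <- 0x%x\n", (unsigned)offset, (unsigned)v);
	d->mmio[offset] = v;
}

static bool toy_in_range(void *dev, uint32_t reg)
{
	(void)dev;
	return reg < 16;	/* pretend the first 16 registers are protected */
}

int main(void)
{
	static const struct rlc_funcs funcs = {
		.sriov_wreg = toy_sriov_wreg,
		.is_rlcg_access_range = toy_in_range,
	};
	struct dev d = { .funcs = &funcs, .is_vf = true };

	dev_wreg(&d, 3, 0xdeadbeef);	/* routed through sriov_wreg */
	dev_wreg(&d, 42, 0x1234);	/* plain MMIO fallback */
	return 0;
}
/* End of sketch. */

The .sriov_wreg / .is_rlcg_access_range assignments in the toy main() play the same role as the gfx_v10_0 and gfx_v9_0 rlc_funcs tables updated further down in the patch.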
	return ret;
}
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
+static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;
	WREG32(offset, value);
}
-static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
+static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;
	.reset = gfx_v10_0_rlc_reset,
	.start = gfx_v10_0_rlc_start,
	.update_spm_vmid = gfx_v10_0_update_spm_vmid,
-	.rlcg_wreg = gfx_v10_rlcg_wreg,
-	.rlcg_rreg = gfx_v10_rlcg_rreg,
+	.sriov_wreg = gfx_v10_sriov_wreg,
+	.sriov_rreg = gfx_v10_sriov_rreg,
	.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
}
-static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
+static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset,
				u32 v, u32 acc_flags, u32 hwip)
{
	if ((acc_flags & AMDGPU_REGS_RLC) &&
	.reset = gfx_v9_0_rlc_reset,
	.start = gfx_v9_0_rlc_start,
	.update_spm_vmid = gfx_v9_0_update_spm_vmid,
-	.rlcg_wreg = gfx_v9_0_rlcg_wreg,
+	.sriov_wreg = gfx_v9_0_sriov_wreg,
	.is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
-	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_wreg) ? \
-	 adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, flag, hwip) : \
+	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_wreg) ? \
+	 adev->gfx.rlc.funcs->sriov_wreg(adev, reg, value, flag, hwip) : \
	 WREG32(reg, value))
#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
-	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_rreg) ? \
-	 adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, flag, hwip) : \
+	((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_rreg) ? \
+	 adev->gfx.rlc.funcs->sriov_rreg(adev, reg, flag, hwip) : \
	 RREG32(reg))
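One detail the rename leaves untouched, but that is easy to overlook when reading these macros, is that __WREG32_SOC15_RLC__ and __RREG32_SOC15_RLC__ expand the identifier adev textually: they only compile inside functions that already have an adev in scope, and the SR-IOV and NULL-callback checks are re-evaluated at every call site before falling back to plain WREG32/RREG32. The stand-alone sketch below (invented names: TOY_WREG32_RLC, struct toy_adev) mimics that expansion pattern; it illustrates the macro style, it is not the kernel macro itself.

/* Illustrative sketch only -- not part of the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in device; the fields mirror what the real macros poke at
 * (amdgpu_sriov_vf(adev) and adev->gfx.rlc.funcs->sriov_wreg). */
struct toy_adev {
	bool is_vf;
	void (*sriov_wreg)(struct toy_adev *adev, uint32_t reg, uint32_t v,
			   uint32_t flag, uint32_t hwip);
	uint32_t regs[64];
};

/* Like __WREG32_SOC15_RLC__, this macro uses a variable literally named
 * 'adev' from the surrounding scope instead of taking it as an argument. */
#define TOY_WREG32_RLC(reg, value, flag, hwip)				\
	((adev->is_vf && adev->sriov_wreg) ?				\
	 adev->sriov_wreg(adev, (reg), (value), (flag), (hwip)) :	\
	 (void)(adev->regs[(reg)] = (value)))

static void toy_sriov_wreg(struct toy_adev *adev, uint32_t reg, uint32_t v,
			   uint32_t flag, uint32_t hwip)
{
	(void)flag;
	(void)hwip;
	printf("VF path: reg %u <- 0x%x\n", (unsigned)reg, (unsigned)v);
	adev->regs[reg] = v;
}

int main(void)
{
	struct toy_adev dev = { .is_vf = true, .sriov_wreg = toy_sriov_wreg };
	struct toy_adev *adev = &dev;	/* the macro expects this exact name */

	TOY_WREG32_RLC(5, 0xcafe, 0, 0);	/* dispatched to the callback */
	adev->is_vf = false;
	TOY_WREG32_RLC(5, 0xbeef, 0, 0);	/* direct-write fallback */
	return 0;
}
/* End of sketch. */

The direct-write fallback branch in the toy macro is the slot that WREG32(reg, value) and RREG32(reg) occupy in the real macros above.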
#define WREG32_FIELD15(ip, idx, reg, field, val) \