.sysfs_name = sysfs_name,
};
- snprintf(sysfs_name, sizeof(sysfs_name), "%s_err_count", mca_dev->ras->ras_block.name);
+ snprintf(sysfs_name, sizeof(sysfs_name), "%s_err_count",
+ mca_dev->ras->ras_block.ras_comm.name);
if (!mca_dev->ras_if) {
mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
if (!mca_dev->ras_if)
return -ENOMEM;
- mca_dev->ras_if->block = mca_dev->ras->ras_block.block;
- mca_dev->ras_if->sub_block_index = mca_dev->ras->ras_block.sub_block_index;
+ mca_dev->ras_if->block = mca_dev->ras->ras_block.ras_comm.block;
+ mca_dev->ras_if->sub_block_index = mca_dev->ras->ras_block.ras_comm.sub_block_index;
mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
}
ih_info.head = fs_info.head = *mca_dev->ras_if;
if (!block_obj)
return -EINVAL;
- if (block_obj->block == block)
+ if (block_obj->ras_comm.block == block)
return 0;
return -EINVAL;
return r;
}
+/*
+ * amdgpu_ras_block_late_init - common late-init path for a single RAS block.
+ *
+ * Recovers the enclosing amdgpu_ras_block_object from the embedded
+ * ras_common_if, fills in the interrupt-handler info (per-block callback
+ * plus head) and the sysfs info (node named "<block>_err_count"), then
+ * delegates to amdgpu_ras_late_init().
+ *
+ * Returns whatever amdgpu_ras_late_init() returns (0 on success,
+ * negative errno on failure).
+ */
+int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ char sysfs_name[32];
+ struct ras_ih_if ih_info;
+ struct ras_fs_if fs_info;
+ struct amdgpu_ras_block_object *obj;
+
+ /* ras_block is embedded in the block object as ->ras_comm */
+ obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
+ /* NOTE(review): ras_cb may be NULL for blocks without an IH callback —
+  * assumed amdgpu_ras_late_init() tolerates that; confirm. */
+ ih_info.cb = obj->ras_cb;
+ ih_info.head = *ras_block;
+ snprintf(sysfs_name, sizeof(sysfs_name), "%s_err_count", ras_block->name);
+ fs_info.sysfs_name = (const char *)sysfs_name;
+ fs_info.head = *ras_block;
+ return amdgpu_ras_late_init(adev, ras_block, &fs_info, &ih_info);
+}
+
/* helper function to remove ras fs node and interrupt handler */
void amdgpu_ras_late_fini(struct amdgpu_device *adev,
struct ras_common_if *ras_block,
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
}
+/*
+ * amdgpu_ras_block_late_fini - common teardown path for a single RAS block.
+ *
+ * Mirror of amdgpu_ras_block_late_init(): rebuilds the interrupt-handler
+ * info from the block object embedding ras_block and hands it to
+ * amdgpu_ras_late_fini() to remove the fs node and interrupt handler.
+ * A NULL ras_block is a no-op.
+ */
+void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ struct ras_ih_if ih_info;
+ struct amdgpu_ras_block_object *obj;
+
+ if (!ras_block)
+ return;
+
+ /* ras_block is embedded in the block object as ->ras_comm */
+ obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
+ ih_info.head = *ras_block;
+ ih_info.cb = obj->ras_cb;
+
+ amdgpu_ras_late_fini(adev, ras_block, &ih_info);
+}
+
/* do some init work after IP late init as dependence.
* and it runs in resume/gpu reset/booting up cases.
*/
};
struct amdgpu_ras_block_object {
- /* block name */
- char name[32];
-
- enum amdgpu_ras_block block;
-
- uint32_t sub_block_index;
+ /* common data (name, block id, sub-block index) shared with the RAS
+  * core; replaces the former open-coded name/block/sub_block_index
+  * members, and lets container_of() recover this object from a
+  * ras_common_if pointer. */
+ struct ras_common_if ras_comm;
int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj,
enum amdgpu_ras_block block, uint32_t sub_block_index);
int (*ras_late_init)(struct amdgpu_device *adev, void *ras_info);
void (*ras_fini)(struct amdgpu_device *adev);
+ /* per-block interrupt-handler callback, consumed by
+  * amdgpu_ras_block_late_init()/_fini(); presumably NULL when the
+  * block has no RAS interrupt — confirm against callers. */
+ ras_ih_cb ras_cb;
const struct amdgpu_ras_block_hw_ops *hw_ops;
};
struct ras_common_if *ras_block,
struct ras_fs_if *fs_info,
struct ras_ih_if *ih_info);
+
+int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+
void amdgpu_ras_late_fini(struct amdgpu_device *adev,
struct ras_common_if *ras_block,
struct ras_ih_if *ih_info);
+void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
struct amdgpu_xgmi_ras xgmi_ras = {
.ras_block = {
- .name = "xgmi",
- .block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ .ras_comm = {
+ .name = "xgmi",
+ .block = AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ },
.hw_ops = &xgmi_ras_hw_ops,
.ras_late_init = amdgpu_xgmi_ras_late_init,
.ras_fini = amdgpu_xgmi_ras_fini,
return err;
}
- strcpy(adev->gfx.ras->ras_block.name,"gfx");
- adev->gfx.ras->ras_block.block = AMDGPU_RAS_BLOCK__GFX;
+ strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx");
+ adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
/* If not define special ras_late_init function, use gfx default ras_late_init */
if (!adev->gfx.ras->ras_block.ras_late_init)
if (adev->umc.ras) {
amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
- strcpy(adev->umc.ras->ras_block.name, "umc");
- adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;
+ strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+ adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
/* If don't define special ras_late_init function, use default ras_late_init */
if (!adev->umc.ras->ras_block.ras_late_init)
if (adev->umc.ras) {
amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
- strcpy(adev->umc.ras->ras_block.name, "umc");
- adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;
+ strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+ adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
/* If don't define special ras_late_init function, use default ras_late_init */
if (!adev->umc.ras->ras_block.ras_late_init)
if (adev->mmhub.ras) {
amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
- strcpy(adev->mmhub.ras->ras_block.name,"mmhub");
- adev->mmhub.ras->ras_block.block = AMDGPU_RAS_BLOCK__MMHUB;
+ strcpy(adev->mmhub.ras->ras_block.ras_comm.name, "mmhub");
+ adev->mmhub.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
/* If don't define special ras_late_init function, use default ras_late_init */
if (!adev->mmhub.ras->ras_block.ras_late_init)
struct amdgpu_hdp_ras hdp_v4_0_ras = {
.ras_block = {
- .name = "hdp",
- .block = AMDGPU_RAS_BLOCK__HDP,
+ .ras_comm = {
+ .name = "hdp",
+ .block = AMDGPU_RAS_BLOCK__HDP,
+ },
.hw_ops = &hdp_v4_0_ras_hw_ops,
.ras_late_init = amdgpu_hdp_ras_late_init,
.ras_fini = amdgpu_hdp_ras_fini,
if (!block_obj)
return -EINVAL;
- if ((block_obj->block == block) &&
- (block_obj->sub_block_index == sub_block_index)) {
+ if ((block_obj->ras_comm.block == block) &&
+ (block_obj->ras_comm.sub_block_index == sub_block_index)) {
return 0;
}
struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
.ras_block = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
- .name = "mp0",
+ .ras_comm = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
+ .name = "mp0",
+ },
.hw_ops = &mca_v3_0_mp0_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
.ras_late_init = mca_v3_0_mp0_ras_late_init,
struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
.ras_block = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
- .name = "mp1",
+ .ras_comm = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
+ .name = "mp1",
+ },
.hw_ops = &mca_v3_0_mp1_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
.ras_late_init = mca_v3_0_mp1_ras_late_init,
struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
.ras_block = {
- .block = AMDGPU_RAS_BLOCK__MCA,
- .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
- .name = "mpio",
+ .ras_comm = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
+ .name = "mpio",
+ },
.hw_ops = &mca_v3_0_mpio_hw_ops,
.ras_block_match = mca_v3_0_ras_block_match,
.ras_late_init = mca_v3_0_mpio_ras_late_init,
struct amdgpu_nbio_ras nbio_v7_4_ras = {
.ras_block = {
- .name = "pcie_bif",
- .block = AMDGPU_RAS_BLOCK__PCIE_BIF,
+ .ras_comm = {
+ .name = "pcie_bif",
+ .block = AMDGPU_RAS_BLOCK__PCIE_BIF,
+ },
.hw_ops = &nbio_v7_4_ras_hw_ops,
.ras_late_init = amdgpu_nbio_ras_late_init,
.ras_fini = amdgpu_nbio_ras_fini,
if (adev->sdma.ras) {
amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);
- strcpy(adev->sdma.ras->ras_block.name, "sdma");
- adev->sdma.ras->ras_block.block = AMDGPU_RAS_BLOCK__SDMA;
+ strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma");
+ adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
/* If don't define special ras_late_init function, use default ras_late_init */
if (!adev->sdma.ras->ras_block.ras_late_init)