} while (fault->timestamp < tmp);
}
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev)
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
{
+ int r;
+
+ /* umc ras block */
+ r = amdgpu_umc_ras_sw_init(adev);
+ if (r)
+ return r;
+
if (!adev->gmc.xgmi.connected_to_cpu) {
adev->gmc.xgmi.ras = &xgmi_ras;
amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
uint16_t pasid, uint64_t timestamp);
void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
uint16_t pasid);
-int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev);
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
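+
+/*
+ * Register the umc ras block with the ras core and hook up the default
+ * late_init and error-handling callbacks when the specific umc version
+ * doesn't provide its own.
+ */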
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_umc_ras *ras;
+
+ if (!adev->umc.ras)
+ return 0;
+
+ ras = adev->umc.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register umc ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "umc");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if = &ras->ras_block.ras_comm;
+
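+ /* use the default ras_late_init if the umc version doesn't define its own */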
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
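+ /* likewise, fall back to the default ras_cb if none is defined */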
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+
+ return 0;
+}
+
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;
unsigned long active_mask;
};
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset);
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
default:
break;
}
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
-
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (adev->ip_versions[MMHUB_HWIP][0]) {
static int gmc_v10_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v10_0_set_mmhub_funcs(adev);
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
amdgpu_vm_manager_init(adev);
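+ /* register the gmc ras blocks (umc, xgmi) with the ras core */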
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not define special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
amdgpu_vm_manager_init(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}
default:
break;
}
-
- if (adev->umc.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
-
- strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
- adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
- adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->umc.ras->ras_block.ras_late_init)
- adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->umc.ras->ras_block.ras_cb)
- adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
- }
}
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
static int gmc_v9_0_early_init(void *handle)
{
- int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
- r = amdgpu_gmc_ras_early_init(adev);
- if (r)
- return r;
-
return 0;
}
gmc_v9_0_save_registers(adev);
+ r = amdgpu_gmc_ras_sw_init(adev);
+ if (r)
+ return r;
+
return 0;
}