control = &con->eeprom_control;
data = con->eh_data;
- save_count = data->count - control->num_recs;
+ save_count = data->count - control->ras_num_recs;
/* only new entries are saved */
if (save_count > 0) {
if (amdgpu_ras_eeprom_write(control,
- &data->bps[control->num_recs],
+ &data->bps[control->ras_num_recs],
save_count)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
int ret;
/* no bad page record, skip eeprom access */
- if (control->num_recs == 0 || amdgpu_bad_page_threshold == 0)
+ if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
return 0;
- bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
+ bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
if (!bps)
return -ENOMEM;
- ret = amdgpu_ras_eeprom_read(control, bps, control->num_recs);
+ ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
if (ret)
dev_err(adev->dev, "Failed to load EEPROM table records!");
else
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
kfree(bps);
return ret;
if (exc_err_limit || ret)
goto free;
- if (con->eeprom_control.num_recs) {
+ if (con->eeprom_control.ras_num_recs) {
ret = amdgpu_ras_load_bad_pages(adev);
if (ret)
goto free;
if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
- adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs);
+ adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
}
return 0;
memset(buf, 0, RAS_TABLE_HEADER_SIZE);
- mutex_lock(&control->tbl_mutex);
+ mutex_lock(&control->ras_tbl_mutex);
hdr->header = header;
ret = __write_table_header(control, buf);
- mutex_unlock(&control->tbl_mutex);
+ mutex_unlock(&control->ras_tbl_mutex);
return ret;
}
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
int ret = 0;
- mutex_lock(&control->tbl_mutex);
+ mutex_lock(&control->ras_tbl_mutex);
hdr->header = RAS_TABLE_HDR_VAL;
hdr->version = RAS_TABLE_VER;
control->next_addr = RAS_RECORD_START;
ret = __write_table_header(control, buf);
- mutex_unlock(&control->tbl_mutex);
+ mutex_unlock(&control->ras_tbl_mutex);
return ret;
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- mutex_init(&control->tbl_mutex);
+ mutex_init(&control->ras_tbl_mutex);
/* Read/Create table header from EEPROM address 0 */
ret = amdgpu_eeprom_read(&adev->pm.smu_i2c,
__decode_table_header_from_buf(hdr, buf);
if (hdr->header == RAS_TABLE_HDR_VAL) {
- control->num_recs = (hdr->tbl_size - RAS_TABLE_HEADER_SIZE) /
+ control->ras_num_recs = (hdr->tbl_size - RAS_TABLE_HEADER_SIZE) /
RAS_TABLE_RECORD_SIZE;
control->tbl_byte_sum = __calc_hdr_byte_sum(control);
control->next_addr = RAS_RECORD_START;
DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
- control->num_recs);
+ control->ras_num_recs);
} else if ((hdr->header == RAS_TABLE_HDR_BAD) &&
(amdgpu_bad_page_threshold != 0)) {
- if (ras->bad_page_cnt_threshold > control->num_recs) {
+ if (ras->bad_page_cnt_threshold > control->ras_num_recs) {
dev_info(adev->dev, "Using one valid bigger bad page "
"threshold and correcting eeprom header tag.\n");
ret = amdgpu_ras_eeprom_correct_header_tag(control,
if (!bufs)
return -ENOMEM;
- mutex_lock(&control->tbl_mutex);
+ mutex_lock(&control->ras_tbl_mutex);
/*
* If saved bad pages number exceeds the bad page threshold for
* further check.
*/
if (write && (amdgpu_bad_page_threshold != 0) &&
- ((control->num_recs + num) >= ras->bad_page_cnt_threshold)) {
+ ((control->ras_num_recs + num) >= ras->bad_page_cnt_threshold)) {
dev_warn(adev->dev,
"Saved bad pages(%d) reaches threshold value(%d).\n",
- control->num_recs + num, ras->bad_page_cnt_threshold);
+ control->ras_num_recs + num, ras->bad_page_cnt_threshold);
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
}
*
* TODO - Check the assumption is correct
*/
- control->num_recs += num;
- control->num_recs %= RAS_MAX_RECORD_COUNT;
+ control->ras_num_recs += num;
+ control->ras_num_recs %= RAS_MAX_RECORD_COUNT;
control->tbl_hdr.tbl_size += RAS_TABLE_RECORD_SIZE * num;
if (control->tbl_hdr.tbl_size > RAS_TBL_SIZE_BYTES)
control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
- control->num_recs * RAS_TABLE_RECORD_SIZE;
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
__update_tbl_checksum(control, records, num);
__write_table_header(control, bufs);
free_buf:
kfree(bufs);
- mutex_unlock(&control->tbl_mutex);
+ mutex_unlock(&control->ras_tbl_mutex);
return ret == num ? 0 : -EIO;
}
uint32_t first_rec_offset;
uint32_t tbl_size;
uint32_t checksum;
-}__attribute__((__packed__));
+} __packed;
struct amdgpu_ras_eeprom_control {
struct amdgpu_ras_eeprom_table_header tbl_hdr;
- u32 i2c_address; /* Base I2C 19-bit memory address */
+
+ /* Base I2C EEPROM 19-bit memory address,
+ * where the table is located. For more information,
+ * see top of amdgpu_eeprom.c.
+ */
+ u32 i2c_address;
+
uint32_t next_addr;
- unsigned int num_recs;
- struct mutex tbl_mutex;
+
+ /* Number of records in the table.
+ */
+ unsigned int ras_num_recs;
+
+ /* Protect table access via this mutex.
+ */
+ struct mutex ras_tbl_mutex;
+
u8 tbl_byte_sum;
};
unsigned char mem_channel;
unsigned char mcumc_id;
-}__attribute__((__packed__));
+} __packed;
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
- bool *exceed_err_limit);
+ bool *exceed_err_limit);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev);