drm/amdgpu: define RAS convert_error_address API
authorTao Zhou <tao.zhou1@amd.com>
Tue, 27 Sep 2022 03:36:46 +0000 (11:36 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Tue, 11 Oct 2022 15:05:17 +0000 (11:05 -0400)
Factor the duplicated UMC error-address translation loop into a common
convert_error_address helper so it can be reused (e.g. by the bad page
notifier), and drop the now-redundant umc_reg_offset/mca_addr parameters
and the UMC_INVALID_ADDR sentinel from the convert_ras_error_address
callback.

Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
drivers/gpu/drm/amd/amdgpu/umc_v6_7.c

index ccebd8e..c2f9970 100644 (file)
@@ -2889,7 +2889,7 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb,
        if (adev->umc.ras &&
            adev->umc.ras->convert_ras_error_address)
                adev->umc.ras->convert_ras_error_address(adev,
-                       &err_data, 0, ch_inst, umc_inst, m->addr);
+                       &err_data, m->addr, ch_inst, umc_inst);
 
        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
index 2fb4951..e464392 100644 (file)
@@ -22,8 +22,6 @@
 #define __AMDGPU_UMC_H__
 #include "amdgpu_ras.h"
 
-#define UMC_INVALID_ADDR 0x1ULL
-
 /*
  * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
  * is the index of 4KB block
@@ -54,9 +52,8 @@ struct amdgpu_umc_ras {
        void (*err_cnt_init)(struct amdgpu_device *adev);
        bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
        void (*convert_ras_error_address)(struct amdgpu_device *adev,
-                                                struct ras_err_data *err_data,
-                                                uint32_t umc_reg_offset, uint32_t ch_inst,
-                                                uint32_t umc_inst, uint64_t mca_addr);
+                               struct ras_err_data *err_data, uint64_t err_addr,
+                               uint32_t ch_inst, uint32_t umc_inst);
        void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
                                      void *ras_error_status);
        void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
index 64d760e..5d5d031 100644 (file)
@@ -187,20 +187,51 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev,
        }
 }
 
+static void umc_v6_7_convert_error_address(struct amdgpu_device *adev,
+                                       struct ras_err_data *err_data, uint64_t err_addr,
+                                       uint32_t ch_inst, uint32_t umc_inst)
+{
+       uint32_t channel_index;
+       uint64_t soc_pa, retired_page, column;
+
+       channel_index =
+               adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+       /* translate umc channel address to soc pa, 3 parts are included */
+       soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
+                       ADDR_OF_256B_BLOCK(channel_index) |
+                       OFFSET_IN_256B_BLOCK(err_addr);
+
+       /* The umc channel bits are not original values, they are hashed */
+       SET_CHANNEL_HASH(channel_index, soc_pa);
+
+       /* clear [C4 C3 C2] in soc physical address */
+       soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
+
+       /* loop for all possibilities of [C4 C3 C2] */
+       for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
+               retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
+               dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+               amdgpu_umc_fill_error_record(err_data, err_addr,
+                       retired_page, channel_index, umc_inst);
+
+               /* shift R14 bit */
+               retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
+               dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
+               amdgpu_umc_fill_error_record(err_data, err_addr,
+                       retired_page, channel_index, umc_inst);
+       }
+}
+
 static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
                                         struct ras_err_data *err_data,
                                         uint32_t ch_inst,
                                         uint32_t umc_inst)
 {
-       uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column;
-       uint32_t channel_index;
+       uint64_t mc_umc_status, err_addr;
        uint32_t eccinfo_table_idx;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
        eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst;
-       channel_index =
-               adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
        mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status;
 
        if (mc_umc_status == 0)
@@ -216,30 +247,8 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev,
                err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr;
                err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-               /* translate umc channel address to soc pa, 3 parts are included */
-               soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
-                               ADDR_OF_256B_BLOCK(channel_index) |
-                               OFFSET_IN_256B_BLOCK(err_addr);
-
-               /* The umc channel bits are not original values, they are hashed */
-               SET_CHANNEL_HASH(channel_index, soc_pa);
-
-               /* clear [C4 C3 C2] in soc physical address */
-               soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
-               /* loop for all possibilities of [C4 C3 C2] */
-               for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-                       retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-                       dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-                       amdgpu_umc_fill_error_record(err_data, err_addr,
-                               retired_page, channel_index, umc_inst);
-
-                       /* shift R14 bit */
-                       retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-                       dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-                       amdgpu_umc_fill_error_record(err_data, err_addr,
-                               retired_page, channel_index, umc_inst);
-               }
+               umc_v6_7_convert_error_address(adev, err_data, err_addr,
+                                       ch_inst, umc_inst);
        }
 }
 
@@ -448,75 +457,40 @@ static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
 static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
                                         struct ras_err_data *err_data,
                                         uint32_t umc_reg_offset, uint32_t ch_inst,
-                                        uint32_t umc_inst, uint64_t mca_addr)
+                                        uint32_t umc_inst)
 {
        uint32_t mc_umc_status_addr;
-       uint32_t channel_index;
-       uint64_t mc_umc_status = 0, mc_umc_addrt0;
-       uint64_t err_addr, soc_pa, retired_page, column;
+       uint64_t mc_umc_status = 0, mc_umc_addrt0, err_addr;
 
-       if (mca_addr == UMC_INVALID_ADDR) {
-               mc_umc_status_addr =
-                       SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
-               mc_umc_addrt0 =
-                       SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+       mc_umc_status_addr =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+       mc_umc_addrt0 =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
 
-               mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+       mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
 
-               if (mc_umc_status == 0)
-                       return;
+       if (mc_umc_status == 0)
+               return;
 
-               if (!err_data->err_addr) {
-                       /* clear umc status */
-                       WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
-                       return;
-               }
+       if (!err_data->err_addr) {
+               /* clear umc status */
+               WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+               return;
        }
 
-       channel_index =
-               adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-
        /* calculate error address if ue error is detected */
-       if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
-           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) ||
-           mca_addr != UMC_INVALID_ADDR) {
-               if (mca_addr == UMC_INVALID_ADDR) {
-                       err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
-                       err_addr =
-                               REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
-               } else {
-                       err_addr = mca_addr;
-               }
+       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
+               err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+               err_addr =
+                       REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
 
-               /* translate umc channel address to soc pa, 3 parts are included */
-               soc_pa = ADDR_OF_8KB_BLOCK(err_addr) |
-                               ADDR_OF_256B_BLOCK(channel_index) |
-                               OFFSET_IN_256B_BLOCK(err_addr);
-
-               /* The umc channel bits are not original values, they are hashed */
-               SET_CHANNEL_HASH(channel_index, soc_pa);
-
-               /* clear [C4 C3 C2] in soc physical address */
-               soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);
-
-               /* loop for all possibilities of [C4 C3 C2] */
-               for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
-                       retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
-                       dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-                       amdgpu_umc_fill_error_record(err_data, err_addr,
-                               retired_page, channel_index, umc_inst);
-
-                       /* shift R14 bit */
-                       retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT);
-                       dev_info(adev->dev, "Error Address(PA): 0x%llx\n", retired_page);
-                       amdgpu_umc_fill_error_record(err_data, err_addr,
-                               retired_page, channel_index, umc_inst);
-               }
+               umc_v6_7_convert_error_address(adev, err_data, err_addr,
+                                       ch_inst, umc_inst);
        }
 
        /* clear umc status */
-       if (mca_addr == UMC_INVALID_ADDR)
-               WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+       WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
 }
 
 static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
@@ -538,7 +512,7 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
                umc_v6_7_query_error_address(adev,
                                             err_data,
                                             umc_reg_offset, ch_inst,
-                                            umc_inst, UMC_INVALID_ADDR);
+                                            umc_inst);
        }
 }
 
@@ -579,5 +553,5 @@ struct amdgpu_umc_ras umc_v6_7_ras = {
        .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
        .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count,
        .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address,
-       .convert_ras_error_address = umc_v6_7_query_error_address,
+       .convert_ras_error_address = umc_v6_7_convert_error_address,
 };