	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL);
}
+
+int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
+{
+	u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
+	u32 max_region =
+		REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
+	u32 max_num_physical_nodes = 0;
+	u32 max_physical_node_id = 0;
+
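+	/* per-ASIC limits on XGMI hive size; unsupported ASICs reject XGMI setup */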
+	switch (adev->asic_type) {
+	case CHIP_SIENNA_CICHLID:
+		max_num_physical_nodes = 4;
+		max_physical_node_id = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* PF_MAX_REGION=0 means xgmi is disabled */
+	if (max_region) {
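+		/*
+		 * PF_MAX_REGION is the highest region index,
+		 * so the hive holds max_region + 1 nodes.
+		 */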
+		adev->gmc.xgmi.num_physical_nodes = max_region + 1;
+		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
+			return -EINVAL;
+
+		adev->gmc.xgmi.physical_node_id =
+			REG_GET_FIELD(xgmi_lfb_cntl, GCMC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
+		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
+			return -EINVAL;
+
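+		/* PF_LFB_SIZE is in units of 16MB, hence the shift by 24 */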
+		adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
+			RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_SIZE),
+			GCMC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+	}
+
+	return 0;
+}
	else
		base = gfxhub_v2_0_get_fb_location(adev);
+	/* add the xgmi offset of the physical node */
+	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
		adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
	else
		adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
+
+	/* add the xgmi offset of the physical node */
+	adev->vm_manager.vram_base_offset +=
+		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
/**
		return r;
	}
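+	/*
+	 * Query the XGMI topology before MC init so the VRAM location
+	 * below can account for this node's segment offset.
+	 */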
+	if (adev->gmc.xgmi.supported) {
+		r = gfxhub_v2_1_get_xgmi_info(adev);
+		if (r)
+			return r;
+	}
+
	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;