drm/amdgpu: cleanup all virtualization detection routine
authorMonk Liu <Monk.Liu@amd.com>
Wed, 4 Mar 2020 06:02:55 +0000 (14:02 +0800)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 1 Apr 2020 18:44:42 +0000 (14:44 -0400)
we need to move virt detection much earlier because:
1) the HW team has confirmed that RCC_IOV_FUNC_IDENTIFIER will always
be at the DE5 (dword) MMIO offset from vega10 onward, so there is no
need to implement a detect_hw_virt() routine in each nbio/chip file.
for VI SRIOV chip (tonga & fiji), the BIF_IOV_FUNC_IDENTIFIER is at
0x1503

2) we need to acknowledge that we are an SRIOV VF before we do IP discovery,
because the IP discovery content will be updated by the host every time it
receives an incoming "REQ_GPU_INIT_DATA" request from the guest (there will
be patches for this new handshake soon).

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
16 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h

index f422ef58b4d859a8b2054ce96292c213281a25d5..449720086fbce6057f9997bd96a189003c72f3b7 100644 (file)
@@ -3055,6 +3055,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
                adev->enable_mes = true;
 
+       /* detect hw virtualization here */
+       amdgpu_detect_virtualization(adev);
+
        if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
                r = amdgpu_discovery_init(adev);
                if (r) {
index 919bd566ba3cfc20670da24a2647ae1425f1c4dd..edaac242ff85708a8f57635791c954c8792c0dcc 100644 (file)
@@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs {
                                      u32 *flags);
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
-       void (*detect_hw_virt)(struct amdgpu_device *adev);
        void (*remap_hdp_registers)(struct amdgpu_device *adev);
        void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
        void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
index adc813cde8e281617118725a544483bc57a64d21..43a1ee332727447d9d05faa007eb47fa4202cfcd 100644 (file)
@@ -287,3 +287,36 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                }
        }
 }
+
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+       uint32_t reg;
+
+       switch (adev->asic_type) {
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+               reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+               break;
+       case CHIP_VEGA10:
+       case CHIP_VEGA20:
+       case CHIP_NAVI10:
+       case CHIP_NAVI12:
+       case CHIP_ARCTURUS:
+               reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+               break;
+       default: /* other chip doesn't support SRIOV */
+               reg = 0;
+               break;
+       }
+
+       if (reg & 1)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+       if (reg & 0x80000000)
+               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+       if (!reg) {
+       if (is_virtual_machine())       /* passthrough mode excludes SR-IOV mode */
+                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+       }
+}
index 0a95b137eadb9f43e5b8d8f71f5d9ae9518e085d..74f9843fce828cb154efd4a00bfac4d2bbf69dc9 100644 (file)
 #define AMDGPU_PASSTHROUGH_MODE        (1 << 3) /* thw whole GPU is pass through for VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME      (1 << 4) /* is out of full access mode */
 
+/* all asic after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
 struct amdgpu_mm_table {
        struct amdgpu_bo        *bo;
        uint32_t                *cpu_addr;
@@ -305,4 +310,5 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
 #endif
index 006f21ef7ddf09a375ead24f7b31af1fa41fd207..db68ffa279845d8969aba08a90af8c39c1a0956c 100644 (file)
@@ -1811,12 +1811,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
                >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
 }
 
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
@@ -2179,8 +2173,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
 
 int cik_set_ip_blocks(struct amdgpu_device *adev)
 {
-       cik_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
index f3a3fe746222f39ff681212a40a10feb3777facb..cbcf04578b999b968f49efee40a4e44fad4f59e0 100644 (file)
@@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
        .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
        .get_clockgating_state = nbio_v2_3_get_clockgating_state,
        .ih_control = nbio_v2_3_ih_control,
        .init_registers = nbio_v2_3_init_registers,
-       .detect_hw_virt = nbio_v2_3_detect_hw_virt,
        .remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
 };
index 635d9e1fc0a364db991317bec42e6d6edccc29b1..7b2fb050407d2fe785bfa389a92497e2bdbdbedf 100644 (file)
@@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
        .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
 };
 
-static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
 {
        uint32_t def, data;
@@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
        .get_clockgating_state = nbio_v6_1_get_clockgating_state,
        .ih_control = nbio_v6_1_ih_control,
        .init_registers = nbio_v6_1_init_registers,
-       .detect_hw_virt = nbio_v6_1_detect_hw_virt,
 };
index d6cbf26074bca475d915d1ac9f1d6b6c13665130..d34628e113fc389bb1809a0905aad09e12d37d46 100644 (file)
@@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
        .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
 };
 
-static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
 {
 
@@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
        .get_clockgating_state = nbio_v7_0_get_clockgating_state,
        .ih_control = nbio_v7_0_ih_control,
        .init_registers = nbio_v7_0_init_registers,
-       .detect_hw_virt = nbio_v7_0_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
 };
index 149d386590df2beb6b7f034ec46061e5b6c58be8..41c53c149852e51066ba593956b44d64ece72a11 100644 (file)
@@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
        .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
 };
 
-static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
-{
-       uint32_t reg;
-
-       reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
-       if (reg & 1)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
-       if (reg & 0x80000000)
-               adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
-       if (!reg) {
-               if (is_virtual_machine())       /* passthrough mode exclus sriov mod */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
 {
 
@@ -561,7 +544,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .get_clockgating_state = nbio_v7_4_get_clockgating_state,
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
-       .detect_hw_virt = nbio_v7_4_detect_hw_virt,
        .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
        .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
        .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
index 033cbbca2072d0eb430738dccbafddb5c718d119..a67d78d7eeeb2018a8ec42c89f68f9b741e3136e 100644 (file)
@@ -465,8 +465,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
        adev->nbio.funcs = &nbio_v2_3_funcs;
        adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
 
-       adev->nbio.funcs->detect_hw_virt(adev);
-
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_nv_virt_ops;
 
index 4d415bfdb42ff2ac62e4431d90a64a9ee3692b2d..153db3f763bc15d4a012b413e835208b01af11b5 100644 (file)
@@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
        return 0;
 }
 
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       if (is_virtual_machine()) /* passthrough mode */
-               adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
 static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
@@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
 
 int si_set_ip_blocks(struct amdgpu_device *adev)
 {
-       si_detect_hw_virtualization(adev);
-
        switch (adev->asic_type) {
        case CHIP_VERDE:
        case CHIP_TAHITI:
index a40499d51c93cd9a1fd3f262402f92c6ae592dc4..a8c90d83a9eed9ddaa4c0923ba8bb8df421f6877 100644 (file)
@@ -712,7 +712,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                adev->df.funcs = &df_v1_7_funcs;
 
        adev->rev_id = soc15_get_rev_id(adev);
-       adev->nbio.funcs->detect_hw_virt(adev);
 
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_ai_virt_ops;
index 78b35901643bcf8707cdaa1a7a60e4cbfbeb5771..0a90c296409bab004bf820ab4f5376c48d67a130 100644 (file)
@@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
-static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
-{
-       uint32_t reg = 0;
-
-       if (adev->asic_type == CHIP_TONGA ||
-           adev->asic_type == CHIP_FIJI) {
-              reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
-              /* bit0: 0 means pf and 1 means vf */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-              /* bit31: 0 means disable IOV and 1 means enable */
-              if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
-                      adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-       }
-
-       if (reg == 0) {
-               if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
-                       adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-       }
-}
-
 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS},
        {mmGRBM_STATUS2},
@@ -1730,9 +1709,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
 
 int vi_set_ip_blocks(struct amdgpu_device *adev)
 {
-       /* in early init stage, vbios code won't work */
-       vi_detect_hw_virtualization(adev);
-
        if (amdgpu_sriov_vf(adev))
                adev->virt.ops = &xgpu_vi_virt_ops;
 
index 68d0ffad28c7dd6716e02a2c1a9b79c452e8fc62..92fd27c26a77b1e16605d99669d1b2e2b3c827d8 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  0
 #define mmRCC_CONFIG_RESERVED                                                                          0x0de4 // duplicate 
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 0
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x0de5 // duplicate 
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             0
+#endif
 
 
 // addressBlock: syshub_mmreg_ind_syshubdec
index 435462294fbc514b343dd709c3d5cd55cff7034f..a7cd760ebf8f14edaa72462245d35ddee8718b90 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
index ce5830ebe095ab6635971041fcc16e8ca4fbe4ba..0c5a08bc034a6b16422da9925c6273184a8a5040 100644 (file)
 #define mmRCC_CONFIG_MEMSIZE_BASE_IDX                                                                  2
 #define mmRCC_CONFIG_RESERVED                                                                          0x00c4
 #define mmRCC_CONFIG_RESERVED_BASE_IDX                                                                 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
 #define mmRCC_IOV_FUNC_IDENTIFIER                                                                      0x00c5
 #define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX                                                             2
+#endif
 
 
 // addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1