drm/amdgpu: Separate vf2pf work item init from virt data exchange
authorVictor Skvortsov <victor.skvortsov@amd.com>
Thu, 16 Dec 2021 17:01:45 +0000 (17:01 +0000)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 16 Dec 2021 19:08:20 +0000 (14:08 -0500)
We want to be able to call virt data exchange conditionally
after gmc sw init to reserve bad pages as early as possible.
Since this is a conditional call, we will need
to call it again unconditionally later in the init sequence.

Refactor the data exchange function so it can be
called multiple times without re-initializing the work item.

v2: Cleaned up the code. Kept the original call to
amdgpu_virt_init_data_exchange() inside early init to initialize the
work item; afterwards, call amdgpu_virt_exchange_data() when needed.

Signed-off-by: Victor Skvortsov <victor.skvortsov@amd.com>
Reviewed-by: Shaoyun Liu <Shaoyun.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

index bd42326..0cd2404 100644 (file)
@@ -2317,6 +2317,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
                /* need to do gmc hw init early so we can allocate gpu mem */
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+                       /* Try to reserve bad pages early */
+                       if (amdgpu_sriov_vf(adev))
+                               amdgpu_virt_exchange_data(adev);
+
                        r = amdgpu_device_vram_scratch_init(adev);
                        if (r) {
                                DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
@@ -2348,7 +2352,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
        }
 
        if (amdgpu_sriov_vf(adev))
-               amdgpu_virt_init_data_exchange(adev);
+               amdgpu_virt_exchange_data(adev);
 
        r = amdgpu_ib_pool_init(adev);
        if (r) {
index 3fc4982..f8e574c 100644 (file)
@@ -622,19 +622,37 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
 
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 {
-       uint64_t bp_block_offset = 0;
-       uint32_t bp_block_size = 0;
-       struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
-
        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;
        adev->virt.vf2pf_update_interval_ms = 0;
 
-       if (adev->mman.fw_vram_usage_va != NULL) {
+       if (adev->bios != NULL) {
                adev->virt.vf2pf_update_interval_ms = 2000;
 
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amd_sriov_msg_pf2vf_info_header *)
+                       (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+               amdgpu_virt_read_pf2vf_data(adev);
+       }
+
+       if (adev->virt.vf2pf_update_interval_ms != 0) {
+               INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+               schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+       }
+}
+
+
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
+{
+       uint64_t bp_block_offset = 0;
+       uint32_t bp_block_size = 0;
+       struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+
+       if (adev->mman.fw_vram_usage_va != NULL) {
+
+               adev->virt.fw_reserve.p_pf2vf =
+                       (struct amd_sriov_msg_pf2vf_info_header *)
                        (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
                adev->virt.fw_reserve.p_vf2pf =
                        (struct amd_sriov_msg_vf2pf_info_header *)
@@ -663,16 +681,10 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                        (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
 
                amdgpu_virt_read_pf2vf_data(adev);
-
-               return;
-       }
-
-       if (adev->virt.vf2pf_update_interval_ms != 0) {
-               INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
-               schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
        }
 }
 
+
 void amdgpu_detect_virtualization(struct amdgpu_device *adev)
 {
        uint32_t reg;
index 8d4c20b..9adfb8d 100644 (file)
@@ -308,6 +308,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
 void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
 void amdgpu_detect_virtualization(struct amdgpu_device *adev);