drm/amdgpu: Refine CSA related functions
author Rex Zhu <Rex.Zhu@amd.com>
Mon, 15 Oct 2018 09:08:38 +0000 (17:08 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 5 Nov 2018 19:21:48 +0000 (14:21 -0500)
There are no functional changes. Use function arguments for the
SRIOV-specific variables that were hardcoded in those functions,
so the functions can also be shared with bare metal.
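
For illustration only (not part of this patch): with the new
signatures, a hypothetical bare-metal caller could pick its own BO,
placement, and size instead of the hardcoded SRIOV CSA object in
VRAM. The GTT domain and the local variable names below are
assumptions made for this sketch:

	struct amdgpu_bo *csa_bo = NULL;
	int r;

	/* Allocate and zero a CSA BO; placement is now caller-chosen. */
	r = amdgpu_allocate_static_csa(adev, &csa_bo,
				       AMDGPU_GEM_DOMAIN_GTT,
				       AMDGPU_CSA_SIZE);
	if (r)
		return r;

	/* ... when done, release it through the generalized helper. */
	amdgpu_free_static_csa(&csa_bo);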

Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 416a676..0bf13d6 100644
@@ -1656,7 +1656,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
                        /* right after GMC hw init, we create CSA */
                        if (amdgpu_sriov_vf(adev)) {
-                               r = amdgpu_allocate_static_csa(adev);
+                               r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+                                                               AMDGPU_GEM_DOMAIN_VRAM,
+                                                               AMDGPU_CSA_SIZE);
                                if (r) {
                                        DRM_ERROR("allocate CSA failed %d\n", r);
                                        return r;
@@ -1890,7 +1892,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
                        amdgpu_ucode_free_bo(adev);
-                       amdgpu_free_static_csa(adev);
+                       amdgpu_free_static_csa(&adev->virt.csa_obj);
                        amdgpu_device_wb_fini(adev);
                        amdgpu_device_vram_scratch_fini(adev);
                }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8f6ff9f..9b3164c 100644
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        }
 
        if (amdgpu_sriov_vf(adev)) {
-               r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+               uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+               r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+                                               &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
                if (r)
                        goto error_vm;
        }
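
Note that the CSA virtual address is now computed by the caller and
passed in, rather than derived inside amdgpu_map_static_csa() (see
the amdgpu_virt.c hunk below). A minimal sketch of how another
caller could map a CSA BO, assuming caller-owned vm and csa_bo
variables:

	struct amdgpu_bo_va *csa_va = NULL;
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
	int r;

	/* Map the BO at a caller-chosen VA, masked against the GMC hole. */
	r = amdgpu_map_static_csa(adev, vm, csa_bo, &csa_va,
				  csa_addr, AMDGPU_CSA_SIZE);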
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 9ff16b7..f71bc6f 100644
@@ -41,25 +41,25 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+                               u32 domain, uint32_t size)
 {
        int r;
        void *ptr;
 
-       r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
-                               AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
+       r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+                               domain, bo,
                                NULL, &ptr);
        if (r)
                return r;
 
-       memset(ptr, 0, AMDGPU_CSA_SIZE);
+       memset(ptr, 0, size);
        return 0;
 }
 
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
-       amdgpu_bo_free_kernel(&adev->virt.csa_obj,
-                                               NULL,
-                                               NULL);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+       amdgpu_bo_free_kernel(bo, NULL, NULL);
 }
 
 /*
@@ -69,9 +69,9 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
  * package to support SRIOV gfx preemption.
  */
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                         struct amdgpu_bo_va **bo_va)
+                         struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+                         uint64_t csa_addr, uint32_t size)
 {
-       uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
-       csa_tv.bo = &adev->virt.csa_obj->tbo;
+       csa_tv.bo = &bo->tbo;
        csa_tv.shared = true;
 
        list_add(&csa_tv.head, &list);
@@ -92,7 +92,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                return r;
        }
 
-       *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+       *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!*bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
@@ -100,7 +100,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        }
 
        r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-                               AMDGPU_CSA_SIZE);
+                               size);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
@@ -108,7 +108,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                return r;
        }
 
-       r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
+       r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f1a6a50..09a7ebe 100644
@@ -280,10 +280,12 @@ struct amdgpu_vm;
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+                               u32 domain, uint32_t size);
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                         struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
+                         struct amdgpu_bo *bo,
+                         struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);