drm/amdgpu: remove gart.ready flag
authorChristian König <christian.koenig@amd.com>
Tue, 18 Jan 2022 11:53:11 +0000 (12:53 +0100)
committerAlex Deucher <alexander.deucher@amd.com>
Thu, 20 Jan 2022 03:32:47 +0000 (22:32 -0500)
That's just a leftover from old radeon days and was preventing CS and GART
bindings before the hardware was initialized. But nowadays that is
perfectly valid.

The only thing we need to warn about is GART bindings before the table
is even allocated.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

index 645950a653a0c3a7e70c4f0f91f4fb90f222959e..53cc844346f064f331bb35aa941ff6fc1717d4ef 100644 (file)
@@ -150,7 +150,6 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
  * replaces them with the dummy page (all asics).
- * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                        int pages)
 {
        unsigned t;
@@ -161,13 +161,11 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
        uint64_t flags = 0;
        int idx;
 
-       if (!adev->gart.ready) {
-               WARN(1, "trying to unbind memory from uninitialized GART !\n");
-               return -EINVAL;
-       }
+       if (WARN_ON(!adev->gart.ptr))
+               return;
 
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
-               return 0;
+               return;
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
        p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
@@ -188,7 +186,6 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
                amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 
        drm_dev_exit(idx);
-       return 0;
 }
 
 /**
@@ -204,7 +201,6 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
  * Map the dma_addresses into GART entries (all asics).
- * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
                    int pages, dma_addr_t *dma_addr, uint64_t flags,
                    void *dst)
 {
@@ -212,13 +209,8 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
        unsigned i, j, t;
        int idx;
 
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
-               return 0;
+               return;
 
        t = offset / AMDGPU_GPU_PAGE_SIZE;
 
@@ -230,7 +222,6 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
                }
        }
        drm_dev_exit(idx);
-       return 0;
 }
 
 /**
@@ -246,20 +237,13 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
  * (all asics).
- * Returns 0 for success, -EINVAL for failure.
  */
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, dma_addr_t *dma_addr,
                     uint64_t flags)
 {
-       if (!adev->gart.ready) {
-               WARN(1, "trying to bind memory to uninitialized GART !\n");
-               return -EINVAL;
-       }
-
-       if (!adev->gart.ptr)
-               return 0;
+       if (WARN_ON(!adev->gart.ptr))
+               return;
 
-       return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-                              adev->gart.ptr);
+       amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
 }
 
 /**
index 78895413cf9fe35433d16b05cc6b921da10988c4..8fea3e04e4110696983ec6f4911424060300cea6 100644 (file)
@@ -46,7 +46,6 @@ struct amdgpu_gart {
        unsigned                        num_gpu_pages;
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
-       bool                            ready;
 
        /* Asic default pte flags */
        uint64_t                        gart_pte_flags;
@@ -58,12 +57,12 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
-int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
-                      int pages);
-int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
-                   int pages, dma_addr_t *dma_addr, uint64_t flags,
-                   void *dst);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-                    int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+                       int pages);
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+                    int pages, dma_addr_t *dma_addr, uint64_t flags,
+                    void *dst);
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+                     int pages, dma_addr_t *dma_addr, uint64_t flags);
 void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
 #endif
index 72022df264f63ee7f3b638eed118ef6ed9f68b31..c5263908caec05044246584077684b371b838f7d 100644 (file)
@@ -220,26 +220,21 @@ uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
  *
  * Re-init the gart for each known BO in the GTT.
  */
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
 {
        struct amdgpu_gtt_node *node;
        struct drm_mm_node *mm_node;
        struct amdgpu_device *adev;
-       int r = 0;
 
        adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
                node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-               r = amdgpu_ttm_recover_gart(node->tbo);
-               if (r)
-                       break;
+               amdgpu_ttm_recover_gart(node->tbo);
        }
        spin_unlock(&mgr->lock);
 
        amdgpu_gart_invalidate_tlb(adev);
-
-       return r;
 }
 
 /**
index 1e012b45f66357ff01f07a6ba22ccaf0ab084502..f0cd52b157f8d2c0fbfaa295304ad42c4725ccdc 100644 (file)
@@ -242,10 +242,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                dma_addr_t *dma_addr;
 
                dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
-               r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
-                                   cpu_addr);
-               if (r)
-                       goto error_free;
+               amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
        } else {
                dma_addr_t dma_address;
 
@@ -253,11 +250,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
                dma_address += adev->vm_manager.vram_base_offset;
 
                for (i = 0; i < num_pages; ++i) {
-                       r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
-                                           &dma_address, flags, cpu_addr);
-                       if (r)
-                               goto error_free;
-
+                       amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
+                                       flags, cpu_addr);
                        dma_address += PAGE_SIZE;
                }
        }
@@ -822,14 +816,13 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 #endif
 }
 
-static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
-                               struct ttm_buffer_object *tbo,
-                               uint64_t flags)
+static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+                                struct ttm_buffer_object *tbo,
+                                uint64_t flags)
 {
        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
        struct ttm_tt *ttm = tbo->ttm;
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
 
        if (amdgpu_bo_encrypted(abo))
                flags |= AMDGPU_PTE_TMZ;
@@ -837,10 +830,8 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
        if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                uint64_t page_idx = 1;
 
-               r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
-                               gtt->ttm.dma_address, flags);
-               if (r)
-                       goto gart_bind_fail;
+               amdgpu_gart_bind(adev, gtt->offset, page_idx,
+                                gtt->ttm.dma_address, flags);
 
                /* The memory type of the first page defaults to UC. Now
                 * modify the memory type to NC from the second page of
@@ -849,21 +840,13 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
                flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
                flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
 
-               r = amdgpu_gart_bind(adev,
-                               gtt->offset + (page_idx << PAGE_SHIFT),
-                               ttm->num_pages - page_idx,
-                               &(gtt->ttm.dma_address[page_idx]), flags);
+               amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT),
+                                ttm->num_pages - page_idx,
+                                &(gtt->ttm.dma_address[page_idx]), flags);
        } else {
-               r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-                                    gtt->ttm.dma_address, flags);
+               amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                                gtt->ttm.dma_address, flags);
        }
-
-gart_bind_fail:
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
-
-       return r;
 }
 
 /*
@@ -879,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
        uint64_t flags;
-       int r = 0;
+       int r;
 
        if (!bo_mem)
                return -EINVAL;
@@ -926,14 +909,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
 
        /* bind pages into GART page tables */
        gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
-       r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
-               gtt->ttm.dma_address, flags);
-
-       if (r)
-               DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
-                         ttm->num_pages, gtt->offset);
+       amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+                        gtt->ttm.dma_address, flags);
        gtt->bound = true;
-       return r;
+       return 0;
 }
 
 /*
@@ -983,12 +962,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 
        /* Bind pages */
        gtt->offset = (u64)tmp->start << PAGE_SHIFT;
-       r = amdgpu_ttm_gart_bind(adev, bo, flags);
-       if (unlikely(r)) {
-               ttm_resource_free(bo, &tmp);
-               return r;
-       }
-
+       amdgpu_ttm_gart_bind(adev, bo, flags);
        amdgpu_gart_invalidate_tlb(adev);
        ttm_resource_free(bo, &bo->resource);
        ttm_bo_assign_mem(bo, tmp);
@@ -1002,19 +976,16 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
  * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
  * rebind GTT pages during a GPU reset.
  */
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        uint64_t flags;
-       int r;
 
        if (!tbo->ttm)
-               return 0;
+               return;
 
        flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
-       r = amdgpu_ttm_gart_bind(adev, tbo, flags);
-
-       return r;
+       amdgpu_ttm_gart_bind(adev, tbo, flags);
 }
 
 /*
@@ -1028,7 +999,6 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
 
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr) {
@@ -1048,10 +1018,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                return;
 
        /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-       r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
-       if (r)
-               DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
-                         gtt->ttm.num_pages, gtt->offset);
+       amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
        gtt->bound = false;
 }
 
index f06fd19b489538e409e3fce1e3d54313bed9cff3..0efc31e3a45777195c07e61119f82c31f6b92a90 100644 (file)
@@ -119,7 +119,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
 
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
-int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
 
 uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
 
@@ -162,7 +162,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        struct dma_fence **fence);
 
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
-int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
index bb9a11bc644b93bc22036ce57a312bbde23f677f..5e88655cdfa54921a6a714215946a6991a57f353 100644 (file)
@@ -1000,14 +1000,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
-               goto skip_pin_bo;
-
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
-skip_pin_bo:
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
@@ -1033,8 +1026,6 @@ skip_pin_bo:
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
-       adev->gart.ready = true;
-
        return 0;
 }
 
index 84f0debe8264d54b053a5661915f55c1fa924e4b..ec291d28edffd882aefb03f9fd6ee2ce54dd5f2c 100644 (file)
@@ -469,16 +469,14 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
 
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
@@ -558,7 +556,6 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
index 8800a18b0cf69025fc7395717a5bff2025e6d1bb..344d819b4c1b6e9b03d772adefee87ac2ac956ef 100644 (file)
@@ -613,17 +613,14 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 tmp, field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
        /* Setup TLB control */
@@ -712,7 +709,6 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
index 1c10fa5d0db7bd77c4719bd8ea4923173a86562b..ca9841d5669fb9829cf6471b51b918d43835a195 100644 (file)
@@ -837,17 +837,14 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 {
        uint64_t table_addr;
-       int r, i;
        u32 tmp, field;
+       int i;
 
        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
 
        /* Setup TLB control */
@@ -953,7 +950,6 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
-       adev->gart.ready = true;
        return 0;
 }
 
index 6866e0311b4987f4560be59bd98875a041d031d6..de32dbca9ab89b8e58b96074495d1494065101c6 100644 (file)
@@ -1783,14 +1783,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
-               goto skip_pin_bo;
-
-       r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
-       if (r)
-               return r;
-
-skip_pin_bo:
+       amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
        r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
@@ -1807,7 +1800,6 @@ skip_pin_bo:
        DRM_INFO("PTB located at 0x%016llX\n",
                        (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
 
-       adev->gart.ready = true;
        return 0;
 }
 
index ed5385137f4831ee71ed191ad3dc97e50b8dde71..d986f9ee0e1f46ad6f55a9b70a2af21e57ae63b0 100644 (file)
@@ -86,10 +86,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 
        cpu_addr = &job->ibs[0].ptr[num_dw];
 
-       r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-       if (r)
-               goto error_free;
-
+       amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
        r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)