From c1c7ce8f5687bb01b2eb0db3c19cb375267bb16d Mon Sep 17 00:00:00 2001
From: =?utf8?q?Christian=20K=C3=B6nig?=
Date: Mon, 16 Oct 2017 16:50:32 +0200
Subject: [PATCH] drm/amdgpu: move GART recovery into GTT manager v2
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

The GTT manager handles the GART address space anyway, so it is
completely pointless to keep the same information around twice.

v2: rebased

Signed-off-by: Christian König
Reviewed-by: Chunming Zhou
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h         |  3 --
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  8 ++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 53 +++++++++++++++++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     | 51 ++++++++-------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h     |  3 +-
 5 files changed, 59 insertions(+), 59 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f25d246..d11967a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1630,9 +1630,6 @@ struct amdgpu_device {
 	/* link all shadow bo */
 	struct list_head shadow_list;
 	struct mutex shadow_list_lock;
-	/* link all gtt */
-	spinlock_t gtt_list_lock;
-	struct list_head gtt_list;
 	/* keep an lru list of rings by HW IP */
 	struct list_head ring_lru_list;
 	spinlock_t ring_lru_list_lock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9d4e0b8..7af0d5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2180,9 +2180,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&adev->shadow_list);
 	mutex_init(&adev->shadow_list_lock);
 
-	INIT_LIST_HEAD(&adev->gtt_list);
-	spin_lock_init(&adev->gtt_list_lock);
-
 	INIT_LIST_HEAD(&adev->ring_lru_list);
 	spin_lock_init(&adev->ring_lru_list_lock);
 
@@ -2877,7 +2874,8 @@ retry:
 			atomic_inc(&adev->vram_lost_counter);
 		}
 
-		r = amdgpu_ttm_recover_gart(adev);
+		r = amdgpu_gtt_mgr_recover(
+			&adev->mman.bdev.man[TTM_PL_TT]);
 		if (r)
 			goto out;
 
@@ -2939,7 +2937,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
 		goto error;
 
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
-	amdgpu_ttm_recover_gart(adev);
+	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
 
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_sriov_reinit_late(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7669dc..e14ab34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -31,6 +31,11 @@ struct amdgpu_gtt_mgr {
 	atomic64_t available;
 };
 
+struct amdgpu_gtt_node {
+	struct drm_mm_node node;
+	struct ttm_buffer_object *tbo;
+};
+
 /**
  * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
 *
@@ -87,9 +92,9 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
 */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
-	return (node->start != AMDGPU_BO_INVALID_OFFSET);
+	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
 }
 
 /**
@@ -109,7 +114,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 	enum drm_mm_insert_mode mode;
 	unsigned long fpfn, lpfn;
 	int r;
@@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 		mode = DRM_MM_INSERT_HIGH;
 
 	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, node,
-					mem->num_pages, mem->page_alignment, 0,
-					fpfn, lpfn, mode);
+	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+					mem->page_alignment, 0, fpfn, lpfn,
+					mode);
 	spin_unlock(&mgr->lock);
 
 	if (!r)
-		mem->start = node->start;
+		mem->start = node->node.start;
 
 	return r;
 }
@@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 			      struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node;
+	struct amdgpu_gtt_node *node;
 	int r;
 
 	spin_lock(&mgr->lock);
@@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 		goto err_out;
 	}
 
-	node->start = AMDGPU_BO_INVALID_OFFSET;
-	node->size = mem->num_pages;
+	node->node.start = AMDGPU_BO_INVALID_OFFSET;
+	node->node.size = mem->num_pages;
+	node->tbo = tbo;
 	mem->mm_node = node;
 
 	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
@@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
 			goto err_out;
 		}
 	} else {
-		mem->start = node->start;
+		mem->start = node->node.start;
 	}
 
 	return 0;
@@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
 			       struct ttm_mem_reg *mem)
 {
 	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct drm_mm_node *node = mem->mm_node;
+	struct amdgpu_gtt_node *node = mem->mm_node;
 
 	if (!node)
 		return;
 
 	spin_lock(&mgr->lock);
-	if (node->start != AMDGPU_BO_INVALID_OFFSET)
-		drm_mm_remove_node(node);
+	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+		drm_mm_remove_node(&node->node);
 	spin_unlock(&mgr->lock);
 
 	atomic64_add(mem->num_pages, &mgr->available);
@@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
 	return (result > 0 ? result : 0) * PAGE_SIZE;
 }
 
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man)
+{
+	struct amdgpu_gtt_mgr *mgr = man->priv;
+	struct amdgpu_gtt_node *node;
+	struct drm_mm_node *mm_node;
+	int r = 0;
+
+	spin_lock(&mgr->lock);
+	drm_mm_for_each_node(mm_node, &mgr->mm) {
+		node = container_of(mm_node, struct amdgpu_gtt_node, node);
+		r = amdgpu_ttm_recover_gart(node->tbo);
+		if (r)
+			break;
+	}
+	spin_unlock(&mgr->lock);
+
+	return r;
+}
+
 /**
  * amdgpu_gtt_mgr_debug - dump VRAM table
 *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3d02c2d..34dbe7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -689,7 +689,6 @@ struct amdgpu_ttm_tt {
 	struct list_head guptasks;
 	atomic_t mmu_invalidations;
 	uint32_t last_set_pages;
-	struct list_head list;
 };
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -865,21 +864,14 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 		return 0;
 	}
 
-	spin_lock(&gtt->adev->gtt_list_lock);
 	flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
 	gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
 	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
 		ttm->pages, gtt->ttm.dma_address, flags);
 
-	if (r) {
+	if (r)
 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 			  ttm->num_pages, gtt->offset);
-		goto error_gart_bind;
-	}
-
-	list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }
 
@@ -920,29 +912,23 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo)
 	return r;
 }
 
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 {
-	struct amdgpu_ttm_tt *gtt, *tmp;
-	struct ttm_mem_reg bo_mem;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+	struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
 	uint64_t flags;
 	int r;
 
-	bo_mem.mem_type = TTM_PL_TT;
-	spin_lock(&adev->gtt_list_lock);
-	list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
-		flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
-		r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
-				     gtt->ttm.ttm.pages, gtt->ttm.dma_address,
-				     flags);
-		if (r) {
-			spin_unlock(&adev->gtt_list_lock);
-			DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
-				  gtt->ttm.ttm.num_pages, gtt->offset);
-			return r;
-		}
-	}
-	spin_unlock(&adev->gtt_list_lock);
-	return 0;
+	if (!gtt)
+		return 0;
+
+	flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+	r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+			     gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+	if (r)
+		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+			  gtt->ttm.ttm.num_pages, gtt->offset);
+	return r;
 }
 
 static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
@@ -957,16 +943,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 		return 0;
 
 	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
-	spin_lock(&gtt->adev->gtt_list_lock);
 	r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
-	if (r) {
+	if (r)
 		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 			  gtt->ttm.ttm.num_pages, gtt->offset);
-		goto error_unbind;
-	}
-	list_del_init(&gtt->list);
-error_unbind:
-	spin_unlock(&gtt->adev->gtt_list_lock);
 	return r;
 }
 
@@ -1003,7 +983,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 		kfree(gtt);
 		return NULL;
 	}
-	INIT_LIST_HEAD(&gtt->list);
 	return &gtt->ttm.ttm;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 016d2af..d2985de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -69,6 +69,7 @@ extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
 
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -91,7 +92,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
-- 
2.7.4