drm/amdgpu: drop amdgpu_gtt_node
author	Christian König <christian.koenig@amd.com>
	Fri, 16 Jul 2021 12:37:14 +0000 (14:37 +0200)
committer	Christian König <christian.koenig@amd.com>
	Tue, 29 Mar 2022 08:57:12 +0000 (10:57 +0200)
struct ttm_resource, the base structure, now carries the BO pointer as well, so the amdgpu_gtt_node wrapper is no longer needed.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Link: https://patchwork.freedesktop.org/patch/msgid/20220321132601.2161-6-christian.koenig@amd.com
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
include/drm/ttm/ttm_resource.h
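
For context, here is a minimal sketch of the structures involved, abridged from include/drm/ttm/ttm_resource.h and include/drm/ttm/ttm_range_manager.h as of this series (field order and the omitted members are not meant to be exact). struct ttm_resource now carries a back-pointer to its buffer object, which is why the tbo field that amdgpu_gtt_node added on top of struct ttm_range_mgr_node is redundant:

struct ttm_resource {
	unsigned long start;
	unsigned long num_pages;
	/* ... */
	struct ttm_buffer_object *bo;	/* BO back-reference, added earlier in this series */
};

struct ttm_range_mgr_node {
	struct ttm_resource base;	/* base.bo replaces amdgpu_gtt_node::tbo */
	struct drm_mm_node mm_nodes[];	/* flexible array of allocator nodes */
};

static inline struct ttm_range_mgr_node *
to_ttm_range_mgr_node(struct ttm_resource *res)
{
	return container_of(res, struct ttm_range_mgr_node, base);
}

With that, the GTT manager can use struct ttm_range_mgr_node directly and reach the BO through res->bo, as the hunks below do.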

drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 3bcd27a..68494b9 100644
 
 #include "amdgpu.h"
 
-struct amdgpu_gtt_node {
-       struct ttm_buffer_object *tbo;
-       struct ttm_range_mgr_node base;
-};
-
 static inline struct amdgpu_gtt_mgr *
 to_gtt_mgr(struct ttm_resource_manager *man)
 {
        return container_of(man, struct amdgpu_gtt_mgr, manager);
 }
 
-static inline struct amdgpu_gtt_node *
-to_amdgpu_gtt_node(struct ttm_resource *res)
-{
-       return container_of(res, struct amdgpu_gtt_node, base.base);
-}
-
 /**
  * DOC: mem_info_gtt_total
  *
@@ -106,9 +95,9 @@ const struct attribute_group amdgpu_gtt_mgr_attr_group = {
  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
 {
-       struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+       struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
 
-       return drm_mm_node_allocated(&node->base.mm_nodes[0]);
+       return drm_mm_node_allocated(&node->mm_nodes[0]);
 }
 
 /**
@@ -128,15 +117,14 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 {
        struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
        uint32_t num_pages = PFN_UP(tbo->base.size);
-       struct amdgpu_gtt_node *node;
+       struct ttm_range_mgr_node *node;
        int r;
 
-       node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
+       node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
 
-       node->tbo = tbo;
-       ttm_resource_init(tbo, place, &node->base.base);
+       ttm_resource_init(tbo, place, &node->base);
        if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
            ttm_resource_manager_usage(man) > man->size) {
                r = -ENOSPC;
@@ -145,8 +133,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 
        if (place->lpfn) {
                spin_lock(&mgr->lock);
-               r = drm_mm_insert_node_in_range(&mgr->mm,
-                                               &node->base.mm_nodes[0],
+               r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
                                                num_pages, tbo->page_alignment,
                                                0, place->fpfn, place->lpfn,
                                                DRM_MM_INSERT_BEST);
@@ -154,18 +141,18 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
                if (unlikely(r))
                        goto err_free;
 
-               node->base.base.start = node->base.mm_nodes[0].start;
+               node->base.start = node->mm_nodes[0].start;
        } else {
-               node->base.mm_nodes[0].start = 0;
-               node->base.mm_nodes[0].size = node->base.base.num_pages;
-               node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+               node->mm_nodes[0].start = 0;
+               node->mm_nodes[0].size = node->base.num_pages;
+               node->base.start = AMDGPU_BO_INVALID_OFFSET;
        }
 
-       *res = &node->base.base;
+       *res = &node->base;
        return 0;
 
 err_free:
-       ttm_resource_fini(man, &node->base.base);
+       ttm_resource_fini(man, &node->base);
        kfree(node);
        return r;
 }
@@ -181,12 +168,12 @@ err_free:
 static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
                               struct ttm_resource *res)
 {
-       struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+       struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
        struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
 
        spin_lock(&mgr->lock);
-       if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
-               drm_mm_remove_node(&node->base.mm_nodes[0]);
+       if (drm_mm_node_allocated(&node->mm_nodes[0]))
+               drm_mm_remove_node(&node->mm_nodes[0]);
        spin_unlock(&mgr->lock);
 
        ttm_resource_fini(man, res);
@@ -202,7 +189,7 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
  */
 int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
 {
-       struct amdgpu_gtt_node *node;
+       struct ttm_range_mgr_node *node;
        struct drm_mm_node *mm_node;
        struct amdgpu_device *adev;
        int r = 0;
@@ -210,8 +197,8 @@ int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
        adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
-               node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
-               r = amdgpu_ttm_recover_gart(node->tbo);
+               node = container_of(mm_node, typeof(*node), mm_nodes[0]);
+               r = amdgpu_ttm_recover_gart(node->base.bo);
                if (r)
                        break;
        }
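
A side note on the allocation in amdgpu_gtt_mgr_new() above: struct_size(node, mm_nodes, 1) comes from <linux/overflow.h> and sizes the structure plus one element of its flexible mm_nodes[] array, saturating instead of wrapping on arithmetic overflow. Roughly, as an illustration only (the real macro additionally handles overflow):

/* What struct_size(node, mm_nodes, 1) amounts to for this structure. */
size_t sz = sizeof(struct ttm_range_mgr_node) +
	    1 * sizeof(((struct ttm_range_mgr_node *)0)->mm_nodes[0]);

node = kzalloc(sz, GFP_KERNEL);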
include/drm/ttm/ttm_resource.h
index 4ca960b..4428a62 100644
@@ -355,6 +355,14 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
                          struct ttm_resource_cursor *cursor,
                          struct ttm_resource *res);
 
+/**
+ * ttm_resource_manager_for_each_res - iterate over all resources
+ * @man: the resource manager
+ * @cursor: struct ttm_resource_cursor for the current position
+ * @res: the current resource
+ *
+ * Iterate over all the evictable resources in a resource manager.
+ */
 #define ttm_resource_manager_for_each_res(man, cursor, res)            \
        for (res = ttm_resource_manager_first(man, cursor); res;        \
             res = ttm_resource_manager_next(man, cursor, res))
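
A hedged usage sketch of the new iterator (the caller and its names are hypothetical; as with the existing ttm_resource_manager_first()/_next() helpers, the walk is over the manager's LRU lists, so the caller is assumed to hold bdev->lru_lock):

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_resource.h>

/* Hypothetical debug helper: print every evictable resource in a manager. */
static void example_dump_resources(struct ttm_device *bdev,
				   struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	spin_lock(&bdev->lru_lock);
	ttm_resource_manager_for_each_res(man, &cursor, res)
		pr_info("res %p: %lu pages, bo %p\n",
			res, res->num_pages, res->bo);
	spin_unlock(&bdev->lru_lock);
}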