#include "amdgpu.h"
-struct amdgpu_gtt_node {
- struct ttm_buffer_object *tbo;
- struct ttm_range_mgr_node base;
-};
-
static inline struct amdgpu_gtt_mgr *
to_gtt_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_gtt_mgr, manager);
}
-static inline struct amdgpu_gtt_node *
-to_amdgpu_gtt_node(struct ttm_resource *res)
-{
- return container_of(res, struct amdgpu_gtt_node, base.base);
-}
-
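/*
 * Reference sketch, not part of this patch: with the private wrapper struct
 * and cast gone, the code below leans directly on the generic range-manager
 * node from include/drm/ttm/ttm_range_manager.h, which at the time of
 * writing looks roughly like this:
 *
 *	struct ttm_range_mgr_node {
 *		struct ttm_resource base;
 *		struct drm_mm_node mm_nodes[];
 *	};
 *
 *	static inline struct ttm_range_mgr_node *
 *	to_ttm_range_mgr_node(struct ttm_resource *res)
 *	{
 *		return container_of(res, struct ttm_range_mgr_node, base);
 *	}
 *
 * So &node->base is the struct ttm_resource that TTM passes around, and
 * to_ttm_range_mgr_node() is simply the container_of() cast back to the
 * wrapper.
 */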
/**
 * DOC: mem_info_gtt_total
 *
 * The amdgpu driver provides a sysfs API for reporting current total size of
 * the GTT.
 * The file mem_info_gtt_total is used for this, and returns the total size of
 * the GTT block, in bytes.
 */
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
{
- struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
- return drm_mm_node_allocated(&node->base.mm_nodes[0]);
+ return drm_mm_node_allocated(&node->mm_nodes[0]);
}

/**
 * amdgpu_gtt_mgr_new - allocate a new node
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @res: the resulting mem object
 *
 * Dummy, allocate the node but no space for it yet.
 */
static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
			      struct ttm_buffer_object *tbo,
			      const struct ttm_place *place,
			      struct ttm_resource **res)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
uint32_t num_pages = PFN_UP(tbo->base.size);
- struct amdgpu_gtt_node *node;
+ struct ttm_range_mgr_node *node;
int r;
- node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
if (!node)
return -ENOMEM;
- node->tbo = tbo;
- ttm_resource_init(tbo, place, &node->base.base);
+ ttm_resource_init(tbo, place, &node->base);
if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
ttm_resource_manager_usage(man) > man->size) {
		r = -ENOSPC;
		goto err_free;
	}

	if (place->lpfn) {
spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range(&mgr->mm,
- &node->base.mm_nodes[0],
+ r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
num_pages, tbo->page_alignment,
0, place->fpfn, place->lpfn,
DRM_MM_INSERT_BEST);
		spin_unlock(&mgr->lock);
		if (unlikely(r))
goto err_free;
- node->base.base.start = node->base.mm_nodes[0].start;
+ node->base.start = node->mm_nodes[0].start;
} else {
- node->base.mm_nodes[0].start = 0;
- node->base.mm_nodes[0].size = node->base.base.num_pages;
- node->base.base.start = AMDGPU_BO_INVALID_OFFSET;
+ node->mm_nodes[0].start = 0;
+ node->mm_nodes[0].size = node->base.num_pages;
+ node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
- *res = &node->base.base;
+ *res = &node->base;
return 0;
err_free:
- ttm_resource_fini(man, &node->base.base);
+ ttm_resource_fini(man, &node->base);
kfree(node);
return r;
}
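
/*
 * Explanatory note, not part of this patch: in amdgpu_gtt_mgr_new() above,
 * only placements with an lpfn restriction get a real drm_mm node, i.e. an
 * actual GART window, right away.  Unrestricted placements keep mm_nodes[0]
 * unallocated and are marked with AMDGPU_BO_INVALID_OFFSET, which is exactly
 * the state amdgpu_gtt_mgr_has_gart_addr() tests for; GART space for such
 * BOs is expected to be allocated later (typically via
 * amdgpu_ttm_alloc_gart()) once a real GART address is needed.
 */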
static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
- struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res);
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
spin_lock(&mgr->lock);
- if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
- drm_mm_remove_node(&node->base.mm_nodes[0]);
+ if (drm_mm_node_allocated(&node->mm_nodes[0]))
+ drm_mm_remove_node(&node->mm_nodes[0]);
spin_unlock(&mgr->lock);

	ttm_resource_fini(man, res);
	kfree(node);
}

/**
 * amdgpu_gtt_mgr_recover - re-init gart
 *
 * @mgr: amdgpu_gtt_mgr pointer
 *
 * Re-init the gart for each known BO in the GART.
 */
int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
{
- struct amdgpu_gtt_node *node;
+ struct ttm_range_mgr_node *node;
struct drm_mm_node *mm_node;
struct amdgpu_device *adev;
int r = 0;
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
spin_lock(&mgr->lock);
drm_mm_for_each_node(mm_node, &mgr->mm) {
- node = container_of(mm_node, typeof(*node), base.mm_nodes[0]);
- r = amdgpu_ttm_recover_gart(node->tbo);
+ node = container_of(mm_node, typeof(*node), mm_nodes[0]);
+ r = amdgpu_ttm_recover_gart(node->base.bo);
if (r)
break;
}
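
	/*
	 * Explanatory note, not part of this patch: switching from node->tbo
	 * to node->base.bo works because ttm_resource_init() (called from
	 * amdgpu_gtt_mgr_new() above) stores the owning buffer object in
	 * res->bo, so struct ttm_resource already carries the back-pointer
	 * that the private amdgpu_gtt_node used to duplicate.
	 */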