  */
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem)
 {
-	struct amdgpu_gtt_node *node = mem->mm_node;
-
-	return (node->node.start != AMDGPU_BO_INVALID_OFFSET);
-}
-
-/**
- * amdgpu_gtt_mgr_alloc - allocate new ranges
- *
- * @man: TTM memory type manager
- * @tbo: TTM BO we need this range for
- * @place: placement flags and restrictions
- * @mem: the resulting mem object
- *
- * Allocate the address space for a node.
- */
-static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
-				struct ttm_buffer_object *tbo,
-				const struct ttm_place *place,
-				struct ttm_mem_reg *mem)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
-	struct amdgpu_gtt_mgr *mgr = man->priv;
-	struct amdgpu_gtt_node *node = mem->mm_node;
-	enum drm_mm_insert_mode mode;
-	unsigned long fpfn, lpfn;
-	int r;
-
-	if (amdgpu_gtt_mgr_has_gart_addr(mem))
-		return 0;
-
-	if (place)
-		fpfn = place->fpfn;
-	else
-		fpfn = 0;
-
-	if (place && place->lpfn)
-		lpfn = place->lpfn;
-	else
-		lpfn = adev->gart.num_cpu_pages;
-
-	mode = DRM_MM_INSERT_BEST;
-	if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
-		mode = DRM_MM_INSERT_HIGH;
-
-	spin_lock(&mgr->lock);
-	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
-					mem->page_alignment, 0, fpfn, lpfn,
-					mode);
-	spin_unlock(&mgr->lock);
-
-	if (!r)
-		mem->start = node->node.start;
-
-	return r;
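+	/* Only allocations backed by a drm_mm node have a GART address */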
+	return mem->mm_node != NULL;
 }
 /**
 	atomic64_sub(mem->num_pages, &mgr->available);
 	spin_unlock(&mgr->lock);
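+	/*
+	 * No range restriction: don't reserve a drm_mm node, just mark
+	 * the allocation as not having a GART address yet.
+	 */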
+	if (!place->lpfn) {
+		mem->mm_node = NULL;
+		mem->start = AMDGPU_BO_INVALID_OFFSET;
+		return 0;
+	}
+
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
 	if (!node) {
 		r = -ENOMEM;
 		goto err_out;
 	}
-	node->node.start = AMDGPU_BO_INVALID_OFFSET;
-	node->node.size = mem->num_pages;
 	node->tbo = tbo;
-	mem->mm_node = node;
-	if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
-		r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
-		if (unlikely(r)) {
-			kfree(node);
-			mem->mm_node = NULL;
-			goto err_out;
-		}
-	} else {
-		mem->start = node->node.start;
-	}
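+	/* A specific range was requested, so reserve a real GART node now */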
+	spin_lock(&mgr->lock);
+	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
+					mem->page_alignment, 0, place->fpfn,
+					place->lpfn, DRM_MM_INSERT_BEST);
+	spin_unlock(&mgr->lock);
+
+	if (unlikely(r))
+		goto err_free;
+
+	mem->mm_node = node;
+	mem->start = node->node.start;
 	return 0;
+
+err_free:
+	kfree(node);
+
 err_out:
 	atomic64_add(mem->num_pages, &mgr->available);
 	struct amdgpu_gtt_mgr *mgr = man->priv;
 	struct amdgpu_gtt_node *node = mem->mm_node;
-	if (!node)
-		return;
-
-	spin_lock(&mgr->lock);
-	if (node->node.start != AMDGPU_BO_INVALID_OFFSET)
+	if (node) {
+		spin_lock(&mgr->lock);
 		drm_mm_remove_node(&node->node);
-	spin_unlock(&mgr->lock);
-	atomic64_add(mem->num_pages, &mgr->available);
+		spin_unlock(&mgr->lock);
+		kfree(node);
+	}
-	kfree(node);
-	mem->mm_node = NULL;
+	atomic64_add(mem->num_pages, &mgr->available);
 }
 /**
 }
 	src_offset = src->offset;
-	src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
-	src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
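+	/* No drm_mm node: treat the source as one contiguous range */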
+	if (src->mem->mm_node) {
+		src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+		src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
+	} else {
+		src_mm = NULL;
+		src_node_size = ULLONG_MAX;
+	}
 	dst_offset = dst->offset;
-	dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
-	dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
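+	/* Same for the destination: no node means one contiguous range */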
+	if (dst->mem->mm_node) {
+		dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+		dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
+	} else {
+		dst_mm = NULL;
+		dst_node_size = ULLONG_MAX;
+	}
 	mutex_lock(&adev->mman.gtt_window_lock);