Only 8GB of virtual address space is used by default now
v2: use -ENOSPC for the error case
Signed-off-by: Jammy Zhou <Jammy.Zhou@amd.com>
memset(&va, 0, sizeof(va));
- bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, alloc_buffer->alloc_size, alloc_buffer->phys_alignment);
+ bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr,
+ alloc_buffer->alloc_size,
+ alloc_buffer->phys_alignment);
+
+ if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
+ amdgpu_bo_free_internal(bo);
+ return -ENOSPC;
+ }
va.in.handle = bo->handle;
va.in.operation = AMDGPU_VA_OP_MAP;
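
For reference, a minimal sketch of what a caller of this path sees after the change, assuming the amdgpu_bo_alloc() entry point and the request/result structures from the libdrm amdgpu API of this era; the request values are made up:

    #include <errno.h>
    #include "amdgpu.h"
    #include "amdgpu_drm.h"

    static int alloc_example(amdgpu_device_handle dev)
    {
            struct amdgpu_bo_alloc_request req = {
                    .alloc_size = 4096,
                    .phys_alignment = 4096,
                    .preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
            };
            struct amdgpu_bo_alloc_result res;
            int r = amdgpu_bo_alloc(dev, &req, &res);

            if (r == -ENOSPC) {
                    /* GPU VA space exhausted; before this patch the
                     * mapping would have been attempted at a bogus
                     * address instead of failing cleanly */
            }
            return r;
    }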
bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, bo->alloc_size, 1 << 20);
+ if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
+ pthread_mutex_unlock(&dev->bo_table_mutex);
+ amdgpu_bo_reference(&bo, NULL);
+ return -ENOSPC;
+ }
+
memset(&va, 0, sizeof(va));
va.in.handle = bo->handle;
va.in.operation = AMDGPU_VA_OP_MAP;
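
The import path differs from the alloc path because dev->bo_table_mutex is held here, so the new error exit has to unlock it and drop the reference that was just taken. A self-contained toy illustrating that discipline (all names hypothetical):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct obj { int refcount; };

    static void obj_unref(struct obj **o)
    {
            if (*o && --(*o)->refcount == 0)
                    free(*o);
            *o = NULL;
    }

    static int import_obj(pthread_mutex_t *table_mutex, int resource_ok,
                          struct obj **out)
    {
            struct obj *o;

            pthread_mutex_lock(table_mutex);
            o = calloc(1, sizeof(*o));
            if (!o) {
                    pthread_mutex_unlock(table_mutex);
                    return -ENOMEM;
            }
            o->refcount = 1;
            if (!resource_ok) {
                    /* unlock before every return, then drop the only
                     * reference, which tears the object down again */
                    pthread_mutex_unlock(table_mutex);
                    obj_unref(&o);
                    return -ENOSPC;
            }
            pthread_mutex_unlock(table_mutex);
            *out = o;
            return 0;
    }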
bo->handle = args.handle;
bo->virtual_mc_base_address = amdgpu_vamgr_find_va(&dev->vamgr, size, 4 * 1024);
+ if (bo->virtual_mc_base_address == AMDGPU_INVALID_VA_ADDRESS) {
+ amdgpu_bo_free_internal(bo);
+ return -ENOSPC;
+ }
+
memset(&va, 0, sizeof(va));
va.in.handle = bo->handle;
va.in.operation = AMDGPU_VA_OP_MAP;
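
All three fixed call sites now share the same failure convention but pass different alignments to amdgpu_vamgr_find_va(): the caller-chosen phys_alignment for ordinary allocations, 1 MiB (1 << 20) for imports, and one 4 KiB page here. A trivial check of what the 4 KiB case guarantees:

    #include <assert.h>
    #include <stdint.h>

    static void check_page_aligned(uint64_t mc_addr)
    {
            /* 4 * 1024 alignment means the low 12 bits are clear */
            assert((mc_addr & 4095) == 0);
    }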
#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))
+#define AMDGPU_INVALID_VA_ADDRESS 0xffffffffffffffff
+
struct amdgpu_bo_va_hole {
struct list_head list;
uint64_t offset;
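
ROUND_UP/ROUND_DOWN above rely on a __round_mask helper that is not shown in this hunk; assuming the usual kernel-style definition, the macros behave as follows (worked values are illustrative, and y must be a power of two). The all-ones sentinel is a safe error marker because no valid VA can reach 2^64 - 1 once allocations are bounded by va_max:

    /* assumed definition, kernel style; not part of this patch: */
    #define __round_mask(x, y) ((__typeof__(x))((y) - 1))

    /* ROUND_UP(0x1234, 0x1000)   == 0x2000
     * ROUND_DOWN(0x1234, 0x1000) == 0x1000
     * ROUND_UP(0x2000, 0x1000)   == 0x2000 (already aligned) */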
struct amdgpu_bo_va_mgr {
/* the start virtual address */
uint64_t va_offset;
+ uint64_t va_max;
struct list_head va_holes;
pthread_mutex_t bo_va_mutex;
uint32_t va_alignment;
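
The va_holes list suggests a hole-list allocator: freed ranges are remembered and reused before va_offset is bumped further. A minimal first-fit sketch of that idea (singly linked list instead of list_head, no alignment handling; assumed semantics only):

    #include <stdint.h>
    #include <stdlib.h>

    struct hole { struct hole *next; uint64_t offset, size; };

    static uint64_t take_from_holes(struct hole **head, uint64_t size)
    {
            for (struct hole **p = head; *p; p = &(*p)->next) {
                    struct hole *h = *p;

                    if (h->size < size)
                            continue;
                    uint64_t offset = h->offset;
                    h->offset += size;      /* shrink hole from the front */
                    h->size -= size;
                    if (h->size == 0) {     /* fully consumed: unlink, free */
                            *p = h->next;
                            free(h);
                    }
                    return offset;
            }
            return 0xffffffffffffffffull;   /* no hole fits; caller bumps
                                             * va_offset instead */
    }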
struct amdgpu_bo_va_mgr *vamgr = &dev->vamgr;
vamgr->va_offset = dev->dev_info.virtual_address_offset;
+ vamgr->va_max = dev->dev_info.virtual_address_max;
vamgr->va_alignment = dev->dev_info.virtual_address_alignment;
list_inithead(&vamgr->va_holes);
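
The new va_max comes straight from the device info, so the manager now knows the end of the usable range as well as its start. A hedged sketch of how dev->dev_info might be populated beforehand, assuming the AMDGPU_INFO_DEV_INFO query that libdrm exposes for device information:

    #include "amdgpu.h"
    #include "amdgpu_drm.h"

    static int query_va_range(amdgpu_device_handle dev)
    {
            struct drm_amdgpu_info_device dev_info;
            int r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO,
                                      sizeof(dev_info), &dev_info);

            if (r == 0) {
                    /* [virtual_address_offset, virtual_address_max) is
                     * the window find_va may hand addresses out from */
            }
            return r;
    }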
offset = mgr->va_offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
+
+ if (offset + waste + size > mgr->va_max) {
+ pthread_mutex_unlock(&mgr->bo_va_mutex);
+ return AMDGPU_INVALID_VA_ADDRESS;
+ }
+
if (waste) {
n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
n->size = waste;
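
A self-contained sketch of the bump-allocation tail above, with the manager state reduced to two scalars and the locking omitted; note the code computes waste with %, so the alignment need not be a power of two:

    #include <stdint.h>

    #define INVALID_VA 0xffffffffffffffffull

    static uint64_t find_va_tail(uint64_t *va_offset, uint64_t va_max,
                                 uint64_t size, uint64_t alignment)
    {
            uint64_t offset = *va_offset;
            uint64_t waste = offset % alignment;

            waste = waste ? alignment - waste : 0;

            /* the check added by this patch: refuse to walk past va_max */
            if (offset + waste + size > va_max)
                    return INVALID_VA;

            *va_offset = offset + waste + size;
            return offset + waste; /* aligned start of the new block */
    }

    /* e.g. offset 0x1100, alignment 0x1000, size 0x1000, va_max 0x3000:
     * waste = 0xf00, block starts at 0x2000, new offset is 0x3000. */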
uint64_t ids_flags;
/** Starting virtual address for UMDs. */
uint64_t virtual_address_offset;
+ /** The maximum virtual address */
+ uint64_t virtual_address_max;
/** Required alignment of virtual addresses. */
uint32_t virtual_address_alignment;
/** Page table entry - fragment size */
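
Extending struct drm_amdgpu_info_device means the kernel has to fill the new field; a hypothetical kernel-side counterpart (not part of this patch, names assumed) would derive it from the VM manager's page-frame limit:

    /* hypothetical kernel-side fill, in the amdgpu info ioctl: */
    dev_info.virtual_address_max =
            (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;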