amdgpu_fence_reference(&bo->fence[i], NULL);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->gart_page_size);
+ bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->gart_page_size);
+ bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
FREE(bo);
}
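
For context on the accounting in this hunk: align64 rounds a byte size up to the next multiple of a power-of-two alignment, so allocated_vram/allocated_gtt always track whole GART pages. A minimal sketch of the helper (Mesa's util/u_math.h provides an equivalent align64; this is an illustration, not the exact upstream definition):

    #include <stdint.h>

    /* Round 'value' up to the next multiple of 'alignment'
     * (alignment must be a power of two). */
    static inline uint64_t
    align64(uint64_t value, uint64_t alignment)
    {
       return (value + alignment - 1) & ~(alignment - 1);
    }
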
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
if (initial_domain & RADEON_DOMAIN_VRAM)
- ws->allocated_vram += align64(size, ws->gart_page_size);
+ ws->allocated_vram += align64(size, ws->info.gart_page_size);
else if (initial_domain & RADEON_DOMAIN_GTT)
- ws->allocated_gtt += align64(size, ws->gart_page_size);
+ ws->allocated_gtt += align64(size, ws->info.gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
* BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
* like constant/uniform buffers, can benefit from better and more reuse.
*/
- size = align64(size, ws->gart_page_size);
+ size = align64(size, ws->info.gart_page_size);
/* Only set one usage bit each for domains and flags, or the cache manager
* might consider different sets of domains / flags compatible
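
To illustrate the reuse argument from the comment above: once sizes are rounded to the GART page size, requests that differ only within a page resolve to the same size, so the cached bufmgr can recycle a freed buffer from one request to satisfy the other. A hypothetical example, assuming a 4 KiB page size (the concrete value comes from ws->info.gart_page_size at runtime):

    uint64_t page = 4096;              /* assumed ws->info.gart_page_size */
    uint64_t a = align64(100, page);   /* small constant buffer -> 4096 */
    uint64_t b = align64(3500, page);  /* small uniform buffer  -> 4096 */
    /* a == b, so a cached BO freed by one request can serve the other. */
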
*offset = whandle->offset;
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- ws->allocated_vram += align64(bo->base.size, ws->gart_page_size);
+ ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);
+ ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
- ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);
+ ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
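
The common thread across all hunks: the winsys previously kept its own gart_page_size copy, and this change reads it from the shared radeon_info struct instead. A minimal sketch of the assumed layout (the ws->info and gart_page_size names come from the diff itself; the surrounding fields are illustrative, not Mesa's full definitions):

    #include <stdint.h>

    struct radeon_info {
       uint32_t gart_page_size;   /* queried from the kernel at init */
    };

    struct amdgpu_winsys {
       struct radeon_info info;   /* replaces the old ws->gart_page_size copy */
       uint64_t allocated_vram;   /* page-aligned accounting, see hunks above */
       uint64_t allocated_gtt;
    };

With this layout, every ws->gart_page_size access becomes ws->info.gart_page_size, which is exactly the mechanical substitution the hunks apply.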