bo_create_internal(struct zink_screen *screen,
uint64_t size,
unsigned alignment,
- enum zink_heap heap,
+ unsigned heap_idx,
unsigned flags,
const void *pNext)
{
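+ /* heap_idx is the Vulkan memoryTypeIndex to allocate from; callers resolve it
+  * themselves now (e.g., screen->heap_map[heap]) instead of this function
+  * deriving it from the zink_heap enum
+  */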
else
mai.pNext = pNext;
mai.allocationSize = size;
- mai.memoryTypeIndex = screen->heap_map[heap];
+ mai.memoryTypeIndex = heap_idx;
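+ /* pad host-visible allocations so suballocated offsets stay aligned to
+  * minMemoryMapAlignment when mapped
+  */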
if (screen->info.mem_props.memoryTypes[mai.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
mai.allocationSize = align64(mai.allocationSize, screen->info.props.limits.minMemoryMapAlignment);
}
- unsigned heap_idx = screen->info.mem_props.memoryTypes[screen->heap_map[heap]].heapIndex;
- if (mai.allocationSize > screen->info.mem_props.memoryHeaps[heap_idx].size) {
- mesa_loge("zink: can't allocate %"PRIu64" bytes from heap that's only %"PRIu64" bytes!\n", mai.allocationSize, screen->info.mem_props.memoryHeaps[heap_idx].size);
+ unsigned heap = screen->info.mem_props.memoryTypes[heap_idx].heapIndex;
+ if (mai.allocationSize > screen->info.mem_props.memoryHeaps[heap].size) {
+ mesa_loge("zink: can't allocate %"PRIu64" bytes from heap that's only %"PRIu64" bytes!\n", mai.allocationSize, screen->info.mem_props.memoryHeaps[heap].size);
return NULL;
}
VkResult ret = VKSCR(AllocateMemory)(screen->dev, &mai, NULL, &bo->mem);
if (!zink_screen_handle_vkresult(screen, ret)) {
- mesa_loge("zink: couldn't allocate memory: heap=%u size=%" PRIu64, heap, size);
+ mesa_loge("zink: couldn't allocate memory: heap=%u size=%" PRIu64, heap_idx, size);
goto fail;
}
if (init_pb_cache) {
bo->u.real.use_reusable_pool = true;
- pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap);
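+ /* cache buckets are keyed by memory type index now, not by zink_heap */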
+ pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap_idx);
} else {
#ifdef ZINK_USE_DMABUF
list_inithead(&bo->u.real.exports);
bo->base.alignment_log2 = util_logbase2(alignment);
bo->base.size = mai.allocationSize;
bo->base.vtbl = &bo_vtbl;
- bo->base.placement = screen->heap_flags[heap];
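+ /* placement now records the memoryTypeIndex itself; property flags get looked
+  * up from mem_props where needed
+  */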
+ bo->base.placement = heap_idx;
bo->base.usage = flags;
bo->unique_id = p_atomic_inc_return(&screen->pb.next_bo_unique_id);
size = MAX2(size, ZINK_SPARSE_BUFFER_PAGE_SIZE);
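+ /* sparse backing buffers always use the device-local heap: translate the enum
+  * to its memory type index via heap_map
+  */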
buf = zink_bo_create(screen, size, ZINK_SPARSE_BUFFER_PAGE_SIZE,
- ZINK_HEAP_DEVICE_LOCAL, 0, NULL);
+ ZINK_HEAP_DEVICE_LOCAL, 0, screen->heap_map[ZINK_HEAP_DEVICE_LOCAL], NULL);
if (!buf) {
FREE(best_backing->chunks);
FREE(best_backing);
}
struct pb_buffer *
-zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, const void *pNext)
+zink_bo_create(struct zink_screen *screen, uint64_t size, unsigned alignment, enum zink_heap heap, enum zink_alloc_flag flags, unsigned heap_idx, const void *pNext)
{
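+ /* 'heap' still drives allocation policy here, while 'heap_idx' names the
+  * concrete Vulkan memory type the buffer must come from
+  */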
struct zink_bo *bo;
/* pull in sparse flag */
unsigned low_bound = 128 * 1024 * 1024; //128MB is a very small BAR
if (screen->info.driver_props.driverID == VK_DRIVER_ID_NVIDIA_PROPRIETARY)
low_bound *= 2; //nvidia textures are unusually large, so double the threshold
- unsigned heapidx = screen->info.mem_props.memoryTypes[screen->heap_map[heap]].heapIndex;
- reclaim_all = screen->info.mem_props.memoryHeaps[heapidx].size <= low_bound;
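+ /* with a BAR this small, reclaim every idle slab before allocating */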
+ reclaim_all = screen->info.mem_props.memoryHeaps[heap_idx].size <= low_bound;
}
- entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap, reclaim_all);
+ entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap_idx, reclaim_all);
if (!entry) {
/* Clean up buffer managers and try again. */
clean_up_buffer_managers(screen);
- entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap, true);
+ entry = pb_slab_alloc_reclaimed(slabs, alloc_size, heap_idx, true);
}
if (!entry)
return NULL;
if (use_reusable_pool) {
/* Get a buffer from the cache. */
bo = (struct zink_bo*)
- pb_cache_reclaim_buffer(&screen->pb.bo_cache, size, alignment, 0, heap);
+ pb_cache_reclaim_buffer(&screen->pb.bo_cache, size, alignment, 0, heap_idx);
if (bo)
return &bo->base;
}
/* Create a new one. */
- bo = bo_create_internal(screen, size, alignment, heap, flags, pNext);
+ bo = bo_create_internal(screen, size, alignment, heap_idx, flags, pNext);
if (!bo) {
/* Clean up buffer managers and try again. */
clean_up_buffer_managers(screen);
- bo = bo_create_internal(screen, size, alignment, heap, flags, pNext);
+ bo = bo_create_internal(screen, size, alignment, heap_idx, flags, pNext);
if (!bo)
return NULL;
}
}
assert(slab_size != 0);
- slab->buffer = zink_bo(zink_bo_create(screen, slab_size, slab_size, heap, 0, NULL));
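+ /* only the memory type index reaches the slab callback, so re-derive the
+  * zink_heap enum from that type's property flags
+  */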
+ slab->buffer = zink_bo(zink_bo_create(screen, slab_size, slab_size,
+ zink_heap_from_domain_flags(screen->info.mem_props.memoryTypes[heap].propertyFlags, 0),
+ 0, heap, NULL));
if (!slab->buffer)
goto fail;
alignment = MAX2(alignment, screen->info.props.limits.minMemoryMapAlignment);
obj->alignment = alignment;
retry:
- obj->bo = zink_bo(zink_bo_create(screen, reqs.size, alignment, heap, mai.pNext ? ZINK_ALLOC_NO_SUBALLOC : 0, mai.pNext));
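+ /* pass the memoryTypeIndex already selected into mai so the BO lands in
+  * exactly that memory type
+  */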
+ obj->bo = zink_bo(zink_bo_create(screen, reqs.size, alignment, heap, mai.pNext ? ZINK_ALLOC_NO_SUBALLOC : 0, mai.memoryTypeIndex, mai.pNext));
if (!obj->bo) {
if (heap == ZINK_HEAP_DEVICE_LOCAL_VISIBLE) {
if (templ->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT || templ->usage == PIPE_USAGE_DYNAMIC)
obj->size = zink_bo_get_size(obj->bo);
}
- obj->coherent = obj->bo->base.placement & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
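+ /* placement is an index, not a flag mask: fetch the type's propertyFlags
+  * before testing coherency and visibility
+  */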
+ obj->coherent = screen->info.mem_props.memoryTypes[obj->bo->base.placement].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
if (!(templ->flags & PIPE_RESOURCE_FLAG_SPARSE)) {
- obj->host_visible = obj->bo->base.placement & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ obj->host_visible = screen->info.mem_props.memoryTypes[obj->bo->base.placement].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
}
if (templ->target == PIPE_BUFFER) {
usage |= PIPE_MAP_UNSYNCHRONIZED;
} else if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
(((usage & PIPE_MAP_READ) && !(usage & PIPE_MAP_PERSISTENT) &&
- ((res->obj->bo->base.placement & VK_STAGING_RAM) != VK_STAGING_RAM)) ||
+ ((screen->info.mem_props.memoryTypes[res->obj->bo->base.placement].propertyFlags & VK_STAGING_RAM) != VK_STAGING_RAM)) ||
!res->obj->host_visible)) {
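+ /* the VK_STAGING_RAM test above does the same index->propertyFlags
+  * translation as the coherency checks
+  */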
assert(!(usage & (TC_TRANSFER_MAP_THREADED_UNSYNC | PIPE_MAP_THREAD_SAFE)));
if (!res->obj->host_visible || !(usage & PIPE_MAP_ONCE)) {