assert(!err);
/* bind memory */
- err = vkBindObjectMemory(demo->depth.image, i,
+ err = vkQueueBindObjectMemory(demo->queue, demo->depth.image, i,
demo->depth.mem[i], 0);
assert(!err);
}
assert(!err);
/* bind memory */
- err = vkBindObjectMemory(tex_obj->image, j, tex_obj->mem[j], 0);
+ err = vkQueueBindObjectMemory(demo->queue, tex_obj->image, j, tex_obj->mem[j], 0);
assert(!err);
}
free(mem_reqs);
/* setting the image layout does not reference the actual memory so no need to add a mem ref */
}
-static void demo_destroy_texture_image(struct texture_object *tex_objs)
+static void demo_destroy_texture_image(struct demo *demo, struct texture_object *tex_objs)
{
/* clean up staging resources */
for (uint32_t j = 0; j < tex_objs->num_mem; j ++) {
- vkBindObjectMemory(tex_objs->image, j, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, tex_objs->image, j, VK_NULL_HANDLE, 0);
vkFreeMemory(tex_objs->mem[j]);
}
demo_flush_init_cmd(demo);
- demo_destroy_texture_image(&staging_texture);
+ demo_destroy_texture_image(demo, &staging_texture);
demo_remove_mem_refs(demo, staging_texture.num_mem, staging_texture.mem);
} else {
/* Can't support VK_FMT_B8G8R8A8_UNORM !? */
err = vkUnmapMemory(demo->uniform_data.mem[i]);
assert(!err);
- err = vkBindObjectMemory(demo->uniform_data.buf, i,
+ err = vkQueueBindObjectMemory(demo->queue, demo->uniform_data.buf, i,
demo->uniform_data.mem[i], 0);
assert(!err);
}
queue_count = (uint32_t)(data_size / sizeof(VkPhysicalGpuQueueProperties));
assert(queue_count >= 1);
+ // Graphics queue and MemMgr queue can be separate.
+ // TODO: Add support for separate queues, including synchronization,
+ // and appropriate tracking for QueueSubmit and QueueBindObjectMemory
for (i = 0; i < queue_count; i++) {
- if (demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)
+ if ((demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) &&
+ (demo->queue_props[i].queueFlags & VK_QUEUE_MEMMGR_BIT) )
break;
}
assert(i < queue_count);
for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
vkDestroyObject(demo->textures[i].view);
- vkBindObjectMemory(demo->textures[i].image, 0, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, demo->textures[i].image, 0, VK_NULL_HANDLE, 0);
vkDestroyObject(demo->textures[i].image);
demo_remove_mem_refs(demo, demo->textures[i].num_mem, demo->textures[i].mem);
for (j = 0; j < demo->textures[i].num_mem; j++)
}
vkDestroyObject(demo->depth.view);
- vkBindObjectMemory(demo->depth.image, 0, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, demo->depth.image, 0, VK_NULL_HANDLE, 0);
vkDestroyObject(demo->depth.image);
demo_remove_mem_refs(demo, demo->depth.num_mem, demo->depth.mem);
for (j = 0; j < demo->depth.num_mem; j++) {
}
vkDestroyObject(demo->uniform_data.view);
- vkBindObjectMemory(demo->uniform_data.buf, 0, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, demo->uniform_data.buf, 0, VK_NULL_HANDLE, 0);
vkDestroyObject(demo->uniform_data.buf);
demo_remove_mem_refs(demo, demo->uniform_data.num_mem, demo->uniform_data.mem);
for (j = 0; j < demo->uniform_data.num_mem; j++)
assert(!err);
/* bind memory */
- err = vkBindObjectMemory(demo->depth.image, i,
+ err = vkQueueBindObjectMemory(demo->queue, demo->depth.image, i,
demo->depth.mem[i], 0);
assert(!err);
}
assert(!err);
/* bind memory */
- err = vkBindObjectMemory(tex_obj->image, j, tex_obj->mem[j], 0);
+ err = vkQueueBindObjectMemory(demo->queue, tex_obj->image, j, tex_obj->mem[j], 0);
assert(!err);
}
free(mem_reqs);
/* setting the image layout does not reference the actual memory so no need to add a mem ref */
}
-static void demo_destroy_texture_image(struct texture_object *tex_obj)
+static void demo_destroy_texture_image(struct demo *demo, struct texture_object *tex_obj)
{
/* clean up staging resources */
for (uint32_t j = 0; j < tex_obj->num_mem; j ++) {
- vkBindObjectMemory(tex_obj->image, j, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, tex_obj->image, j, VK_NULL_HANDLE, 0);
vkFreeMemory(tex_obj->mem[j]);
}
demo_flush_init_cmd(demo);
- demo_destroy_texture_image(&staging_texture);
+ demo_destroy_texture_image(demo, &staging_texture);
demo_remove_mem_refs(demo, staging_texture.num_mem, staging_texture.mem);
} else {
/* Can't support VK_FMT_B8G8R8A8_UNORM !? */
err = vkUnmapMemory(demo->vertices.mem[i]);
assert(!err);
- err = vkBindObjectMemory(demo->vertices.buf, i, demo->vertices.mem[i], 0);
+ err = vkQueueBindObjectMemory(demo->queue, demo->vertices.buf, i, demo->vertices.mem[i], 0);
assert(!err);
}
for (i = 0; i < queue_count; i++) {
-        if (demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)
-            break;
+        if ((demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) &&
+            (demo->queue_props[i].queueFlags & VK_QUEUE_MEMMGR_BIT) )
+            break;
}
assert(i < queue_count);
demo->graphics_queue_node_index = i;
vkDestroyObject(demo->desc_layout_chain);
vkDestroyObject(demo->desc_layout);
- vkBindObjectMemory(demo->vertices.buf, 0, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, demo->vertices.buf, 0, VK_NULL_HANDLE, 0);
vkDestroyObject(demo->vertices.buf);
demo_remove_mem_refs(demo, demo->vertices.num_mem, demo->vertices.mem);
for (j = 0; j < demo->vertices.num_mem; j++)
for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
vkDestroyObject(demo->textures[i].view);
- vkBindObjectMemory(demo->textures[i].image, 0, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, demo->textures[i].image, 0, VK_NULL_HANDLE, 0);
vkDestroyObject(demo->textures[i].image);
demo_remove_mem_refs(demo, demo->textures[i].num_mem, demo->textures[i].mem);
for (j = 0; j < demo->textures[i].num_mem; j++)
}
vkDestroyObject(demo->depth.view);
- vkBindObjectMemory(demo->depth.image, 0, VK_NULL_HANDLE, 0);
+ vkQueueBindObjectMemory(demo->queue, demo->depth.image, 0, VK_NULL_HANDLE, 0);
demo_remove_mem_refs(demo, demo->depth.num_mem, demo->depth.mem);
vkDestroyObject(demo->depth.image);
for (j = 0; j < demo->depth.num_mem; j++)
return base->get_info(base, infoType, pDataSize, pData);
}
-ICD_EXPORT VkResult VKAPI vkBindObjectMemory(
+ICD_EXPORT VkResult VKAPI vkQueueBindObjectMemory(
+ VkQueue queue,
VkObject object,
uint32_t allocationIdx,
VkGpuMemory mem_,
return VK_SUCCESS;
}
-ICD_EXPORT VkResult VKAPI vkBindObjectMemoryRange(
+ICD_EXPORT VkResult VKAPI vkQueueBindObjectMemoryRange(
+ VkQueue queue,
VkObject object,
uint32_t allocationIdx,
VkGpuSize rangeOffset,
return VK_SUCCESS;
}
-ICD_EXPORT VkResult VKAPI vkBindImageMemoryRange(
+ICD_EXPORT VkResult VKAPI vkQueueBindImageMemoryRange(
+ VkQueue queue,
VkImage image,
uint32_t allocationIdx,
const VkImageMemoryBindInfo* bindInfo,
PFN_vkOpenPeerImage OpenPeerImage;
PFN_vkDestroyObject DestroyObject;
PFN_vkGetObjectInfo GetObjectInfo;
- PFN_vkBindObjectMemory BindObjectMemory;
- PFN_vkBindObjectMemoryRange BindObjectMemoryRange;
- PFN_vkBindImageMemoryRange BindImageMemoryRange;
+ PFN_vkQueueBindObjectMemory QueueBindObjectMemory;
+ PFN_vkQueueBindObjectMemoryRange QueueBindObjectMemoryRange;
+ PFN_vkQueueBindImageMemoryRange QueueBindImageMemoryRange;
PFN_vkCreateFence CreateFence;
PFN_vkGetFenceStatus GetFenceStatus;
PFN_vkResetFences ResetFences;
VK_QUEUE_GRAPHICS_BIT = 0x00000001, // Queue supports graphics operations
VK_QUEUE_COMPUTE_BIT = 0x00000002, // Queue supports compute operations
VK_QUEUE_DMA_BIT = 0x00000004, // Queue supports DMA operations
+ VK_QUEUE_MEMMGR_BIT = 0x00000008, // Queue supports memory management operations
VK_QUEUE_EXTENDED_BIT = 0x40000000, // Extended queue
VK_MAX_ENUM(VkQueueFlags)
} VkQueueFlags;
{
VkGpuSize size; // Specified in bytes
VkGpuSize alignment; // Specified in bytes
- VkGpuSize granularity; // Granularity on which vkBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size)
+ VkGpuSize granularity; // Granularity on which vkQueueBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size)
VkFlags memProps; // VkMemoryPropertyFlags
} VkMemoryRequirements;
typedef VkResult (VKAPI *PFN_vkOpenPeerImage)(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem);
typedef VkResult (VKAPI *PFN_vkDestroyObject)(VkObject object);
typedef VkResult (VKAPI *PFN_vkGetObjectInfo)(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData);
-typedef VkResult (VKAPI *PFN_vkBindObjectMemory)(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset);
-typedef VkResult (VKAPI *PFN_vkBindObjectMemoryRange)(VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset,VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset);
-typedef VkResult (VKAPI *PFN_vkBindImageMemoryRange)(VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset);
+typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemory)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset);
+typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemoryRange)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset,VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset);
+typedef VkResult (VKAPI *PFN_vkQueueBindImageMemoryRange)(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset);
typedef VkResult (VKAPI *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence);
typedef VkResult (VKAPI *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, VkFence* pFences);
typedef VkResult (VKAPI *PFN_vkGetFenceStatus)(VkFence fence);
size_t* pDataSize,
void* pData);
-VkResult VKAPI vkBindObjectMemory(
+// Memory management API functions
+
+VkResult VKAPI vkQueueBindObjectMemory(
+ VkQueue queue,
VkObject object,
uint32_t allocationIdx,
VkGpuMemory mem,
VkGpuSize memOffset);
-VkResult VKAPI vkBindObjectMemoryRange(
+VkResult VKAPI vkQueueBindObjectMemoryRange(
+ VkQueue queue,
VkObject object,
uint32_t allocationIdx,
VkGpuSize rangeOffset,
VkGpuMemory mem,
VkGpuSize memOffset);
-VkResult VKAPI vkBindImageMemoryRange(
+VkResult VKAPI vkQueueBindImageMemoryRange(
+ VkQueue queue,
VkImage image,
uint32_t allocationIdx,
const VkImageMemoryBindInfo* bindInfo,
}
// For given MemObjInfo, report Obj & CB bindings
-static void reportMemReferences(const MT_MEM_OBJ_INFO* pMemObjInfo)
+static void reportMemReferencesAndCleanUp(MT_MEM_OBJ_INFO* pMemObjInfo)
{
- uint32_t refCount = 0; // Count found references
+ uint32_t cmdBufRefCount = pMemObjInfo->pCmdBufferBindings.size();
+ uint32_t objRefCount = pMemObjInfo->pObjBindings.size();
- for (list<VkCmdBuffer>::const_iterator it = pMemObjInfo->pCmdBufferBindings.begin(); it != pMemObjInfo->pCmdBufferBindings.end(); ++it) {
- refCount++;
+ if ((pMemObjInfo->pCmdBufferBindings.size() + pMemObjInfo->pObjBindings.size()) != 0) {
char str[1024];
- sprintf(str, "Command Buffer %p has reference to mem obj %p", (*it), pMemObjInfo->mem);
- layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str);
+ sprintf(str, "Attempting to free memory object %p which still contains %d references", pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
+ layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, pMemObjInfo->mem, 0, MEMTRACK_INTERNAL_ERROR, "MEM", str);
}
- for (list<VkObject>::const_iterator it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
- char str[1024];
- sprintf(str, "VK Object %p has reference to mem obj %p", (*it), pMemObjInfo->mem);
- layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str);
+
+ if (cmdBufRefCount > 0) {
+ for (list<VkCmdBuffer>::const_iterator it = pMemObjInfo->pCmdBufferBindings.begin(); it != pMemObjInfo->pCmdBufferBindings.end(); ++it) {
+ char str[1024];
+ sprintf(str, "Command Buffer %p still has a reference to mem obj %p", (*it), pMemObjInfo->mem);
+ layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str);
+ }
+ // Clear the list of hanging references
+ pMemObjInfo->pCmdBufferBindings.clear();
}
- if (refCount != pMemObjInfo->refCount) {
- char str[1024];
- sprintf(str, "Refcount of %u for Mem Obj %p does't match reported refs of %u", pMemObjInfo->refCount, pMemObjInfo->mem, refCount);
- layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, pMemObjInfo->mem, 0, MEMTRACK_INTERNAL_ERROR, "MEM", str);
+
+ if (objRefCount > 0) {
+ for (list<VkObject>::const_iterator it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
+ char str[1024];
+ sprintf(str, "VK Object %p still has a reference to mem obj %p", (*it), pMemObjInfo->mem);
+ layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, (*it), 0, MEMTRACK_NONE, "MEM", str);
+ }
+ // Clear the list of hanging references
+ pMemObjInfo->pObjBindings.clear();
}
}
static void deleteMemObjInfo(VkGpuMemory mem)
{
- MT_MEM_OBJ_INFO* pDelInfo = memObjMap[mem];
if (memObjMap.find(mem) != memObjMap.end()) {
MT_MEM_OBJ_INFO* pDelInfo = memObjMap[mem];
delete pDelInfo;
memObjMap.erase(mem);
}
+ else {
+ char str[1024];
+ sprintf(str, "Request to delete memory object %p not present in memory Object Map", mem);
+ layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, mem, 0, MEMTRACK_INVALID_MEM_OBJ, "MEM", str);
+ }
}
// Check if fence for given CB is completed
char str[1024];
sprintf(str, "Freeing mem obj %p while it still has references", (void*)mem);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, mem, 0, MEMTRACK_FREED_MEM_REF, "MEM", str);
- reportMemReferences(pInfo);
+ reportMemReferencesAndCleanUp(pInfo);
result = VK_FALSE;
}
// Delete mem obj info
sprintf(str, "Attempting to clear mem binding on obj %p but it has no binding.", (void*)object);
layerCbMsg(VK_DBG_MSG_WARNING, VK_VALIDATION_LEVEL_0, object, 0, MEMTRACK_MEM_OBJ_CLEAR_EMPTY_BINDINGS, "MEM", str);
} else {
+ // This obj is bound to a memory object. Remove the reference to this object in that memory object's list, decrement the memObj's refcount
+ // and set the objects memory binding pointer to NULL.
for (list<VkObject>::iterator it = pObjInfo->pMemObjInfo->pObjBindings.begin(); it != pObjInfo->pMemObjInfo->pObjBindings.end(); ++it) {
- pObjInfo->pMemObjInfo->refCount--;
- pObjInfo->pMemObjInfo = NULL;
- it = pObjInfo->pMemObjInfo->pObjBindings.erase(it);
- result = VK_TRUE;
- break;
+ if ((*it) == object) {
+ pObjInfo->pMemObjInfo->refCount--;
+ pObjInfo->pMemObjInfo->pObjBindings.erase(it);
+ pObjInfo->pMemObjInfo = NULL;
+ result = VK_TRUE;
+ break;
+ }
}
if (result == VK_FALSE) {
char str[1024];
}
else {
char str[1024];
- sprintf(str, "Destroying obj %p that is still bound to memory object %p\nYou should first clear binding by calling vkBindObjectMemory(%p, 0, VK_NULL_HANDLE, 0)", object, (void*)pDelInfo->pMemObjInfo->mem, object);
+ sprintf(str, "Destroying obj %p that is still bound to memory object %p\nYou should first clear binding by calling vkQueueBindObjectMemory(queue, %p, 0, VK_NULL_HANDLE, 0)", object, (void*)pDelInfo->pMemObjInfo->mem, object);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, object, 0, MEMTRACK_DESTROY_OBJECT_ERROR, "MEM", str);
// From the spec : If an object has previous memory binding, it is required to unbind memory from an API object before it is destroyed.
clearObjectBinding(object);
VK_LAYER_EXPORT VkResult VKAPI vkGetObjectInfo(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData)
{
// TODO : What to track here?
- // Could potentially save returned mem requirements and validate values passed into BindObjectMemory for this object
+ // Could potentially save returned mem requirements and validate values passed into QueueBindObjectMemory for this object
// From spec : The only objects that are guaranteed to have no external memory requirements are devices, queues, command buffers, shaders and memory objects.
VkResult result = nextTable.GetObjectInfo(object, infoType, pDataSize, pData);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemory(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
{
- VkResult result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
+ VkResult result = nextTable.QueueBindObjectMemory(queue, object, allocationIdx, mem, offset);
loader_platform_thread_lock_mutex(&globalLock);
// Track objects tied to memory
if (VK_FALSE == updateObjectBinding(object, mem)) {
return (void*) vkDestroyObject;
if (!strcmp(funcName, "vkGetObjectInfo"))
return (void*) vkGetObjectInfo;
- if (!strcmp(funcName, "vkBindObjectMemory"))
- return (void*) vkBindObjectMemory;
+ if (!strcmp(funcName, "vkQueueBindObjectMemory"))
+ return (void*) vkQueueBindObjectMemory;
if (!strcmp(funcName, "vkCreateFence"))
return (void*) vkCreateFence;
if (!strcmp(funcName, "vkGetFenceStatus"))
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemory(VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
{
- VkResult result = nextTable.BindObjectMemory(object, allocationIdx, mem, offset);
+ VkResult result = nextTable.QueueBindObjectMemory(queue, object, allocationIdx, mem, offset);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkBindObjectMemoryRange(VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemoryRange(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset)
{
- VkResult result = nextTable.BindObjectMemoryRange(object, allocationIdx, rangeOffset, rangeSize, mem, memOffset);
+ VkResult result = nextTable.QueueBindObjectMemoryRange(queue, object, allocationIdx, rangeOffset, rangeSize, mem, memOffset);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkBindImageMemoryRange(VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindImageMemoryRange(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* bindInfo, VkGpuMemory mem, VkGpuSize memOffset)
{
char str[1024];
if (!bindInfo) {
- sprintf(str, "Struct ptr parameter bindInfo to function BindImageMemoryRange is NULL.");
+ sprintf(str, "Struct ptr parameter bindInfo to function QueueBindImageMemoryRange is NULL.");
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
else if (!vk_validate_vkimagememorybindinfo(bindInfo)) {
-        sprintf(str, "Parameter bindInfo to function BindImageMemoryRange contains an invalid value.");
+        sprintf(str, "Parameter bindInfo to function QueueBindImageMemoryRange contains an invalid value.");
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VkResult result = nextTable.BindImageMemoryRange(image, allocationIdx, bindInfo, mem, memOffset);
+ VkResult result = nextTable.QueueBindImageMemoryRange(queue, image, allocationIdx, bindInfo, mem, memOffset);
return result;
}
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
- Proto("VkResult", "BindObjectMemory",
- [Param("VkObject", "object"),
+ Proto("VkResult", "QueueBindObjectMemory",
+ [Param("VkQueue", "queue"),
+ Param("VkObject", "object"),
Param("uint32_t", "allocationIdx"),
Param("VkGpuMemory", "mem"),
Param("VkGpuSize", "offset")]),
- Proto("VkResult", "BindObjectMemoryRange",
- [Param("VkObject", "object"),
+ Proto("VkResult", "QueueBindObjectMemoryRange",
+ [Param("VkQueue", "queue"),
+ Param("VkObject", "object"),
Param("uint32_t", "allocationIdx"),
Param("VkGpuSize", "rangeOffset"),
Param("VkGpuSize", "rangeSize"),
Param("VkGpuMemory", "mem"),
Param("VkGpuSize", "memOffset")]),
- Proto("VkResult", "BindImageMemoryRange",
- [Param("VkImage", "image"),
+ Proto("VkResult", "QueueBindImageMemoryRange",
+ [Param("VkQueue", "queue"),
+ Param("VkImage", "image"),
Param("uint32_t", "allocationIdx"),
Param("const VkImageMemoryBindInfo*", "bindInfo"),
Param("VkGpuMemory", "mem"),