VkImageLayout imageLayout;
uint32_t num_mem;
- VkGpuMemory *mem;
+ VkDeviceMemory *mem;
VkImageView view;
int32_t tex_width, tex_height;
};
bool use_staging_buffer;
VkInstance inst;
- VkPhysicalGpu gpu;
+ VkPhysicalDevice gpu;
VkDevice device;
VkQueue queue;
uint32_t graphics_queue_node_index;
- VkPhysicalGpuProperties *gpu_props;
- VkPhysicalGpuQueueProperties *queue_props;
+ VkPhysicalDeviceProperties *gpu_props;
+ VkPhysicalDeviceQueueProperties *queue_props;
VkFramebuffer framebuffer;
int width, height;
struct {
VkImage image;
- VkGpuMemory mem;
+ VkDeviceMemory mem;
VkCmdBuffer cmd;
VkColorAttachmentView view;
VkImage image;
uint32_t num_mem;
- VkGpuMemory *mem;
+ VkDeviceMemory *mem;
VkDepthStencilView view;
} depth;
struct {
VkBuffer buf;
uint32_t num_mem;
- VkGpuMemory *mem;
+ VkDeviceMemory *mem;
VkBufferView view;
VkBufferViewAttachInfo attach;
} uniform_data;
static void demo_add_mem_refs(
struct demo *demo,
- int num_refs, VkGpuMemory *mem)
+ int num_refs, VkDeviceMemory *mem)
{
vkQueueAddMemReferences(demo->queue, num_refs, mem);
}
static void demo_remove_mem_refs(
struct demo *demo,
- int num_refs, VkGpuMemory *mem)
+ int num_refs, VkDeviceMemory *mem)
{
vkQueueRemoveMemReferences(demo->queue, num_refs, mem);
}
VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
- .flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
+ .flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,
};
err = vkBeginCommandBuffer(demo->cmd, &cmd_buf_info);
VkPipeEvent set_events[] = { VK_PIPE_EVENT_TOP_OF_PIPE };
- VkPipelineBarrier pipeline_barrier;
- pipeline_barrier.sType = VK_STRUCTURE_TYPE_PIPELINE_BARRIER;
- pipeline_barrier.pNext = NULL;
- pipeline_barrier.eventCount = 1;
- pipeline_barrier.pEvents = set_events;
- pipeline_barrier.waitEvent = VK_WAIT_EVENT_TOP_OF_PIPE;
- pipeline_barrier.memBarrierCount = 1;
- pipeline_barrier.ppMemBarriers = (const void **)&pmemory_barrier;
-
- vkCmdPipelineBarrier(demo->cmd, &pipeline_barrier);
+ vkCmdPipelineBarrier(demo->cmd, VK_WAIT_EVENT_TOP_OF_PIPE, 1, set_events, 1, (const void **)&pmemory_barrier);
}
static void demo_draw_build_cmd(struct demo *demo, VkCmdBuffer cmd_buf)
VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
- .flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
+ .flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,
};
VkResult err;
rp_info.pColorLoadOps = &load_op;
rp_info.pColorStoreOps = &store_op;
rp_info.pColorLoadClearValues = &clear_color;
- rp_info.depthStencilFormat = VK_FMT_D16_UNORM;
+ rp_info.depthStencilFormat = VK_FORMAT_D16_UNORM;
rp_info.depthStencilLayout = depth_stencil.layout;
rp_info.depthLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
rp_info.depthLoadClearValue = clear_depth;
vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS,
demo->desc_layout_chain, 0, 1, &demo->desc_set, NULL);
- vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_VIEWPORT, demo->viewport);
- vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_RASTER, demo->raster);
- vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_COLOR_BLEND,
+ vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_POINT_VIEWPORT, demo->viewport);
+ vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_POINT_RASTER, demo->raster);
+ vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_POINT_COLOR_BLEND,
demo->color_blend);
- vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_DEPTH_STENCIL,
+ vkCmdBindDynamicStateObject(cmd_buf, VK_STATE_BIND_POINT_DEPTH_STENCIL,
demo->depth_stencil);
vkCmdBeginRenderPass(cmd_buf, &rp_begin);
static void demo_prepare_depth(struct demo *demo)
{
- const VkFormat depth_format = VK_FMT_D16_UNORM;
+ const VkFormat depth_format = VK_FORMAT_D16_UNORM;
const VkImageCreateInfo image = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
- .imageType = VK_IMAGE_2D,
+ .imageType = VK_IMAGE_TYPE_2D,
.format = depth_format,
.extent = { demo->width, demo->height, 1 },
.mipLevels = 1,
.arraySize = 1,
.samples = 1,
- .tiling = VK_OPTIMAL_TILING,
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_BIT,
.flags = 0,
};
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO,
.pNext = NULL,
.allocationSize = 0,
- .memProps = VK_MEMORY_PROPERTY_GPU_ONLY,
+ .memProps = VK_MEMORY_PROPERTY_DEVICE_ONLY,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
VkDepthStencilViewCreateInfo view = {
&demo->depth.image);
assert(!err);
- err = vkGetObjectInfo(demo->depth.image, VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT, &num_alloc_size, &num_allocations);
+ err = vkGetObjectInfo(demo->depth.image, VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT, &num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
- demo->depth.mem = malloc(num_allocations * sizeof(VkGpuMemory));
+ demo->depth.mem = malloc(num_allocations * sizeof(VkDeviceMemory));
demo->depth.num_mem = num_allocations;
err = vkGetObjectInfo(demo->depth.image,
- VK_INFO_TYPE_MEMORY_REQUIREMENTS,
+ VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
for (uint32_t i = 0; i < num_allocations; i ++) {
VkImageTiling tiling,
VkFlags mem_props)
{
- const VkFormat tex_format = VK_FMT_B8G8R8A8_UNORM;
+ const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
int32_t tex_width;
int32_t tex_height;
VkResult err;
const VkImageCreateInfo image_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
- .imageType = VK_IMAGE_2D,
+ .imageType = VK_IMAGE_TYPE_2D,
.format = tex_format,
.extent = { tex_width, tex_height, 1 },
.mipLevels = 1,
assert(!err);
err = vkGetObjectInfo(tex_obj->image,
- VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
+ VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
- tex_obj->mem = malloc(num_allocations * sizeof(VkGpuMemory));
+ tex_obj->mem = malloc(num_allocations * sizeof(VkDeviceMemory));
err = vkGetObjectInfo(tex_obj->image,
- VK_INFO_TYPE_MEMORY_REQUIREMENTS,
+ VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
- mem_alloc.memProps = VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT;
+ mem_alloc.memProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
for (uint32_t j = 0; j < num_allocations; j ++) {
mem_alloc.allocationSize = mem_reqs[j].size;
tex_obj->num_mem = num_allocations;
- if (mem_props & VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT) {
+ if (mem_props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
const VkImageSubresource subres = {
.aspect = VK_IMAGE_ASPECT_COLOR,
.mipLevel = 0,
void *data;
err = vkGetImageSubresourceInfo(tex_obj->image, &subres,
- VK_INFO_TYPE_SUBRESOURCE_LAYOUT,
+ VK_SUBRESOURCE_INFO_TYPE_LAYOUT,
&layout_size, &layout);
assert(!err && layout_size == sizeof(layout));
/* Linear texture must be within a single memory object */
static void demo_prepare_textures(struct demo *demo)
{
- const VkFormat tex_format = VK_FMT_R8G8B8A8_UNORM;
+ const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;
VkFormatProperties props;
size_t size = sizeof(props);
VkResult err;
uint32_t i;
err = vkGetFormatInfo(demo->device, tex_format,
- VK_INFO_TYPE_FORMAT_PROPERTIES,
+ VK_FORMAT_INFO_TYPE_PROPERTIES,
&size, &props);
assert(!err);
for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
- if (props.linearTilingFeatures & VK_FORMAT_SAMPLED_IMAGE_BIT && !demo->use_staging_buffer) {
+ if (props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT && !demo->use_staging_buffer) {
/* Device can texture using linear textures */
demo_prepare_texture_image(demo, tex_files[i], &demo->textures[i],
- VK_LINEAR_TILING, VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT);
- } else if (props.optimalTilingFeatures & VK_FORMAT_SAMPLED_IMAGE_BIT) {
+ VK_IMAGE_TILING_LINEAR, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
+ } else if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) {
/* Must use staging buffer to copy linear texture to optimized */
struct texture_object staging_texture;
memset(&staging_texture, 0, sizeof(staging_texture));
demo_prepare_texture_image(demo, tex_files[i], &staging_texture,
- VK_LINEAR_TILING, VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT);
+ VK_IMAGE_TILING_LINEAR, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
demo_prepare_texture_image(demo, tex_files[i], &demo->textures[i],
- VK_OPTIMAL_TILING, VK_MEMORY_PROPERTY_GPU_ONLY);
+ VK_IMAGE_TILING_OPTIMAL, VK_MEMORY_PROPERTY_DEVICE_ONLY);
demo_set_image_layout(demo, staging_texture.image,
staging_texture.imageLayout,
demo_destroy_texture_image(demo, &staging_texture);
demo_remove_mem_refs(demo, staging_texture.num_mem, staging_texture.mem);
} else {
- /* Can't support VK_FMT_B8G8R8A8_UNORM !? */
+ /* Can't support VK_FORMAT_B8G8R8A8_UNORM !? */
assert(!"No support for tB8G8R8A8_UNORM as texture image format");
}
.pNext = NULL,
.magFilter = VK_TEX_FILTER_NEAREST,
.minFilter = VK_TEX_FILTER_NEAREST,
- .mipMode = VK_TEX_MIPMAP_BASE,
+ .mipMode = VK_TEX_MIPMAP_MODE_BASE,
.addressU = VK_TEX_ADDRESS_CLAMP,
.addressV = VK_TEX_ADDRESS_CLAMP,
.addressW = VK_TEX_ADDRESS_CLAMP,
.mipLodBias = 0.0f,
.maxAnisotropy = 1,
- .compareFunc = VK_COMPARE_NEVER,
+ .compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = 0.0f,
- .borderColorType = VK_BORDER_COLOR_OPAQUE_WHITE,
+ .borderColor = VK_BORDER_COLOR_OPAQUE_WHITE,
};
VkImageViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
- .viewType = VK_IMAGE_VIEW_2D,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = tex_format,
.channels = { VK_CHANNEL_SWIZZLE_R,
VK_CHANNEL_SWIZZLE_G,
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO,
.pNext = NULL,
.allocationSize = 0,
- .memProps = VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT,
+ .memProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
VkMemoryRequirements *mem_reqs;
assert(!err);
err = vkGetObjectInfo(demo->uniform_data.buf,
- VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
+ VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
- demo->uniform_data.mem = malloc(num_allocations * sizeof(VkGpuMemory));
+ demo->uniform_data.mem = malloc(num_allocations * sizeof(VkDeviceMemory));
demo->uniform_data.num_mem = num_allocations;
err = vkGetObjectInfo(demo->uniform_data.buf,
- VK_INFO_TYPE_MEMORY_REQUIREMENTS,
+ VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
assert(!err && mem_reqs_size == num_allocations * sizeof(*mem_reqs));
for (uint32_t i = 0; i < num_allocations; i ++) {
memset(&view_info, 0, sizeof(view_info));
view_info.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
view_info.buffer = demo->uniform_data.buf;
- view_info.viewType = VK_BUFFER_VIEW_RAW;
+ view_info.viewType = VK_BUFFER_VIEW_TYPE_RAW;
view_info.offset = 0;
view_info.range = sizeof(data);
[0] = {
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.count = 1,
- .stageFlags = VK_SHADER_STAGE_FLAGS_VERTEX_BIT,
+ .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
.pImmutableSamplers = NULL,
},
[1] = {
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.count = DEMO_TEXTURE_COUNT,
- .stageFlags = VK_SHADER_STAGE_FLAGS_FRAGMENT_BIT,
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.pImmutableSamplers = NULL,
},
};
}
static VkShader demo_prepare_shader(struct demo *demo,
- VkPipelineShaderStage stage,
+ VkShaderStage stage,
const void *code,
size_t size)
{
createInfo.pCode = malloc(createInfo.codeSize);
createInfo.flags = 0;
- /* try version 0 first: VkPipelineShaderStage followed by GLSL */
+ /* try version 0 first: VkShaderStage followed by GLSL */
((uint32_t *) createInfo.pCode)[0] = ICD_SPV_MAGIC;
((uint32_t *) createInfo.pCode)[1] = 0;
((uint32_t *) createInfo.pCode)[2] = stage;
memset(&ia, 0, sizeof(ia));
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO;
- ia.topology = VK_TOPOLOGY_TRIANGLE_LIST;
+ ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
memset(&rs, 0, sizeof(rs));
rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO;
- rs.fillMode = VK_FILL_SOLID;
- rs.cullMode = VK_CULL_BACK;
+ rs.fillMode = VK_FILL_MODE_SOLID;
+ rs.cullMode = VK_CULL_MODE_BACK;
rs.frontFace = VK_FRONT_FACE_CCW;
memset(&cb, 0, sizeof(cb));
memset(&vp, 0, sizeof(vp));
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO;
- vp.numViewports = 1;
+ vp.viewportCount = 1;
vp.clipOrigin = VK_COORDINATE_ORIGIN_LOWER_LEFT;
memset(&ds, 0, sizeof(ds));
ds.format = demo->depth.format;
ds.depthTestEnable = VK_TRUE;
ds.depthWriteEnable = VK_TRUE;
- ds.depthFunc = VK_COMPARE_LESS_EQUAL;
+ ds.depthCompareOp = VK_COMPARE_OP_LESS_EQUAL;
ds.depthBoundsEnable = VK_FALSE;
ds.back.stencilFailOp = VK_STENCIL_OP_KEEP;
ds.back.stencilPassOp = VK_STENCIL_OP_KEEP;
- ds.back.stencilFunc = VK_COMPARE_ALWAYS;
+ ds.back.stencilCompareOp = VK_COMPARE_OP_ALWAYS;
ds.stencilTestEnable = VK_FALSE;
ds.front = ds.back;
err = vkCreateDevice(demo->gpu, &device, &demo->device);
assert(!err);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
&data_size, NULL);
assert(!err);
- demo->gpu_props = (VkPhysicalGpuProperties *) malloc(data_size);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
+ demo->gpu_props = (VkPhysicalDeviceProperties *) malloc(data_size);
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
&data_size, demo->gpu_props);
assert(!err);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
&data_size, NULL);
assert(!err);
- demo->queue_props = (VkPhysicalGpuQueueProperties *) malloc(data_size);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
+ demo->queue_props = (VkPhysicalDeviceQueueProperties *) malloc(data_size);
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
&data_size, demo->queue_props);
assert(!err);
- queue_count = (uint32_t)(data_size / sizeof(VkPhysicalGpuQueueProperties));
+ queue_count = (uint32_t)(data_size / sizeof(VkPhysicalDeviceQueueProperties));
assert(queue_count >= 1);
// Graphics queue and MemMgr queue can be separate.
demo->width = 500;
demo->height = 500;
- demo->format = VK_FMT_B8G8R8A8_UNORM;
+ demo->format = VK_FORMAT_B8G8R8A8_UNORM;
demo->spin_angle = 0.01f;
demo->spin_increment = 0.01f;
VkImageLayout imageLayout;
uint32_t num_mem;
- VkGpuMemory *mem;
+ VkDeviceMemory *mem;
VkImageView view;
int32_t tex_width, tex_height;
};
xcb_screen_t *screen;
VkInstance inst;
- VkPhysicalGpu gpu;
+ VkPhysicalDevice gpu;
VkDevice device;
VkQueue queue;
- VkPhysicalGpuProperties *gpu_props;
- VkPhysicalGpuQueueProperties *queue_props;
+ VkPhysicalDeviceProperties *gpu_props;
+ VkPhysicalDeviceQueueProperties *queue_props;
uint32_t graphics_queue_node_index;
int width, height;
struct {
VkImage image;
uint32_t num_mem;
- VkGpuMemory mem;
+ VkDeviceMemory mem;
VkColorAttachmentView view;
VkFence fence;
VkImage image;
uint32_t num_mem;
- VkGpuMemory *mem;
+ VkDeviceMemory *mem;
VkDepthStencilView view;
} depth;
struct {
VkBuffer buf;
uint32_t num_mem;
- VkGpuMemory *mem;
+ VkDeviceMemory *mem;
VkPipelineVertexInputCreateInfo vi;
VkVertexInputBindingDescription vi_bindings[1];
static void demo_add_mem_refs(
struct demo *demo,
- int num_refs, VkGpuMemory *mem)
+ int num_refs, VkDeviceMemory *mem)
{
vkQueueAddMemReferences(demo->queue, num_refs, mem);
}
static void demo_remove_mem_refs(
struct demo *demo,
- int num_refs, VkGpuMemory *mem)
+ int num_refs, VkDeviceMemory *mem)
{
vkQueueRemoveMemReferences(demo->queue, num_refs, mem);
}
VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
- .flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
+ .flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,
};
err = vkBeginCommandBuffer(demo->cmd, &cmd_buf_info);
VkPipeEvent set_events[] = { VK_PIPE_EVENT_TOP_OF_PIPE };
- VkPipelineBarrier pipeline_barrier;
- pipeline_barrier.sType = VK_STRUCTURE_TYPE_PIPELINE_BARRIER;
- pipeline_barrier.pNext = NULL;
- pipeline_barrier.eventCount = 1;
- pipeline_barrier.pEvents = set_events;
- pipeline_barrier.waitEvent = VK_WAIT_EVENT_TOP_OF_PIPE;
- pipeline_barrier.memBarrierCount = 1;
- pipeline_barrier.ppMemBarriers = (const void **)&pmemory_barrier;
-
- vkCmdPipelineBarrier(demo->cmd, &pipeline_barrier);
+ vkCmdPipelineBarrier(demo->cmd, VK_WAIT_EVENT_TOP_OF_PIPE, 1, set_events, 1, (const void **)&pmemory_barrier);
}
static void demo_draw_build_cmd(struct demo *demo)
VkCmdBufferBeginInfo cmd_buf_info = {
.sType = VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO,
.pNext = NULL,
- .flags = VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT |
+ .flags = VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT |
VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT,
};
VkResult err;
rp_info.pColorLoadOps = &load_op;
rp_info.pColorStoreOps = &store_op;
rp_info.pColorLoadClearValues = &clear_color;
- rp_info.depthStencilFormat = VK_FMT_D16_UNORM;
+ rp_info.depthStencilFormat = VK_FORMAT_D16_UNORM;
rp_info.depthStencilLayout = depth_stencil.layout;
rp_info.depthLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
rp_info.depthLoadClearValue = clear_depth;
vkCmdBindDescriptorSets(demo->cmd, VK_PIPELINE_BIND_POINT_GRAPHICS,
demo->desc_layout_chain, 0, 1, & demo->desc_set, NULL);
- vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_VIEWPORT, demo->viewport);
- vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_RASTER, demo->raster);
- vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_COLOR_BLEND,
+ vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_POINT_VIEWPORT, demo->viewport);
+ vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_POINT_RASTER, demo->raster);
+ vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_POINT_COLOR_BLEND,
demo->color_blend);
- vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_DEPTH_STENCIL,
+ vkCmdBindDynamicStateObject(demo->cmd, VK_STATE_BIND_POINT_DEPTH_STENCIL,
demo->depth_stencil);
- VkGpuSize offsets[1] = {0};
+ VkDeviceSize offsets[1] = {0};
vkCmdBindVertexBuffers(demo->cmd, VERTEX_BUFFER_BIND_ID, 1, &demo->vertices.buf, offsets);
vkCmdBeginRenderPass(demo->cmd, &rp_begin);
static void demo_prepare_depth(struct demo *demo)
{
- const VkFormat depth_format = VK_FMT_D16_UNORM;
+ const VkFormat depth_format = VK_FORMAT_D16_UNORM;
const VkImageCreateInfo image = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
- .imageType = VK_IMAGE_2D,
+ .imageType = VK_IMAGE_TYPE_2D,
.format = depth_format,
.extent = { demo->width, demo->height, 1 },
.mipLevels = 1,
.arraySize = 1,
.samples = 1,
- .tiling = VK_OPTIMAL_TILING,
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_BIT,
.flags = 0,
};
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO,
.pNext = NULL,
.allocationSize = 0,
- .memProps = VK_MEMORY_PROPERTY_GPU_ONLY,
+ .memProps = VK_MEMORY_PROPERTY_DEVICE_ONLY,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
VkDepthStencilViewCreateInfo view = {
&demo->depth.image);
assert(!err);
- err = vkGetObjectInfo(demo->depth.image, VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT, &num_alloc_size, &num_allocations);
+ err = vkGetObjectInfo(demo->depth.image, VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT, &num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
- demo->depth.mem = malloc(num_allocations * sizeof(VkGpuMemory));
+ demo->depth.mem = malloc(num_allocations * sizeof(VkDeviceMemory));
demo->depth.num_mem = num_allocations;
err = vkGetObjectInfo(demo->depth.image,
- VK_INFO_TYPE_MEMORY_REQUIREMENTS,
+ VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
for (uint32_t i = 0; i < num_allocations; i ++) {
VkImageTiling tiling,
VkFlags mem_props)
{
- const VkFormat tex_format = VK_FMT_B8G8R8A8_UNORM;
+ const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 2;
const int32_t tex_height = 2;
VkResult err;
const VkImageCreateInfo image_create_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.pNext = NULL,
- .imageType = VK_IMAGE_2D,
+ .imageType = VK_IMAGE_TYPE_2D,
.format = tex_format,
.extent = { tex_width, tex_height, 1 },
.mipLevels = 1,
assert(!err);
err = vkGetObjectInfo(tex_obj->image,
- VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
+ VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
- tex_obj->mem = malloc(num_allocations * sizeof(VkGpuMemory));
+ tex_obj->mem = malloc(num_allocations * sizeof(VkDeviceMemory));
err = vkGetObjectInfo(tex_obj->image,
- VK_INFO_TYPE_MEMORY_REQUIREMENTS,
+ VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
assert(!err && mem_reqs_size == num_allocations * sizeof(VkMemoryRequirements));
- mem_alloc.memProps = VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT;
+ mem_alloc.memProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
for (uint32_t j = 0; j < num_allocations; j ++) {
mem_alloc.allocationSize = mem_reqs[j].size;
tex_obj->num_mem = num_allocations;
- if (mem_props & VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT) {
+ if (mem_props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
const VkImageSubresource subres = {
.aspect = VK_IMAGE_ASPECT_COLOR,
.mipLevel = 0,
int32_t x, y;
err = vkGetImageSubresourceInfo(tex_obj->image, &subres,
- VK_INFO_TYPE_SUBRESOURCE_LAYOUT,
+ VK_SUBRESOURCE_INFO_TYPE_LAYOUT,
&layout_size, &layout);
assert(!err && layout_size == sizeof(layout));
/* Linear texture must be within a single memory object */
static void demo_prepare_textures(struct demo *demo)
{
- const VkFormat tex_format = VK_FMT_B8G8R8A8_UNORM;
+ const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
VkFormatProperties props;
size_t size = sizeof(props);
const uint32_t tex_colors[DEMO_TEXTURE_COUNT][2] = {
uint32_t i;
err = vkGetFormatInfo(demo->device, tex_format,
- VK_INFO_TYPE_FORMAT_PROPERTIES,
+ VK_FORMAT_INFO_TYPE_PROPERTIES,
&size, &props);
assert(!err);
for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
- if ((props.linearTilingFeatures & VK_FORMAT_SAMPLED_IMAGE_BIT) && !demo->use_staging_buffer) {
+ if ((props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) && !demo->use_staging_buffer) {
/* Device can texture using linear textures */
demo_prepare_texture_image(demo, tex_colors[i], &demo->textures[i],
- VK_LINEAR_TILING, VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT);
- } else if (props.optimalTilingFeatures & VK_FORMAT_SAMPLED_IMAGE_BIT){
+ VK_IMAGE_TILING_LINEAR, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
+ } else if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT){
/* Must use staging buffer to copy linear texture to optimized */
struct texture_object staging_texture;
memset(&staging_texture, 0, sizeof(staging_texture));
demo_prepare_texture_image(demo, tex_colors[i], &staging_texture,
- VK_LINEAR_TILING, VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT);
+ VK_IMAGE_TILING_LINEAR, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
demo_prepare_texture_image(demo, tex_colors[i], &demo->textures[i],
- VK_OPTIMAL_TILING, VK_MEMORY_PROPERTY_GPU_ONLY);
+ VK_IMAGE_TILING_OPTIMAL, VK_MEMORY_PROPERTY_DEVICE_ONLY);
demo_set_image_layout(demo, staging_texture.image,
staging_texture.imageLayout,
demo_destroy_texture_image(demo, &staging_texture);
demo_remove_mem_refs(demo, staging_texture.num_mem, staging_texture.mem);
} else {
- /* Can't support VK_FMT_B8G8R8A8_UNORM !? */
+ /* Can't support VK_FORMAT_B8G8R8A8_UNORM !? */
assert(!"No support for B8G8R8A8_UNORM as texture image format");
}
.pNext = NULL,
.magFilter = VK_TEX_FILTER_NEAREST,
.minFilter = VK_TEX_FILTER_NEAREST,
- .mipMode = VK_TEX_MIPMAP_BASE,
+ .mipMode = VK_TEX_MIPMAP_MODE_BASE,
.addressU = VK_TEX_ADDRESS_WRAP,
.addressV = VK_TEX_ADDRESS_WRAP,
.addressW = VK_TEX_ADDRESS_WRAP,
.mipLodBias = 0.0f,
.maxAnisotropy = 1,
- .compareFunc = VK_COMPARE_NEVER,
+ .compareOp = VK_COMPARE_OP_NEVER,
.minLod = 0.0f,
.maxLod = 0.0f,
- .borderColorType = VK_BORDER_COLOR_OPAQUE_WHITE,
+ .borderColor = VK_BORDER_COLOR_OPAQUE_WHITE,
};
VkImageViewCreateInfo view = {
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
.pNext = NULL,
.image = VK_NULL_HANDLE,
- .viewType = VK_IMAGE_VIEW_2D,
+ .viewType = VK_IMAGE_VIEW_TYPE_2D,
.format = tex_format,
.channels = { VK_CHANNEL_SWIZZLE_R,
VK_CHANNEL_SWIZZLE_G,
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO,
.pNext = NULL,
.allocationSize = 0,
- .memProps = VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT,
+ .memProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
.memPriority = VK_MEMORY_PRIORITY_NORMAL,
};
VkMemoryRequirements *mem_reqs;
assert(!err);
err = vkGetObjectInfo(demo->vertices.buf,
- VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
+ VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
&num_alloc_size, &num_allocations);
assert(!err && num_alloc_size == sizeof(num_allocations));
mem_reqs = malloc(num_allocations * sizeof(VkMemoryRequirements));
- demo->vertices.mem = malloc(num_allocations * sizeof(VkGpuMemory));
+ demo->vertices.mem = malloc(num_allocations * sizeof(VkDeviceMemory));
demo->vertices.num_mem = num_allocations;
err = vkGetObjectInfo(demo->vertices.buf,
- VK_INFO_TYPE_MEMORY_REQUIREMENTS,
+ VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS,
&mem_reqs_size, mem_reqs);
assert(!err && mem_reqs_size == sizeof(*mem_reqs));
for (uint32_t i = 0; i < num_allocations; i ++) {
demo->vertices.vi_attrs[0].binding = VERTEX_BUFFER_BIND_ID;
demo->vertices.vi_attrs[0].location = 0;
- demo->vertices.vi_attrs[0].format = VK_FMT_R32G32B32_SFLOAT;
+ demo->vertices.vi_attrs[0].format = VK_FORMAT_R32G32B32_SFLOAT;
demo->vertices.vi_attrs[0].offsetInBytes = 0;
demo->vertices.vi_attrs[1].binding = VERTEX_BUFFER_BIND_ID;
demo->vertices.vi_attrs[1].location = 1;
- demo->vertices.vi_attrs[1].format = VK_FMT_R32G32_SFLOAT;
+ demo->vertices.vi_attrs[1].format = VK_FORMAT_R32G32_SFLOAT;
demo->vertices.vi_attrs[1].offsetInBytes = sizeof(float) * 3;
}
const VkDescriptorSetLayoutBinding layout_binding = {
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.count = DEMO_TEXTURE_COUNT,
- .stageFlags = VK_SHADER_STAGE_FLAGS_FRAGMENT_BIT,
+ .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
.pImmutableSamplers = NULL,
};
const VkDescriptorSetLayoutCreateInfo descriptor_layout = {
}
static VkShader demo_prepare_shader(struct demo *demo,
- VkPipelineShaderStage stage,
+ VkShaderStage stage,
const void *code,
size_t size)
{
createInfo.pCode = malloc(createInfo.codeSize);
createInfo.flags = 0;
- /* try version 0 first: VkPipelineShaderStage followed by GLSL */
+ /* try version 0 first: VkShaderStage followed by GLSL */
((uint32_t *) createInfo.pCode)[0] = ICD_SPV_MAGIC;
((uint32_t *) createInfo.pCode)[1] = 0;
((uint32_t *) createInfo.pCode)[2] = stage;
memset(&ia, 0, sizeof(ia));
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO;
- ia.topology = VK_TOPOLOGY_TRIANGLE_LIST;
+ ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
memset(&rs, 0, sizeof(rs));
rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO;
- rs.fillMode = VK_FILL_SOLID;
- rs.cullMode = VK_CULL_NONE;
+ rs.fillMode = VK_FILL_MODE_SOLID;
+ rs.cullMode = VK_CULL_MODE_NONE;
rs.frontFace = VK_FRONT_FACE_CCW;
memset(&cb, 0, sizeof(cb));
memset(&vp, 0, sizeof(vp));
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO;
- vp.numViewports = 1;
+ vp.viewportCount = 1;
vp.clipOrigin = VK_COORDINATE_ORIGIN_UPPER_LEFT;
memset(&ds, 0, sizeof(ds));
ds.format = demo->depth.format;
ds.depthTestEnable = VK_TRUE;
ds.depthWriteEnable = VK_TRUE;
- ds.depthFunc = VK_COMPARE_LESS_EQUAL;
+ ds.depthCompareOp = VK_COMPARE_OP_LESS_EQUAL;
ds.depthBoundsEnable = VK_FALSE;
ds.back.stencilFailOp = VK_STENCIL_OP_KEEP;
ds.back.stencilPassOp = VK_STENCIL_OP_KEEP;
- ds.back.stencilFunc = VK_COMPARE_ALWAYS;
+ ds.back.stencilCompareOp = VK_COMPARE_OP_ALWAYS;
ds.stencilTestEnable = VK_FALSE;
ds.front = ds.back;
err = vkCreateDevice(demo->gpu, &device, &demo->device);
assert(!err);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
&data_size, NULL);
assert(!err);
- demo->gpu_props = (VkPhysicalGpuProperties *) malloc(data_size);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
+ demo->gpu_props = (VkPhysicalDeviceProperties *) malloc(data_size);
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
&data_size, demo->gpu_props);
assert(!err);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
&data_size, NULL);
assert(!err);
- demo->queue_props = (VkPhysicalGpuQueueProperties *) malloc(data_size);
- err = vkGetGpuInfo(demo->gpu, VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
+ demo->queue_props = (VkPhysicalDeviceQueueProperties *) malloc(data_size);
+ err = vkGetPhysicalDeviceInfo(demo->gpu, VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
&data_size, demo->queue_props);
assert(!err);
- queue_count = (uint32_t) (data_size / sizeof(VkPhysicalGpuQueueProperties));
+ queue_count = (uint32_t) (data_size / sizeof(VkPhysicalDeviceQueueProperties));
assert(queue_count >= 1);
for (i = 0; i < queue_count; i++) {
demo->width = 300;
demo->height = 300;
- demo->format = VK_FMT_B8G8R8A8_UNORM;
+ demo->format = VK_FORMAT_B8G8R8A8_UNORM;
}
static void demo_cleanup(struct demo *demo)
VkDevice obj;
- VkFormatProperties format_props[VK_NUM_FMT];
+ VkFormatProperties format_props[VK_NUM_FORMAT];
};
struct app_gpu {
uint32_t id;
- VkPhysicalGpu obj;
+ VkPhysicalDevice obj;
- VkPhysicalGpuProperties props;
- VkPhysicalGpuPerformance perf;
+ VkPhysicalDeviceProperties props;
+ VkPhysicalDevicePerformance perf;
uint32_t queue_count;
- VkPhysicalGpuQueueProperties *queue_props;
+ VkPhysicalDeviceQueueProperties *queue_props;
VkDeviceQueueCreateInfo *queue_reqs;
- VkPhysicalGpuMemoryProperties memory_props;
+ VkPhysicalDeviceMemoryProperties memory_props;
uint32_t extension_count;
char **extensions;
STR(VK_ERROR_UNKNOWN);
STR(VK_ERROR_UNAVAILABLE);
STR(VK_ERROR_INITIALIZATION_FAILED);
- STR(VK_ERROR_OUT_OF_MEMORY);
- STR(VK_ERROR_OUT_OF_GPU_MEMORY);
+ STR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ STR(VK_ERROR_OUT_OF_DEVICE_MEMORY);
STR(VK_ERROR_DEVICE_ALREADY_CREATED);
STR(VK_ERROR_DEVICE_LOST);
STR(VK_ERROR_INVALID_POINTER);
}
}
-static const char *vk_gpu_type_string(VkPhysicalGpuType type)
+static const char *vk_physical_device_type_string(VkPhysicalDeviceType type)
{
switch (type) {
-#define STR(r) case VK_GPU_TYPE_ ##r: return #r
+#define STR(r) case VK_PHYSICAL_DEVICE_TYPE_ ##r: return #r
STR(OTHER);
- STR(INTEGRATED);
- STR(DISCRETE);
- STR(VIRTUAL);
+ STR(INTEGRATED_GPU);
+ STR(DISCRETE_GPU);
+ STR(VIRTUAL_GPU);
#undef STR
- default: return "UNKNOWN_GPU";
+ default: return "UNKNOWN_DEVICE";
}
}
static const char *vk_format_string(VkFormat fmt)
{
switch (fmt) {
-#define STR(r) case VK_FMT_ ##r: return #r
+#define STR(r) case VK_FORMAT_ ##r: return #r
STR(UNDEFINED);
STR(R4G4_UNORM);
STR(R4G4_USCALED);
{
VkFormat f;
- for (f = 0; f < VK_NUM_FMT; f++) {
+ for (f = 0; f < VK_NUM_FORMAT; f++) {
const VkFormat fmt = f;
VkResult err;
size_t size = sizeof(dev->format_props[f]);
err = vkGetFormatInfo(dev->obj, fmt,
- VK_INFO_TYPE_FORMAT_PROPERTIES,
+ VK_FORMAT_INFO_TYPE_PROPERTIES,
&size, &dev->format_props[f]);
if (err) {
memset(&dev->format_props[f], 0,
gpu->extensions =
malloc(sizeof(gpu->extensions[0]) * gpu->extension_count);
if (!gpu->extensions)
- ERR_EXIT(VK_ERROR_OUT_OF_MEMORY);
+ ERR_EXIT(VK_ERROR_OUT_OF_HOST_MEMORY);
gpu->extension_count = 0;
for (i = 0; i < ARRAY_SIZE(known_extensions); i++) {
}
}
-static void app_gpu_init(struct app_gpu *gpu, uint32_t id, VkPhysicalGpu obj)
+static void app_gpu_init(struct app_gpu *gpu, uint32_t id, VkPhysicalDevice obj)
{
size_t size;
VkResult err;
gpu->id = id;
gpu->obj = obj;
size = sizeof(gpu->props);
- err = vkGetGpuInfo(gpu->obj,
- VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
+ err = vkGetPhysicalDeviceInfo(gpu->obj,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES,
&size, &gpu->props);
if (err || size != sizeof(gpu->props))
ERR_EXIT(err);
size = sizeof(gpu->perf);
- err = vkGetGpuInfo(gpu->obj,
- VK_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE,
+ err = vkGetPhysicalDeviceInfo(gpu->obj,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE,
&size, &gpu->perf);
if (err || size != sizeof(gpu->perf))
ERR_EXIT(err);
/* get queue count */
- err = vkGetGpuInfo(gpu->obj,
- VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
+ err = vkGetPhysicalDeviceInfo(gpu->obj,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
&size, NULL);
if (err || size % sizeof(gpu->queue_props[0]))
ERR_EXIT(err);
malloc(sizeof(gpu->queue_props[0]) * gpu->queue_count);
size = sizeof(gpu->queue_props[0]) * gpu->queue_count;
if (!gpu->queue_props)
- ERR_EXIT(VK_ERROR_OUT_OF_MEMORY);
- err = vkGetGpuInfo(gpu->obj,
- VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES,
+ ERR_EXIT(VK_ERROR_OUT_OF_HOST_MEMORY);
+ err = vkGetPhysicalDeviceInfo(gpu->obj,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES,
&size, gpu->queue_props);
if (err || size != sizeof(gpu->queue_props[0]) * gpu->queue_count)
ERR_EXIT(err);
size = sizeof(*gpu->queue_reqs) * gpu->queue_count;
gpu->queue_reqs = malloc(sizeof(*gpu->queue_reqs) * gpu->queue_count);
if (!gpu->queue_reqs)
- ERR_EXIT(VK_ERROR_OUT_OF_MEMORY);
+ ERR_EXIT(VK_ERROR_OUT_OF_HOST_MEMORY);
for (i = 0; i < gpu->queue_count; i++) {
gpu->queue_reqs[i].queueNodeIndex = i;
gpu->queue_reqs[i].queueCount = gpu->queue_props[i].queueCount;
}
size = sizeof(gpu->memory_props);
- err = vkGetGpuInfo(gpu->obj,
- VK_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES,
+ err = vkGetPhysicalDeviceInfo(gpu->obj,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES,
&size, &gpu->memory_props);
if (err || size != sizeof(gpu->memory_props))
ERR_EXIT(err);
continue;
printf("\t%s tiling image =%s%s%s\n", tilings[i].name,
- (tilings[i].flags & VK_FORMAT_SAMPLED_IMAGE_BIT) ? " sampled" : "",
- (tilings[i].flags & VK_FORMAT_STORAGE_IMAGE_BIT) ? " storage" : "",
- (tilings[i].flags & VK_FORMAT_STORAGE_IMAGE_ATOMIC_BIT) ? " atomic" : "");
+ (tilings[i].flags & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) ? " sampled" : "",
+ (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) ? " storage" : "",
+ (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT) ? " atomic" : "");
printf("\t%s tiling texel =%s%s%s\n", tilings[i].name,
- (tilings[i].flags & VK_FORMAT_UNIFORM_TEXEL_BUFFER_BIT) ? " TBO" : "",
- (tilings[i].flags & VK_FORMAT_STORAGE_TEXEL_BUFFER_BIT) ? " IBO" : "",
- (tilings[i].flags & VK_FORMAT_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) ? " atomic" : "");
+ (tilings[i].flags & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) ? " TBO" : "",
+ (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) ? " IBO" : "",
+ (tilings[i].flags & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) ? " atomic" : "");
printf("\t%s tiling attachment =%s%s%s\n", tilings[i].name,
- (tilings[i].flags & VK_FORMAT_COLOR_ATTACHMENT_BIT) ? " color" : "",
- (tilings[i].flags & VK_FORMAT_COLOR_ATTACHMENT_BLEND_BIT) ? " blend" : "",
- (tilings[i].flags & VK_FORMAT_DEPTH_STENCIL_ATTACHMENT_BIT) ? " depth/stencil" : "");
+ (tilings[i].flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) ? " color" : "",
+ (tilings[i].flags & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT) ? " blend" : "",
+ (tilings[i].flags & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) ? " depth/stencil" : "");
printf("\t%s tiling vertex = %u\n", tilings[i].name,
- (bool) (tilings[i].flags & VK_FORMAT_VERTEX_BUFFER_BIT));
+ (bool) (tilings[i].flags & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT));
printf("\t%s tiling conversion = %u\n", tilings[i].name,
- (bool) (tilings[i].flags & VK_FORMAT_CONVERSION_BIT));
+ (bool) (tilings[i].flags & VK_FORMAT_FEATURE_CONVERSION_BIT));
}
}
{
VkFormat fmt;
- for (fmt = 0; fmt < VK_NUM_FMT; fmt++) {
+ for (fmt = 0; fmt < VK_NUM_FORMAT; fmt++) {
app_dev_dump_format_props(dev, fmt);
}
}
static void app_gpu_dump_multi_compat(const struct app_gpu *gpu, const struct app_gpu *other,
- const VkGpuCompatibilityInfo *info)
+ const VkPhysicalDeviceCompatibilityInfo *info)
{
- printf("VkGpuCompatibilityInfo[GPU%d]\n", other->id);
+ printf("VkPhysicalDeviceCompatibilityInfo[GPU%d]\n", other->id);
-#define TEST(info, b) printf(#b " = %u\n", (bool) (info->compatibilityFlags & VK_GPU_COMPAT_ ##b## _BIT))
- TEST(info, ASIC_FEATURES);
+#define TEST(info, b) printf(#b " = %u\n", (bool) (info->compatibilityFlags & VK_PHYSICAL_DEVICE_COMPATIBILITY_ ##b## _BIT))
+ TEST(info, FEATURES);
TEST(info, IQ_MATCH);
TEST(info, PEER_TRANSFER);
TEST(info, SHARED_MEMORY);
TEST(info, SHARED_SYNC);
- TEST(info, SHARED_GPU0_DISPLAY);
- TEST(info, SHARED_GPU1_DISPLAY);
+ TEST(info, SHARED_DEVICE0_DISPLAY);
+ TEST(info, SHARED_DEVICE1_DISPLAY);
#undef TEST
}
for (i = 0; i < gpu_count; i++) {
for (j = 0; j < gpu_count; j++) {
- VkGpuCompatibilityInfo info;
+ VkPhysicalDeviceCompatibilityInfo info;
if (i == j)
continue;
- err = vkGetMultiGpuCompatibility(gpus[i].obj,
+ err = vkGetMultiDeviceCompatibility(gpus[i].obj,
gpus[j].obj, &info);
if (err)
ERR_EXIT(err);
static void app_gpu_dump_props(const struct app_gpu *gpu)
{
- const VkPhysicalGpuProperties *props = &gpu->props;
+ const VkPhysicalDeviceProperties *props = &gpu->props;
- printf("VkPhysicalGpuProperties\n");
+ printf("VkPhysicalDeviceProperties\n");
printf("\tapiVersion = %u\n", props->apiVersion);
printf("\tdriverVersion = %u\n", props->driverVersion);
printf("\tvendorId = 0x%04x\n", props->vendorId);
printf("\tdeviceId = 0x%04x\n", props->deviceId);
- printf("\tgpuType = %s\n", vk_gpu_type_string(props->gpuType));
- printf("\tgpuName = %s\n", props->gpuName);
+ printf("\tdeviceType = %s\n", vk_physical_device_type_string(props->deviceType));
+ printf("\tdeviceName = %s\n", props->deviceName);
printf("\tmaxInlineMemoryUpdateSize = %zu\n", props->maxInlineMemoryUpdateSize);
printf("\tmaxBoundDescriptorSets = %u\n", props->maxBoundDescriptorSets);
printf("\tmaxThreadGroupSize = %u\n", props->maxThreadGroupSize);
static void app_gpu_dump_perf(const struct app_gpu *gpu)
{
- const VkPhysicalGpuPerformance *perf = &gpu->perf;
+ const VkPhysicalDevicePerformance *perf = &gpu->perf;
- printf("VkPhysicalGpuPerformance\n");
- printf("\tmaxGpuClock = %f\n", perf->maxGpuClock);
+ printf("VkPhysicalDevicePerformance\n");
+ printf("\tmaxDeviceClock = %f\n", perf->maxDeviceClock);
printf("\taluPerClock = %f\n", perf->aluPerClock);
printf("\ttexPerClock = %f\n", perf->texPerClock);
printf("\tprimsPerClock = %f\n", perf->primsPerClock);
static void app_gpu_dump_queue_props(const struct app_gpu *gpu, uint32_t id)
{
- const VkPhysicalGpuQueueProperties *props = &gpu->queue_props[id];
+ const VkPhysicalDeviceQueueProperties *props = &gpu->queue_props[id];
- printf("VkPhysicalGpuQueueProperties[%d]\n", id);
+ printf("VkPhysicalDeviceQueueProperties[%d]\n", id);
printf("\tqueueFlags = %c%c%c%c\n",
(props->queueFlags & VK_QUEUE_GRAPHICS_BIT) ? 'G' : '.',
(props->queueFlags & VK_QUEUE_COMPUTE_BIT) ? 'C' : '.',
static void app_gpu_dump_memory_props(const struct app_gpu *gpu)
{
- const VkPhysicalGpuMemoryProperties *props = &gpu->memory_props;
+ const VkPhysicalDeviceMemoryProperties *props = &gpu->memory_props;
- printf("VkPhysicalGpuMemoryProperties\n");
+ printf("VkPhysicalDeviceMemoryProperties\n");
printf("\tsupportsMigration = %u\n", props->supportsMigration);
printf("\tsupportsPinning = %u\n", props->supportsPinning);
}
.ppEnabledExtensionNames = NULL,
};
struct app_gpu gpus[MAX_GPUS];
- VkPhysicalGpu objs[MAX_GPUS];
+ VkPhysicalDevice objs[MAX_GPUS];
VkInstance inst;
uint32_t gpu_count, i;
VkResult err;
{
label="vkCreateSampler - multiple calls return unique VkSampler handles"
"_VK_SAMPLER_CREATE_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareOp</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColor</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
"SAMPLER_ELLIPSES" [
label = "..."
];
"_VK_SAMPLER_CREATE_INFO_19" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareOp</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColor</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
}
subgraph clusterSamplerAttach
{
label="vkAttachMemoryViewDescriptors - pMemViews array of VK_MEMORY_VIEW_ATTACH_INFO structs"
"_VK_MEMORY_VIEW_ATTACH_INFO_3" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkDeviceMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkDeviceSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkDeviceSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkDeviceSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
"MEM_VIEW_ELLIPSES" [
label = "..."
];
"_VK_MEMORY_VIEW_ATTACH_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkDeviceMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkDeviceSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkDeviceSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkDeviceSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
}
subgraph clusterImageView
label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkGraphicsPipelineCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkFlags</TD><TD>flags</TD></TR></TABLE>>
];
"VkPipelineIaStateCreateInfo_" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineIaStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkPrimitiveTopology</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VkProvokingVertexConvention</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineIaStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkPrimitiveTopology</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VkProvokingVertex</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
];
"VkPipelineTessStateCreateInfo_" [
label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineTessStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>uint32_t</TD><TD>patchControlPoints</TD></TR> <TR><TD>float</TD><TD>optimalTessFactor</TD></TR><TR><TD>float</TD><TD>fixedTessFactor</TD></TR></TABLE>>
label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"VS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TC_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TE_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"GS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"FS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"VkGraphicsPipelineCreateInfo_":f2 -> "VkPipelineIaStateCreateInfo_" [
id = 100
{
label="vkCreateSampler - multiple calls return unique VkSampler handles"
"_VK_SAMPLER_CREATE_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareOp</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColor</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
"SAMPLER_ELLIPSES" [
label = "..."
];
"_VK_SAMPLER_CREATE_INFO_19" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareFunc</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColorType</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkSamplerCreateInfo</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO</TD></TR><TR><TD PORT="f3">const void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkTexFilter</TD><TD PORT="f6">magFilter</TD></TR><TR><TD PORT="f7">VkTexFilter</TD><TD PORT="f8">minFilter</TD></TR><TR><TD PORT="f9">VkTexMipmapMode</TD><TD PORT="f10">mipMode</TD></TR><TR><TD PORT="f11">VkTexAddress</TD><TD PORT="f12">addressU</TD></TR><TR><TD PORT="f13">VkTexAddress</TD><TD PORT="f14">addressV</TD></TR><TR><TD PORT="f15">VkTexAddress</TD><TD PORT="f16">addressW</TD></TR><TR><TD PORT="f17">float</TD><TD PORT="f18">mipLodBias</TD></TR><TR><TD PORT="f19">uint32_t</TD><TD PORT="f20">maxAnisotropy</TD></TR><TR><TD PORT="f21">VkCompareOp</TD><TD PORT="f22">compareFunc</TD></TR><TR><TD PORT="f23">float</TD><TD PORT="f24">minLod</TD></TR><TR><TD PORT="f25">float</TD><TD PORT="f26">maxLod</TD></TR><TR><TD PORT="f27">VkBorderColor</TD><TD PORT="f28">borderColorType</TD></TR></TABLE>>
];
}
subgraph clusterSamplerAttach
{
label="vkAttachMemoryViewDescriptors - pMemViews array of VK_MEMORY_VIEW_ATTACH_INFO structs"
"_VK_MEMORY_VIEW_ATTACH_INFO_3" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkDeviceMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkDeviceSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkDeviceSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkDeviceSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
"MEM_VIEW_ELLIPSES" [
label = "..."
];
"_VK_MEMORY_VIEW_ATTACH_INFO_0" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkGpuMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkGpuSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkGpuSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkGpuSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VK_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f1">VkStructureType</TD><TD PORT="f2">sType=<BR/>VK_STRUCTURE_TYPE_MEMORY_VIEW_ATTACH_INFO</TD></TR><TR><TD PORT="f3">void*</TD><TD PORT="f4">pNext=NULL</TD></TR><TR><TD PORT="f5">VkDeviceMemory</TD><TD PORT="f6">mem</TD></TR><TR><TD PORT="f7">VkDeviceSize</TD><TD PORT="f8">offset</TD></TR><TR><TD PORT="f9">VkDeviceSize</TD><TD PORT="f10">range</TD></TR><TR><TD PORT="f11">VkDeviceSize</TD><TD PORT="f12">stride</TD></TR><TR><TD PORT="f13">VkFormat</TD><TD PORT="f14">format</TD></TR><TR><TD PORT="f15">VK_MEMORY_STATE</TD><TD PORT="f16">state</TD></TR></TABLE>>
];
}
subgraph clusterImageView
label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkGraphicsPipelineCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkFlags</TD><TD>flags</TD></TR></TABLE>>
];
"VkPipelineIaStateCreateInfo_" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineIaStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkPrimitiveTopology</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VkProvokingVertexConvention</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineIaStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>VkPrimitiveTopology</TD><TD>topology</TD></TR><TR><TD>bool32_t</TD><TD>disableVertexReuse</TD></TR><TR><TD>VkProvokingVertex</TD><TD>provokingVertex</TD></TR><TR><TD>bool32_t</TD><TD>primitiveRestartEnable</TD></TR><TR><TD>uint32_t</TD><TD>primitiveRestartIndex</TD></TR></TABLE>>
];
"VkPipelineTessStateCreateInfo_" [
label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineTessStateCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO</TD></TR><TR><TD>const void*</TD><TD PORT="f2">pNext</TD></TR><TR><TD>uint32_t</TD><TD>patchControlPoints</TD></TR> <TR><TD>float</TD><TD>optimalTessFactor</TD></TR><TR><TD>float</TD><TD>fixedTessFactor</TD></TR></TABLE>>
label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShaderStageCreateInfo</TD></TR><TR><TD>VkStructureType</TD><TD>sType=<BR/>VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO</TD></TR> <TR><TD>const void*</TD><TD PORT="f2">pNext=NULL</TD></TR> <TR><TD>VkPipelineShader</TD><TD PORT="f4">shader</TD></TR> </TABLE>>
];
"VS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=VS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TC_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=TC</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"TE_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=TE</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"GS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=GS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"FS_VK_PIPELINE_SHADER" [
-label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkPipelineShaderStage</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
+label = <<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"> <TR><TD COLSPAN="2" PORT="f0">VkPipelineShader</TD></TR><TR><TD PORT="f1">VkShaderStage</TD><TD PORT="f2">stage=FS</TD></TR><TR><TD PORT="f3">VkShader</TD><TD PORT="f4">shader</TD></TR><TR><TD PORT="f5">VK_DESCRIPTOR_SET_MAPPING</TD><TD PORT="f6">descriptorSetMapping[2]</TD></TR><TR><TD PORT="f7">uint32_t</TD><TD PORT="f8">linkConstBufferCount</TD></TR><TR><TD PORT="f9">const VkLinkConstBuffer*</TD><TD PORT="f10">pLinkConstBufferInfo</TD></TR><TR><TD PORT="f11">VK_DYNAMIC_MEMORY_VIEW_SLOT_INFO</TD><TD PORT="f12">dynamicMemoryViewMapping</TD></TR></TABLE>>
];
"VkGraphicsPipelineCreateInfo_":f2 -> "VkPipelineIaStateCreateInfo_" [
id = 0
common/ provides helper and utility functions, as well as all VK entry points
except vkInitAndEnumerateGpus. Hardware drivers are required to provide that
function, and to embed a "VkLayerDispatchTable *" as the first member of
-VkPhysicalGpu and all VkBaseObject.
+VkPhysicalDevice and all VkBaseObject.
Thread safety
return devices;
} else {
dev = icd_instance_alloc(instance, sizeof(*dev), 0,
- VK_SYSTEM_ALLOC_INTERNAL_TEMP);
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL_TEMP);
if (!dev)
return devices;
static const struct icd_format_info {
size_t size;
uint32_t channel_count;
-} icd_format_table[VK_NUM_FMT] = {
- [VK_FMT_UNDEFINED] = { 0, 0 },
- [VK_FMT_R4G4_UNORM] = { 1, 2 },
- [VK_FMT_R4G4_USCALED] = { 1, 2 },
- [VK_FMT_R4G4B4A4_UNORM] = { 2, 4 },
- [VK_FMT_R4G4B4A4_USCALED] = { 2, 4 },
- [VK_FMT_R5G6B5_UNORM] = { 2, 3 },
- [VK_FMT_R5G6B5_USCALED] = { 2, 3 },
- [VK_FMT_R5G5B5A1_UNORM] = { 2, 4 },
- [VK_FMT_R5G5B5A1_USCALED] = { 2, 4 },
- [VK_FMT_R8_UNORM] = { 1, 1 },
- [VK_FMT_R8_SNORM] = { 1, 1 },
- [VK_FMT_R8_USCALED] = { 1, 1 },
- [VK_FMT_R8_SSCALED] = { 1, 1 },
- [VK_FMT_R8_UINT] = { 1, 1 },
- [VK_FMT_R8_SINT] = { 1, 1 },
- [VK_FMT_R8_SRGB] = { 1, 1 },
- [VK_FMT_R8G8_UNORM] = { 2, 2 },
- [VK_FMT_R8G8_SNORM] = { 2, 2 },
- [VK_FMT_R8G8_USCALED] = { 2, 2 },
- [VK_FMT_R8G8_SSCALED] = { 2, 2 },
- [VK_FMT_R8G8_UINT] = { 2, 2 },
- [VK_FMT_R8G8_SINT] = { 2, 2 },
- [VK_FMT_R8G8_SRGB] = { 2, 2 },
- [VK_FMT_R8G8B8_UNORM] = { 3, 3 },
- [VK_FMT_R8G8B8_SNORM] = { 3, 3 },
- [VK_FMT_R8G8B8_USCALED] = { 3, 3 },
- [VK_FMT_R8G8B8_SSCALED] = { 3, 3 },
- [VK_FMT_R8G8B8_UINT] = { 3, 3 },
- [VK_FMT_R8G8B8_SINT] = { 3, 3 },
- [VK_FMT_R8G8B8_SRGB] = { 3, 3 },
- [VK_FMT_R8G8B8A8_UNORM] = { 4, 4 },
- [VK_FMT_R8G8B8A8_SNORM] = { 4, 4 },
- [VK_FMT_R8G8B8A8_USCALED] = { 4, 4 },
- [VK_FMT_R8G8B8A8_SSCALED] = { 4, 4 },
- [VK_FMT_R8G8B8A8_UINT] = { 4, 4 },
- [VK_FMT_R8G8B8A8_SINT] = { 4, 4 },
- [VK_FMT_R8G8B8A8_SRGB] = { 4, 4 },
- [VK_FMT_R10G10B10A2_UNORM] = { 4, 4 },
- [VK_FMT_R10G10B10A2_SNORM] = { 4, 4 },
- [VK_FMT_R10G10B10A2_USCALED] = { 4, 4 },
- [VK_FMT_R10G10B10A2_SSCALED] = { 4, 4 },
- [VK_FMT_R10G10B10A2_UINT] = { 4, 4 },
- [VK_FMT_R10G10B10A2_SINT] = { 4, 4 },
- [VK_FMT_R16_UNORM] = { 2, 1 },
- [VK_FMT_R16_SNORM] = { 2, 1 },
- [VK_FMT_R16_USCALED] = { 2, 1 },
- [VK_FMT_R16_SSCALED] = { 2, 1 },
- [VK_FMT_R16_UINT] = { 2, 1 },
- [VK_FMT_R16_SINT] = { 2, 1 },
- [VK_FMT_R16_SFLOAT] = { 2, 1 },
- [VK_FMT_R16G16_UNORM] = { 4, 2 },
- [VK_FMT_R16G16_SNORM] = { 4, 2 },
- [VK_FMT_R16G16_USCALED] = { 4, 2 },
- [VK_FMT_R16G16_SSCALED] = { 4, 2 },
- [VK_FMT_R16G16_UINT] = { 4, 2 },
- [VK_FMT_R16G16_SINT] = { 4, 2 },
- [VK_FMT_R16G16_SFLOAT] = { 4, 2 },
- [VK_FMT_R16G16B16_UNORM] = { 6, 3 },
- [VK_FMT_R16G16B16_SNORM] = { 6, 3 },
- [VK_FMT_R16G16B16_USCALED] = { 6, 3 },
- [VK_FMT_R16G16B16_SSCALED] = { 6, 3 },
- [VK_FMT_R16G16B16_UINT] = { 6, 3 },
- [VK_FMT_R16G16B16_SINT] = { 6, 3 },
- [VK_FMT_R16G16B16_SFLOAT] = { 6, 3 },
- [VK_FMT_R16G16B16A16_UNORM] = { 8, 4 },
- [VK_FMT_R16G16B16A16_SNORM] = { 8, 4 },
- [VK_FMT_R16G16B16A16_USCALED] = { 8, 4 },
- [VK_FMT_R16G16B16A16_SSCALED] = { 8, 4 },
- [VK_FMT_R16G16B16A16_UINT] = { 8, 4 },
- [VK_FMT_R16G16B16A16_SINT] = { 8, 4 },
- [VK_FMT_R16G16B16A16_SFLOAT] = { 8, 4 },
- [VK_FMT_R32_UINT] = { 4, 1 },
- [VK_FMT_R32_SINT] = { 4, 1 },
- [VK_FMT_R32_SFLOAT] = { 4, 1 },
- [VK_FMT_R32G32_UINT] = { 8, 2 },
- [VK_FMT_R32G32_SINT] = { 8, 2 },
- [VK_FMT_R32G32_SFLOAT] = { 8, 2 },
- [VK_FMT_R32G32B32_UINT] = { 12, 3 },
- [VK_FMT_R32G32B32_SINT] = { 12, 3 },
- [VK_FMT_R32G32B32_SFLOAT] = { 12, 3 },
- [VK_FMT_R32G32B32A32_UINT] = { 16, 4 },
- [VK_FMT_R32G32B32A32_SINT] = { 16, 4 },
- [VK_FMT_R32G32B32A32_SFLOAT] = { 16, 4 },
- [VK_FMT_R64_SFLOAT] = { 8, 1 },
- [VK_FMT_R64G64_SFLOAT] = { 16, 2 },
- [VK_FMT_R64G64B64_SFLOAT] = { 24, 3 },
- [VK_FMT_R64G64B64A64_SFLOAT] = { 32, 4 },
- [VK_FMT_R11G11B10_UFLOAT] = { 4, 3 },
- [VK_FMT_R9G9B9E5_UFLOAT] = { 4, 3 },
- [VK_FMT_D16_UNORM] = { 2, 1 },
- [VK_FMT_D24_UNORM] = { 3, 1 },
- [VK_FMT_D32_SFLOAT] = { 4, 1 },
- [VK_FMT_S8_UINT] = { 1, 1 },
- [VK_FMT_D16_UNORM_S8_UINT] = { 3, 2 },
- [VK_FMT_D24_UNORM_S8_UINT] = { 4, 2 },
- [VK_FMT_D32_SFLOAT_S8_UINT] = { 4, 2 },
- [VK_FMT_BC1_RGB_UNORM] = { 8, 4 },
- [VK_FMT_BC1_RGB_SRGB] = { 8, 4 },
- [VK_FMT_BC1_RGBA_UNORM] = { 8, 4 },
- [VK_FMT_BC1_RGBA_SRGB] = { 8, 4 },
- [VK_FMT_BC2_UNORM] = { 16, 4 },
- [VK_FMT_BC2_SRGB] = { 16, 4 },
- [VK_FMT_BC3_UNORM] = { 16, 4 },
- [VK_FMT_BC3_SRGB] = { 16, 4 },
- [VK_FMT_BC4_UNORM] = { 8, 4 },
- [VK_FMT_BC4_SNORM] = { 8, 4 },
- [VK_FMT_BC5_UNORM] = { 16, 4 },
- [VK_FMT_BC5_SNORM] = { 16, 4 },
- [VK_FMT_BC6H_UFLOAT] = { 16, 4 },
- [VK_FMT_BC6H_SFLOAT] = { 16, 4 },
- [VK_FMT_BC7_UNORM] = { 16, 4 },
- [VK_FMT_BC7_SRGB] = { 16, 4 },
+} icd_format_table[VK_NUM_FORMAT] = {
+ [VK_FORMAT_UNDEFINED] = { 0, 0 },
+ [VK_FORMAT_R4G4_UNORM] = { 1, 2 },
+ [VK_FORMAT_R4G4_USCALED] = { 1, 2 },
+ [VK_FORMAT_R4G4B4A4_UNORM] = { 2, 4 },
+ [VK_FORMAT_R4G4B4A4_USCALED] = { 2, 4 },
+ [VK_FORMAT_R5G6B5_UNORM] = { 2, 3 },
+ [VK_FORMAT_R5G6B5_USCALED] = { 2, 3 },
+ [VK_FORMAT_R5G5B5A1_UNORM] = { 2, 4 },
+ [VK_FORMAT_R5G5B5A1_USCALED] = { 2, 4 },
+ [VK_FORMAT_R8_UNORM] = { 1, 1 },
+ [VK_FORMAT_R8_SNORM] = { 1, 1 },
+ [VK_FORMAT_R8_USCALED] = { 1, 1 },
+ [VK_FORMAT_R8_SSCALED] = { 1, 1 },
+ [VK_FORMAT_R8_UINT] = { 1, 1 },
+ [VK_FORMAT_R8_SINT] = { 1, 1 },
+ [VK_FORMAT_R8_SRGB] = { 1, 1 },
+ [VK_FORMAT_R8G8_UNORM] = { 2, 2 },
+ [VK_FORMAT_R8G8_SNORM] = { 2, 2 },
+ [VK_FORMAT_R8G8_USCALED] = { 2, 2 },
+ [VK_FORMAT_R8G8_SSCALED] = { 2, 2 },
+ [VK_FORMAT_R8G8_UINT] = { 2, 2 },
+ [VK_FORMAT_R8G8_SINT] = { 2, 2 },
+ [VK_FORMAT_R8G8_SRGB] = { 2, 2 },
+ [VK_FORMAT_R8G8B8_UNORM] = { 3, 3 },
+ [VK_FORMAT_R8G8B8_SNORM] = { 3, 3 },
+ [VK_FORMAT_R8G8B8_USCALED] = { 3, 3 },
+ [VK_FORMAT_R8G8B8_SSCALED] = { 3, 3 },
+ [VK_FORMAT_R8G8B8_UINT] = { 3, 3 },
+ [VK_FORMAT_R8G8B8_SINT] = { 3, 3 },
+ [VK_FORMAT_R8G8B8_SRGB] = { 3, 3 },
+ [VK_FORMAT_R8G8B8A8_UNORM] = { 4, 4 },
+ [VK_FORMAT_R8G8B8A8_SNORM] = { 4, 4 },
+ [VK_FORMAT_R8G8B8A8_USCALED] = { 4, 4 },
+ [VK_FORMAT_R8G8B8A8_SSCALED] = { 4, 4 },
+ [VK_FORMAT_R8G8B8A8_UINT] = { 4, 4 },
+ [VK_FORMAT_R8G8B8A8_SINT] = { 4, 4 },
+ [VK_FORMAT_R8G8B8A8_SRGB] = { 4, 4 },
+ [VK_FORMAT_R10G10B10A2_UNORM] = { 4, 4 },
+ [VK_FORMAT_R10G10B10A2_SNORM] = { 4, 4 },
+ [VK_FORMAT_R10G10B10A2_USCALED] = { 4, 4 },
+ [VK_FORMAT_R10G10B10A2_SSCALED] = { 4, 4 },
+ [VK_FORMAT_R10G10B10A2_UINT] = { 4, 4 },
+ [VK_FORMAT_R10G10B10A2_SINT] = { 4, 4 },
+ [VK_FORMAT_R16_UNORM] = { 2, 1 },
+ [VK_FORMAT_R16_SNORM] = { 2, 1 },
+ [VK_FORMAT_R16_USCALED] = { 2, 1 },
+ [VK_FORMAT_R16_SSCALED] = { 2, 1 },
+ [VK_FORMAT_R16_UINT] = { 2, 1 },
+ [VK_FORMAT_R16_SINT] = { 2, 1 },
+ [VK_FORMAT_R16_SFLOAT] = { 2, 1 },
+ [VK_FORMAT_R16G16_UNORM] = { 4, 2 },
+ [VK_FORMAT_R16G16_SNORM] = { 4, 2 },
+ [VK_FORMAT_R16G16_USCALED] = { 4, 2 },
+ [VK_FORMAT_R16G16_SSCALED] = { 4, 2 },
+ [VK_FORMAT_R16G16_UINT] = { 4, 2 },
+ [VK_FORMAT_R16G16_SINT] = { 4, 2 },
+ [VK_FORMAT_R16G16_SFLOAT] = { 4, 2 },
+ [VK_FORMAT_R16G16B16_UNORM] = { 6, 3 },
+ [VK_FORMAT_R16G16B16_SNORM] = { 6, 3 },
+ [VK_FORMAT_R16G16B16_USCALED] = { 6, 3 },
+ [VK_FORMAT_R16G16B16_SSCALED] = { 6, 3 },
+ [VK_FORMAT_R16G16B16_UINT] = { 6, 3 },
+ [VK_FORMAT_R16G16B16_SINT] = { 6, 3 },
+ [VK_FORMAT_R16G16B16_SFLOAT] = { 6, 3 },
+ [VK_FORMAT_R16G16B16A16_UNORM] = { 8, 4 },
+ [VK_FORMAT_R16G16B16A16_SNORM] = { 8, 4 },
+ [VK_FORMAT_R16G16B16A16_USCALED] = { 8, 4 },
+ [VK_FORMAT_R16G16B16A16_SSCALED] = { 8, 4 },
+ [VK_FORMAT_R16G16B16A16_UINT] = { 8, 4 },
+ [VK_FORMAT_R16G16B16A16_SINT] = { 8, 4 },
+ [VK_FORMAT_R16G16B16A16_SFLOAT] = { 8, 4 },
+ [VK_FORMAT_R32_UINT] = { 4, 1 },
+ [VK_FORMAT_R32_SINT] = { 4, 1 },
+ [VK_FORMAT_R32_SFLOAT] = { 4, 1 },
+ [VK_FORMAT_R32G32_UINT] = { 8, 2 },
+ [VK_FORMAT_R32G32_SINT] = { 8, 2 },
+ [VK_FORMAT_R32G32_SFLOAT] = { 8, 2 },
+ [VK_FORMAT_R32G32B32_UINT] = { 12, 3 },
+ [VK_FORMAT_R32G32B32_SINT] = { 12, 3 },
+ [VK_FORMAT_R32G32B32_SFLOAT] = { 12, 3 },
+ [VK_FORMAT_R32G32B32A32_UINT] = { 16, 4 },
+ [VK_FORMAT_R32G32B32A32_SINT] = { 16, 4 },
+ [VK_FORMAT_R32G32B32A32_SFLOAT] = { 16, 4 },
+ [VK_FORMAT_R64_SFLOAT] = { 8, 1 },
+ [VK_FORMAT_R64G64_SFLOAT] = { 16, 2 },
+ [VK_FORMAT_R64G64B64_SFLOAT] = { 24, 3 },
+ [VK_FORMAT_R64G64B64A64_SFLOAT] = { 32, 4 },
+ [VK_FORMAT_R11G11B10_UFLOAT] = { 4, 3 },
+ [VK_FORMAT_R9G9B9E5_UFLOAT] = { 4, 3 },
+ [VK_FORMAT_D16_UNORM] = { 2, 1 },
+ [VK_FORMAT_D24_UNORM] = { 3, 1 },
+ [VK_FORMAT_D32_SFLOAT] = { 4, 1 },
+ [VK_FORMAT_S8_UINT] = { 1, 1 },
+ [VK_FORMAT_D16_UNORM_S8_UINT] = { 3, 2 },
+ [VK_FORMAT_D24_UNORM_S8_UINT] = { 4, 2 },
+ [VK_FORMAT_D32_SFLOAT_S8_UINT] = { 4, 2 },
+ [VK_FORMAT_BC1_RGB_UNORM] = { 8, 4 },
+ [VK_FORMAT_BC1_RGB_SRGB] = { 8, 4 },
+ [VK_FORMAT_BC1_RGBA_UNORM] = { 8, 4 },
+ [VK_FORMAT_BC1_RGBA_SRGB] = { 8, 4 },
+ [VK_FORMAT_BC2_UNORM] = { 16, 4 },
+ [VK_FORMAT_BC2_SRGB] = { 16, 4 },
+ [VK_FORMAT_BC3_UNORM] = { 16, 4 },
+ [VK_FORMAT_BC3_SRGB] = { 16, 4 },
+ [VK_FORMAT_BC4_UNORM] = { 8, 4 },
+ [VK_FORMAT_BC4_SNORM] = { 8, 4 },
+ [VK_FORMAT_BC5_UNORM] = { 16, 4 },
+ [VK_FORMAT_BC5_SNORM] = { 16, 4 },
+ [VK_FORMAT_BC6H_UFLOAT] = { 16, 4 },
+ [VK_FORMAT_BC6H_SFLOAT] = { 16, 4 },
+ [VK_FORMAT_BC7_UNORM] = { 16, 4 },
+ [VK_FORMAT_BC7_SRGB] = { 16, 4 },
/* TODO: Initialize remaining compressed formats. */
- [VK_FMT_ETC2_R8G8B8_UNORM] = { 0, 0 },
- [VK_FMT_ETC2_R8G8B8A1_UNORM] = { 0, 0 },
- [VK_FMT_ETC2_R8G8B8A8_UNORM] = { 0, 0 },
- [VK_FMT_EAC_R11_UNORM] = { 0, 0 },
- [VK_FMT_EAC_R11_SNORM] = { 0, 0 },
- [VK_FMT_EAC_R11G11_UNORM] = { 0, 0 },
- [VK_FMT_EAC_R11G11_SNORM] = { 0, 0 },
- [VK_FMT_ASTC_4x4_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_4x4_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_5x4_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_5x4_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_5x5_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_5x5_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_6x5_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_6x5_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_6x6_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_6x6_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_8x5_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_8x5_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_8x6_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_8x6_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_8x8_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_8x8_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_10x5_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_10x5_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_10x6_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_10x6_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_10x8_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_10x8_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_10x10_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_10x10_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_12x10_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_12x10_SRGB] = { 0, 0 },
- [VK_FMT_ASTC_12x12_UNORM] = { 0, 0 },
- [VK_FMT_ASTC_12x12_SRGB] = { 0, 0 },
- [VK_FMT_B5G6R5_UNORM] = { 2, 3 },
- [VK_FMT_B5G6R5_USCALED] = { 2, 3 },
- [VK_FMT_B8G8R8_UNORM] = { 3, 3 },
- [VK_FMT_B8G8R8_SNORM] = { 3, 3 },
- [VK_FMT_B8G8R8_USCALED] = { 3, 3 },
- [VK_FMT_B8G8R8_SSCALED] = { 3, 3 },
- [VK_FMT_B8G8R8_UINT] = { 3, 3 },
- [VK_FMT_B8G8R8_SINT] = { 3, 3 },
- [VK_FMT_B8G8R8_SRGB] = { 3, 3 },
- [VK_FMT_B8G8R8A8_UNORM] = { 4, 4 },
- [VK_FMT_B8G8R8A8_SNORM] = { 4, 4 },
- [VK_FMT_B8G8R8A8_USCALED] = { 4, 4 },
- [VK_FMT_B8G8R8A8_SSCALED] = { 4, 4 },
- [VK_FMT_B8G8R8A8_UINT] = { 4, 4 },
- [VK_FMT_B8G8R8A8_SINT] = { 4, 4 },
- [VK_FMT_B8G8R8A8_SRGB] = { 4, 4 },
- [VK_FMT_B10G10R10A2_UNORM] = { 4, 4 },
- [VK_FMT_B10G10R10A2_SNORM] = { 4, 4 },
- [VK_FMT_B10G10R10A2_USCALED] = { 4, 4 },
- [VK_FMT_B10G10R10A2_SSCALED] = { 4, 4 },
- [VK_FMT_B10G10R10A2_UINT] = { 4, 4 },
- [VK_FMT_B10G10R10A2_SINT] = { 4, 4 },
+ [VK_FORMAT_ETC2_R8G8B8_UNORM] = { 0, 0 },
+ [VK_FORMAT_ETC2_R8G8B8A1_UNORM] = { 0, 0 },
+ [VK_FORMAT_ETC2_R8G8B8A8_UNORM] = { 0, 0 },
+ [VK_FORMAT_EAC_R11_UNORM] = { 0, 0 },
+ [VK_FORMAT_EAC_R11_SNORM] = { 0, 0 },
+ [VK_FORMAT_EAC_R11G11_UNORM] = { 0, 0 },
+ [VK_FORMAT_EAC_R11G11_SNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_4x4_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_4x4_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_5x4_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_5x4_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_5x5_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_5x5_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_6x5_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_6x5_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_6x6_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_6x6_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_8x5_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_8x5_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_8x6_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_8x6_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_8x8_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_8x8_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x5_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x5_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x6_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x6_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x8_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x8_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x10_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_10x10_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_12x10_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_12x10_SRGB] = { 0, 0 },
+ [VK_FORMAT_ASTC_12x12_UNORM] = { 0, 0 },
+ [VK_FORMAT_ASTC_12x12_SRGB] = { 0, 0 },
+ [VK_FORMAT_B5G6R5_UNORM] = { 2, 3 },
+ [VK_FORMAT_B5G6R5_USCALED] = { 2, 3 },
+ [VK_FORMAT_B8G8R8_UNORM] = { 3, 3 },
+ [VK_FORMAT_B8G8R8_SNORM] = { 3, 3 },
+ [VK_FORMAT_B8G8R8_USCALED] = { 3, 3 },
+ [VK_FORMAT_B8G8R8_SSCALED] = { 3, 3 },
+ [VK_FORMAT_B8G8R8_UINT] = { 3, 3 },
+ [VK_FORMAT_B8G8R8_SINT] = { 3, 3 },
+ [VK_FORMAT_B8G8R8_SRGB] = { 3, 3 },
+ [VK_FORMAT_B8G8R8A8_UNORM] = { 4, 4 },
+ [VK_FORMAT_B8G8R8A8_SNORM] = { 4, 4 },
+ [VK_FORMAT_B8G8R8A8_USCALED] = { 4, 4 },
+ [VK_FORMAT_B8G8R8A8_SSCALED] = { 4, 4 },
+ [VK_FORMAT_B8G8R8A8_UINT] = { 4, 4 },
+ [VK_FORMAT_B8G8R8A8_SINT] = { 4, 4 },
+ [VK_FORMAT_B8G8R8A8_SRGB] = { 4, 4 },
+ [VK_FORMAT_B10G10R10A2_UNORM] = { 4, 4 },
+ [VK_FORMAT_B10G10R10A2_SNORM] = { 4, 4 },
+ [VK_FORMAT_B10G10R10A2_USCALED] = { 4, 4 },
+ [VK_FORMAT_B10G10R10A2_SSCALED] = { 4, 4 },
+ [VK_FORMAT_B10G10R10A2_UINT] = { 4, 4 },
+ [VK_FORMAT_B10G10R10A2_SINT] = { 4, 4 },
};
bool icd_format_is_ds(VkFormat format)
bool is_ds = false;
switch (format) {
- case VK_FMT_D16_UNORM:
- case VK_FMT_D24_UNORM:
- case VK_FMT_D32_SFLOAT:
- case VK_FMT_S8_UINT:
- case VK_FMT_D16_UNORM_S8_UINT:
- case VK_FMT_D24_UNORM_S8_UINT:
- case VK_FMT_D32_SFLOAT_S8_UINT:
+ case VK_FORMAT_D16_UNORM:
+ case VK_FORMAT_D24_UNORM:
+ case VK_FORMAT_D32_SFLOAT:
+ case VK_FORMAT_S8_UINT:
+ case VK_FORMAT_D16_UNORM_S8_UINT:
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
is_ds = true;
break;
default:
bool is_norm = false;
switch (format) {
- case VK_FMT_R4G4_UNORM:
- case VK_FMT_R4G4B4A4_UNORM:
- case VK_FMT_R5G6B5_UNORM:
- case VK_FMT_R5G5B5A1_UNORM:
- case VK_FMT_R8_UNORM:
- case VK_FMT_R8_SNORM:
- case VK_FMT_R8G8_UNORM:
- case VK_FMT_R8G8_SNORM:
- case VK_FMT_R8G8B8_UNORM:
- case VK_FMT_R8G8B8_SNORM:
- case VK_FMT_R8G8B8A8_UNORM:
- case VK_FMT_R8G8B8A8_SNORM:
- case VK_FMT_R10G10B10A2_UNORM:
- case VK_FMT_R10G10B10A2_SNORM:
- case VK_FMT_R16_UNORM:
- case VK_FMT_R16_SNORM:
- case VK_FMT_R16G16_UNORM:
- case VK_FMT_R16G16_SNORM:
- case VK_FMT_R16G16B16_UNORM:
- case VK_FMT_R16G16B16_SNORM:
- case VK_FMT_R16G16B16A16_UNORM:
- case VK_FMT_R16G16B16A16_SNORM:
- case VK_FMT_BC1_RGB_UNORM:
- case VK_FMT_BC2_UNORM:
- case VK_FMT_BC3_UNORM:
- case VK_FMT_BC4_UNORM:
- case VK_FMT_BC4_SNORM:
- case VK_FMT_BC5_UNORM:
- case VK_FMT_BC5_SNORM:
- case VK_FMT_BC7_UNORM:
- case VK_FMT_ETC2_R8G8B8_UNORM:
- case VK_FMT_ETC2_R8G8B8A1_UNORM:
- case VK_FMT_ETC2_R8G8B8A8_UNORM:
- case VK_FMT_EAC_R11_UNORM:
- case VK_FMT_EAC_R11_SNORM:
- case VK_FMT_EAC_R11G11_UNORM:
- case VK_FMT_EAC_R11G11_SNORM:
- case VK_FMT_ASTC_4x4_UNORM:
- case VK_FMT_ASTC_5x4_UNORM:
- case VK_FMT_ASTC_5x5_UNORM:
- case VK_FMT_ASTC_6x5_UNORM:
- case VK_FMT_ASTC_6x6_UNORM:
- case VK_FMT_ASTC_8x5_UNORM:
- case VK_FMT_ASTC_8x6_UNORM:
- case VK_FMT_ASTC_8x8_UNORM:
- case VK_FMT_ASTC_10x5_UNORM:
- case VK_FMT_ASTC_10x6_UNORM:
- case VK_FMT_ASTC_10x8_UNORM:
- case VK_FMT_ASTC_10x10_UNORM:
- case VK_FMT_ASTC_12x10_UNORM:
- case VK_FMT_ASTC_12x12_UNORM:
- case VK_FMT_B5G6R5_UNORM:
- case VK_FMT_B8G8R8_UNORM:
- case VK_FMT_B8G8R8_SNORM:
- case VK_FMT_B8G8R8A8_UNORM:
- case VK_FMT_B8G8R8A8_SNORM:
- case VK_FMT_B10G10R10A2_UNORM:
- case VK_FMT_B10G10R10A2_SNORM:
+ case VK_FORMAT_R4G4_UNORM:
+ case VK_FORMAT_R4G4B4A4_UNORM:
+ case VK_FORMAT_R5G6B5_UNORM:
+ case VK_FORMAT_R5G5B5A1_UNORM:
+ case VK_FORMAT_R8_UNORM:
+ case VK_FORMAT_R8_SNORM:
+ case VK_FORMAT_R8G8_UNORM:
+ case VK_FORMAT_R8G8_SNORM:
+ case VK_FORMAT_R8G8B8_UNORM:
+ case VK_FORMAT_R8G8B8_SNORM:
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_R8G8B8A8_SNORM:
+ case VK_FORMAT_R10G10B10A2_UNORM:
+ case VK_FORMAT_R10G10B10A2_SNORM:
+ case VK_FORMAT_R16_UNORM:
+ case VK_FORMAT_R16_SNORM:
+ case VK_FORMAT_R16G16_UNORM:
+ case VK_FORMAT_R16G16_SNORM:
+ case VK_FORMAT_R16G16B16_UNORM:
+ case VK_FORMAT_R16G16B16_SNORM:
+ case VK_FORMAT_R16G16B16A16_UNORM:
+ case VK_FORMAT_R16G16B16A16_SNORM:
+ case VK_FORMAT_BC1_RGB_UNORM:
+ case VK_FORMAT_BC2_UNORM:
+ case VK_FORMAT_BC3_UNORM:
+ case VK_FORMAT_BC4_UNORM:
+ case VK_FORMAT_BC4_SNORM:
+ case VK_FORMAT_BC5_UNORM:
+ case VK_FORMAT_BC5_SNORM:
+ case VK_FORMAT_BC7_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8A1_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8A8_UNORM:
+ case VK_FORMAT_EAC_R11_UNORM:
+ case VK_FORMAT_EAC_R11_SNORM:
+ case VK_FORMAT_EAC_R11G11_UNORM:
+ case VK_FORMAT_EAC_R11G11_SNORM:
+ case VK_FORMAT_ASTC_4x4_UNORM:
+ case VK_FORMAT_ASTC_5x4_UNORM:
+ case VK_FORMAT_ASTC_5x5_UNORM:
+ case VK_FORMAT_ASTC_6x5_UNORM:
+ case VK_FORMAT_ASTC_6x6_UNORM:
+ case VK_FORMAT_ASTC_8x5_UNORM:
+ case VK_FORMAT_ASTC_8x6_UNORM:
+ case VK_FORMAT_ASTC_8x8_UNORM:
+ case VK_FORMAT_ASTC_10x5_UNORM:
+ case VK_FORMAT_ASTC_10x6_UNORM:
+ case VK_FORMAT_ASTC_10x8_UNORM:
+ case VK_FORMAT_ASTC_10x10_UNORM:
+ case VK_FORMAT_ASTC_12x10_UNORM:
+ case VK_FORMAT_ASTC_12x12_UNORM:
+ case VK_FORMAT_B5G6R5_UNORM:
+ case VK_FORMAT_B8G8R8_UNORM:
+ case VK_FORMAT_B8G8R8_SNORM:
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_B8G8R8A8_SNORM:
+ case VK_FORMAT_B10G10R10A2_UNORM:
+ case VK_FORMAT_B10G10R10A2_SNORM:
is_norm = true;
break;
default:
bool is_int = false;
switch (format) {
- case VK_FMT_R8_UINT:
- case VK_FMT_R8_SINT:
- case VK_FMT_R8G8_UINT:
- case VK_FMT_R8G8_SINT:
- case VK_FMT_R8G8B8_UINT:
- case VK_FMT_R8G8B8_SINT:
- case VK_FMT_R8G8B8A8_UINT:
- case VK_FMT_R8G8B8A8_SINT:
- case VK_FMT_R10G10B10A2_UINT:
- case VK_FMT_R10G10B10A2_SINT:
- case VK_FMT_R16_UINT:
- case VK_FMT_R16_SINT:
- case VK_FMT_R16G16_UINT:
- case VK_FMT_R16G16_SINT:
- case VK_FMT_R16G16B16_UINT:
- case VK_FMT_R16G16B16_SINT:
- case VK_FMT_R16G16B16A16_UINT:
- case VK_FMT_R16G16B16A16_SINT:
- case VK_FMT_R32_UINT:
- case VK_FMT_R32_SINT:
- case VK_FMT_R32G32_UINT:
- case VK_FMT_R32G32_SINT:
- case VK_FMT_R32G32B32_UINT:
- case VK_FMT_R32G32B32_SINT:
- case VK_FMT_R32G32B32A32_UINT:
- case VK_FMT_R32G32B32A32_SINT:
- case VK_FMT_B8G8R8_UINT:
- case VK_FMT_B8G8R8_SINT:
- case VK_FMT_B8G8R8A8_UINT:
- case VK_FMT_B8G8R8A8_SINT:
- case VK_FMT_B10G10R10A2_UINT:
- case VK_FMT_B10G10R10A2_SINT:
+ case VK_FORMAT_R8_UINT:
+ case VK_FORMAT_R8_SINT:
+ case VK_FORMAT_R8G8_UINT:
+ case VK_FORMAT_R8G8_SINT:
+ case VK_FORMAT_R8G8B8_UINT:
+ case VK_FORMAT_R8G8B8_SINT:
+ case VK_FORMAT_R8G8B8A8_UINT:
+ case VK_FORMAT_R8G8B8A8_SINT:
+ case VK_FORMAT_R10G10B10A2_UINT:
+ case VK_FORMAT_R10G10B10A2_SINT:
+ case VK_FORMAT_R16_UINT:
+ case VK_FORMAT_R16_SINT:
+ case VK_FORMAT_R16G16_UINT:
+ case VK_FORMAT_R16G16_SINT:
+ case VK_FORMAT_R16G16B16_UINT:
+ case VK_FORMAT_R16G16B16_SINT:
+ case VK_FORMAT_R16G16B16A16_UINT:
+ case VK_FORMAT_R16G16B16A16_SINT:
+ case VK_FORMAT_R32_UINT:
+ case VK_FORMAT_R32_SINT:
+ case VK_FORMAT_R32G32_UINT:
+ case VK_FORMAT_R32G32_SINT:
+ case VK_FORMAT_R32G32B32_UINT:
+ case VK_FORMAT_R32G32B32_SINT:
+ case VK_FORMAT_R32G32B32A32_UINT:
+ case VK_FORMAT_R32G32B32A32_SINT:
+ case VK_FORMAT_B8G8R8_UINT:
+ case VK_FORMAT_B8G8R8_SINT:
+ case VK_FORMAT_B8G8R8A8_UINT:
+ case VK_FORMAT_B8G8R8A8_SINT:
+ case VK_FORMAT_B10G10R10A2_UINT:
+ case VK_FORMAT_B10G10R10A2_SINT:
is_int = true;
break;
default:
bool is_float = false;
switch (format) {
- case VK_FMT_R16_SFLOAT:
- case VK_FMT_R16G16_SFLOAT:
- case VK_FMT_R16G16B16_SFLOAT:
- case VK_FMT_R16G16B16A16_SFLOAT:
- case VK_FMT_R32_SFLOAT:
- case VK_FMT_R32G32_SFLOAT:
- case VK_FMT_R32G32B32_SFLOAT:
- case VK_FMT_R32G32B32A32_SFLOAT:
- case VK_FMT_R64_SFLOAT:
- case VK_FMT_R64G64_SFLOAT:
- case VK_FMT_R64G64B64_SFLOAT:
- case VK_FMT_R64G64B64A64_SFLOAT:
- case VK_FMT_R11G11B10_UFLOAT:
- case VK_FMT_R9G9B9E5_UFLOAT:
- case VK_FMT_BC6H_UFLOAT:
- case VK_FMT_BC6H_SFLOAT:
+ case VK_FORMAT_R16_SFLOAT:
+ case VK_FORMAT_R16G16_SFLOAT:
+ case VK_FORMAT_R16G16B16_SFLOAT:
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ case VK_FORMAT_R32_SFLOAT:
+ case VK_FORMAT_R32G32_SFLOAT:
+ case VK_FORMAT_R32G32B32_SFLOAT:
+ case VK_FORMAT_R32G32B32A32_SFLOAT:
+ case VK_FORMAT_R64_SFLOAT:
+ case VK_FORMAT_R64G64_SFLOAT:
+ case VK_FORMAT_R64G64B64_SFLOAT:
+ case VK_FORMAT_R64G64B64A64_SFLOAT:
+ case VK_FORMAT_R11G11B10_UFLOAT:
+ case VK_FORMAT_R9G9B9E5_UFLOAT:
+ case VK_FORMAT_BC6H_UFLOAT:
+ case VK_FORMAT_BC6H_SFLOAT:
is_float = true;
break;
default:
bool is_srgb = false;
switch (format) {
- case VK_FMT_R8_SRGB:
- case VK_FMT_R8G8_SRGB:
- case VK_FMT_R8G8B8_SRGB:
- case VK_FMT_R8G8B8A8_SRGB:
- case VK_FMT_BC1_RGB_SRGB:
- case VK_FMT_BC2_SRGB:
- case VK_FMT_BC3_SRGB:
- case VK_FMT_BC7_SRGB:
- case VK_FMT_ASTC_4x4_SRGB:
- case VK_FMT_ASTC_5x4_SRGB:
- case VK_FMT_ASTC_5x5_SRGB:
- case VK_FMT_ASTC_6x5_SRGB:
- case VK_FMT_ASTC_6x6_SRGB:
- case VK_FMT_ASTC_8x5_SRGB:
- case VK_FMT_ASTC_8x6_SRGB:
- case VK_FMT_ASTC_8x8_SRGB:
- case VK_FMT_ASTC_10x5_SRGB:
- case VK_FMT_ASTC_10x6_SRGB:
- case VK_FMT_ASTC_10x8_SRGB:
- case VK_FMT_ASTC_10x10_SRGB:
- case VK_FMT_ASTC_12x10_SRGB:
- case VK_FMT_ASTC_12x12_SRGB:
- case VK_FMT_B8G8R8_SRGB:
- case VK_FMT_B8G8R8A8_SRGB:
+ case VK_FORMAT_R8_SRGB:
+ case VK_FORMAT_R8G8_SRGB:
+ case VK_FORMAT_R8G8B8_SRGB:
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ case VK_FORMAT_BC1_RGB_SRGB:
+ case VK_FORMAT_BC2_SRGB:
+ case VK_FORMAT_BC3_SRGB:
+ case VK_FORMAT_BC7_SRGB:
+ case VK_FORMAT_ASTC_4x4_SRGB:
+ case VK_FORMAT_ASTC_5x4_SRGB:
+ case VK_FORMAT_ASTC_5x5_SRGB:
+ case VK_FORMAT_ASTC_6x5_SRGB:
+ case VK_FORMAT_ASTC_6x6_SRGB:
+ case VK_FORMAT_ASTC_8x5_SRGB:
+ case VK_FORMAT_ASTC_8x6_SRGB:
+ case VK_FORMAT_ASTC_8x8_SRGB:
+ case VK_FORMAT_ASTC_10x5_SRGB:
+ case VK_FORMAT_ASTC_10x6_SRGB:
+ case VK_FORMAT_ASTC_10x8_SRGB:
+ case VK_FORMAT_ASTC_10x10_SRGB:
+ case VK_FORMAT_ASTC_12x10_SRGB:
+ case VK_FORMAT_ASTC_12x12_SRGB:
+ case VK_FORMAT_B8G8R8_SRGB:
+ case VK_FORMAT_B8G8R8A8_SRGB:
is_srgb = true;
break;
default:
bool icd_format_is_compressed(VkFormat format)
{
switch (format) {
- case VK_FMT_BC1_RGB_UNORM:
- case VK_FMT_BC1_RGB_SRGB:
- case VK_FMT_BC2_UNORM:
- case VK_FMT_BC2_SRGB:
- case VK_FMT_BC3_UNORM:
- case VK_FMT_BC3_SRGB:
- case VK_FMT_BC4_UNORM:
- case VK_FMT_BC4_SNORM:
- case VK_FMT_BC5_UNORM:
- case VK_FMT_BC5_SNORM:
- case VK_FMT_BC6H_UFLOAT:
- case VK_FMT_BC6H_SFLOAT:
- case VK_FMT_BC7_UNORM:
- case VK_FMT_BC7_SRGB:
- case VK_FMT_ETC2_R8G8B8_UNORM:
- case VK_FMT_ETC2_R8G8B8A1_UNORM:
- case VK_FMT_ETC2_R8G8B8A8_UNORM:
- case VK_FMT_EAC_R11_UNORM:
- case VK_FMT_EAC_R11_SNORM:
- case VK_FMT_EAC_R11G11_UNORM:
- case VK_FMT_EAC_R11G11_SNORM:
- case VK_FMT_ASTC_4x4_UNORM:
- case VK_FMT_ASTC_4x4_SRGB:
- case VK_FMT_ASTC_5x4_UNORM:
- case VK_FMT_ASTC_5x4_SRGB:
- case VK_FMT_ASTC_5x5_UNORM:
- case VK_FMT_ASTC_5x5_SRGB:
- case VK_FMT_ASTC_6x5_UNORM:
- case VK_FMT_ASTC_6x5_SRGB:
- case VK_FMT_ASTC_6x6_UNORM:
- case VK_FMT_ASTC_6x6_SRGB:
- case VK_FMT_ASTC_8x5_UNORM:
- case VK_FMT_ASTC_8x5_SRGB:
- case VK_FMT_ASTC_8x6_UNORM:
- case VK_FMT_ASTC_8x6_SRGB:
- case VK_FMT_ASTC_8x8_UNORM:
- case VK_FMT_ASTC_8x8_SRGB:
- case VK_FMT_ASTC_10x5_UNORM:
- case VK_FMT_ASTC_10x5_SRGB:
- case VK_FMT_ASTC_10x6_UNORM:
- case VK_FMT_ASTC_10x6_SRGB:
- case VK_FMT_ASTC_10x8_UNORM:
- case VK_FMT_ASTC_10x8_SRGB:
- case VK_FMT_ASTC_10x10_UNORM:
- case VK_FMT_ASTC_10x10_SRGB:
- case VK_FMT_ASTC_12x10_UNORM:
- case VK_FMT_ASTC_12x10_SRGB:
- case VK_FMT_ASTC_12x12_UNORM:
- case VK_FMT_ASTC_12x12_SRGB:
+ case VK_FORMAT_BC1_RGB_UNORM:
+ case VK_FORMAT_BC1_RGB_SRGB:
+ case VK_FORMAT_BC2_UNORM:
+ case VK_FORMAT_BC2_SRGB:
+ case VK_FORMAT_BC3_UNORM:
+ case VK_FORMAT_BC3_SRGB:
+ case VK_FORMAT_BC4_UNORM:
+ case VK_FORMAT_BC4_SNORM:
+ case VK_FORMAT_BC5_UNORM:
+ case VK_FORMAT_BC5_SNORM:
+ case VK_FORMAT_BC6H_UFLOAT:
+ case VK_FORMAT_BC6H_SFLOAT:
+ case VK_FORMAT_BC7_UNORM:
+ case VK_FORMAT_BC7_SRGB:
+ case VK_FORMAT_ETC2_R8G8B8_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8A1_UNORM:
+ case VK_FORMAT_ETC2_R8G8B8A8_UNORM:
+ case VK_FORMAT_EAC_R11_UNORM:
+ case VK_FORMAT_EAC_R11_SNORM:
+ case VK_FORMAT_EAC_R11G11_UNORM:
+ case VK_FORMAT_EAC_R11G11_SNORM:
+ case VK_FORMAT_ASTC_4x4_UNORM:
+ case VK_FORMAT_ASTC_4x4_SRGB:
+ case VK_FORMAT_ASTC_5x4_UNORM:
+ case VK_FORMAT_ASTC_5x4_SRGB:
+ case VK_FORMAT_ASTC_5x5_UNORM:
+ case VK_FORMAT_ASTC_5x5_SRGB:
+ case VK_FORMAT_ASTC_6x5_UNORM:
+ case VK_FORMAT_ASTC_6x5_SRGB:
+ case VK_FORMAT_ASTC_6x6_UNORM:
+ case VK_FORMAT_ASTC_6x6_SRGB:
+ case VK_FORMAT_ASTC_8x5_UNORM:
+ case VK_FORMAT_ASTC_8x5_SRGB:
+ case VK_FORMAT_ASTC_8x6_UNORM:
+ case VK_FORMAT_ASTC_8x6_SRGB:
+ case VK_FORMAT_ASTC_8x8_UNORM:
+ case VK_FORMAT_ASTC_8x8_SRGB:
+ case VK_FORMAT_ASTC_10x5_UNORM:
+ case VK_FORMAT_ASTC_10x5_SRGB:
+ case VK_FORMAT_ASTC_10x6_UNORM:
+ case VK_FORMAT_ASTC_10x6_SRGB:
+ case VK_FORMAT_ASTC_10x8_UNORM:
+ case VK_FORMAT_ASTC_10x8_SRGB:
+ case VK_FORMAT_ASTC_10x10_UNORM:
+ case VK_FORMAT_ASTC_10x10_SRGB:
+ case VK_FORMAT_ASTC_12x10_UNORM:
+ case VK_FORMAT_ASTC_12x10_SRGB:
+ case VK_FORMAT_ASTC_12x12_UNORM:
+ case VK_FORMAT_ASTC_12x12_SRGB:
return true;
default:
return false;
{
/* assume little-endian */
switch (format) {
- case VK_FMT_UNDEFINED:
+ case VK_FORMAT_UNDEFINED:
break;
- case VK_FMT_R4G4_UNORM:
- case VK_FMT_R4G4_USCALED:
+ case VK_FORMAT_R4G4_UNORM:
+ case VK_FORMAT_R4G4_USCALED:
((uint8_t *) value)[0] = (color[0] & 0xf) << 0 |
(color[1] & 0xf) << 4;
break;
- case VK_FMT_R4G4B4A4_UNORM:
- case VK_FMT_R4G4B4A4_USCALED:
+ case VK_FORMAT_R4G4B4A4_UNORM:
+ case VK_FORMAT_R4G4B4A4_USCALED:
((uint16_t *) value)[0] = (color[0] & 0xf) << 0 |
(color[1] & 0xf) << 4 |
(color[2] & 0xf) << 8 |
(color[3] & 0xf) << 12;
break;
- case VK_FMT_R5G6B5_UNORM:
- case VK_FMT_R5G6B5_USCALED:
+ case VK_FORMAT_R5G6B5_UNORM:
+ case VK_FORMAT_R5G6B5_USCALED:
((uint16_t *) value)[0] = (color[0] & 0x1f) << 0 |
(color[1] & 0x3f) << 5 |
(color[2] & 0x1f) << 11;
break;
- case VK_FMT_B5G6R5_UNORM:
+ case VK_FORMAT_B5G6R5_UNORM:
((uint16_t *) value)[0] = (color[2] & 0x1f) << 0 |
(color[1] & 0x3f) << 5 |
(color[0] & 0x1f) << 11;
break;
- case VK_FMT_R5G5B5A1_UNORM:
- case VK_FMT_R5G5B5A1_USCALED:
+ case VK_FORMAT_R5G5B5A1_UNORM:
+ case VK_FORMAT_R5G5B5A1_USCALED:
((uint16_t *) value)[0] = (color[0] & 0x1f) << 0 |
(color[1] & 0x1f) << 5 |
(color[2] & 0x1f) << 10 |
(color[3] & 0x1) << 15;
break;
- case VK_FMT_R8_UNORM:
- case VK_FMT_R8_SNORM:
- case VK_FMT_R8_USCALED:
- case VK_FMT_R8_SSCALED:
- case VK_FMT_R8_UINT:
- case VK_FMT_R8_SINT:
- case VK_FMT_R8_SRGB:
+ case VK_FORMAT_R8_UNORM:
+ case VK_FORMAT_R8_SNORM:
+ case VK_FORMAT_R8_USCALED:
+ case VK_FORMAT_R8_SSCALED:
+ case VK_FORMAT_R8_UINT:
+ case VK_FORMAT_R8_SINT:
+ case VK_FORMAT_R8_SRGB:
((uint8_t *) value)[0] = (uint8_t) color[0];
break;
- case VK_FMT_R8G8_UNORM:
- case VK_FMT_R8G8_SNORM:
- case VK_FMT_R8G8_USCALED:
- case VK_FMT_R8G8_SSCALED:
- case VK_FMT_R8G8_UINT:
- case VK_FMT_R8G8_SINT:
- case VK_FMT_R8G8_SRGB:
+ case VK_FORMAT_R8G8_UNORM:
+ case VK_FORMAT_R8G8_SNORM:
+ case VK_FORMAT_R8G8_USCALED:
+ case VK_FORMAT_R8G8_SSCALED:
+ case VK_FORMAT_R8G8_UINT:
+ case VK_FORMAT_R8G8_SINT:
+ case VK_FORMAT_R8G8_SRGB:
((uint8_t *) value)[0] = (uint8_t) color[0];
((uint8_t *) value)[1] = (uint8_t) color[1];
break;
- case VK_FMT_R8G8B8A8_UNORM:
- case VK_FMT_R8G8B8A8_SNORM:
- case VK_FMT_R8G8B8A8_USCALED:
- case VK_FMT_R8G8B8A8_SSCALED:
- case VK_FMT_R8G8B8A8_UINT:
- case VK_FMT_R8G8B8A8_SINT:
- case VK_FMT_R8G8B8A8_SRGB:
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_R8G8B8A8_SNORM:
+ case VK_FORMAT_R8G8B8A8_USCALED:
+ case VK_FORMAT_R8G8B8A8_SSCALED:
+ case VK_FORMAT_R8G8B8A8_UINT:
+ case VK_FORMAT_R8G8B8A8_SINT:
+ case VK_FORMAT_R8G8B8A8_SRGB:
((uint8_t *) value)[0] = (uint8_t) color[0];
((uint8_t *) value)[1] = (uint8_t) color[1];
((uint8_t *) value)[2] = (uint8_t) color[2];
((uint8_t *) value)[3] = (uint8_t) color[3];
break;
- case VK_FMT_B8G8R8A8_UNORM:
- case VK_FMT_B8G8R8A8_SRGB:
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_B8G8R8A8_SRGB:
((uint8_t *) value)[0] = (uint8_t) color[2];
((uint8_t *) value)[1] = (uint8_t) color[1];
((uint8_t *) value)[2] = (uint8_t) color[0];
((uint8_t *) value)[3] = (uint8_t) color[3];
break;
- case VK_FMT_R11G11B10_UFLOAT:
+ case VK_FORMAT_R11G11B10_UFLOAT:
((uint32_t *) value)[0] = (color[0] & 0x7ff) << 0 |
(color[1] & 0x7ff) << 11 |
(color[2] & 0x3ff) << 22;
break;
- case VK_FMT_R10G10B10A2_UNORM:
- case VK_FMT_R10G10B10A2_SNORM:
- case VK_FMT_R10G10B10A2_USCALED:
- case VK_FMT_R10G10B10A2_SSCALED:
- case VK_FMT_R10G10B10A2_UINT:
- case VK_FMT_R10G10B10A2_SINT:
+ case VK_FORMAT_R10G10B10A2_UNORM:
+ case VK_FORMAT_R10G10B10A2_SNORM:
+ case VK_FORMAT_R10G10B10A2_USCALED:
+ case VK_FORMAT_R10G10B10A2_SSCALED:
+ case VK_FORMAT_R10G10B10A2_UINT:
+ case VK_FORMAT_R10G10B10A2_SINT:
((uint32_t *) value)[0] = (color[0] & 0x3ff) << 0 |
(color[1] & 0x3ff) << 10 |
(color[2] & 0x3ff) << 20 |
(color[3] & 0x3) << 30;
break;
- case VK_FMT_R16_UNORM:
- case VK_FMT_R16_SNORM:
- case VK_FMT_R16_USCALED:
- case VK_FMT_R16_SSCALED:
- case VK_FMT_R16_UINT:
- case VK_FMT_R16_SINT:
- case VK_FMT_R16_SFLOAT:
+ case VK_FORMAT_R16_UNORM:
+ case VK_FORMAT_R16_SNORM:
+ case VK_FORMAT_R16_USCALED:
+ case VK_FORMAT_R16_SSCALED:
+ case VK_FORMAT_R16_UINT:
+ case VK_FORMAT_R16_SINT:
+ case VK_FORMAT_R16_SFLOAT:
((uint16_t *) value)[0] = (uint16_t) color[0];
break;
- case VK_FMT_R16G16_UNORM:
- case VK_FMT_R16G16_SNORM:
- case VK_FMT_R16G16_USCALED:
- case VK_FMT_R16G16_SSCALED:
- case VK_FMT_R16G16_UINT:
- case VK_FMT_R16G16_SINT:
- case VK_FMT_R16G16_SFLOAT:
+ case VK_FORMAT_R16G16_UNORM:
+ case VK_FORMAT_R16G16_SNORM:
+ case VK_FORMAT_R16G16_USCALED:
+ case VK_FORMAT_R16G16_SSCALED:
+ case VK_FORMAT_R16G16_UINT:
+ case VK_FORMAT_R16G16_SINT:
+ case VK_FORMAT_R16G16_SFLOAT:
((uint16_t *) value)[0] = (uint16_t) color[0];
((uint16_t *) value)[1] = (uint16_t) color[1];
break;
- case VK_FMT_R16G16B16A16_UNORM:
- case VK_FMT_R16G16B16A16_SNORM:
- case VK_FMT_R16G16B16A16_USCALED:
- case VK_FMT_R16G16B16A16_SSCALED:
- case VK_FMT_R16G16B16A16_UINT:
- case VK_FMT_R16G16B16A16_SINT:
- case VK_FMT_R16G16B16A16_SFLOAT:
+ case VK_FORMAT_R16G16B16A16_UNORM:
+ case VK_FORMAT_R16G16B16A16_SNORM:
+ case VK_FORMAT_R16G16B16A16_USCALED:
+ case VK_FORMAT_R16G16B16A16_SSCALED:
+ case VK_FORMAT_R16G16B16A16_UINT:
+ case VK_FORMAT_R16G16B16A16_SINT:
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
((uint16_t *) value)[0] = (uint16_t) color[0];
((uint16_t *) value)[1] = (uint16_t) color[1];
((uint16_t *) value)[2] = (uint16_t) color[2];
((uint16_t *) value)[3] = (uint16_t) color[3];
break;
- case VK_FMT_R32_UINT:
- case VK_FMT_R32_SINT:
- case VK_FMT_R32_SFLOAT:
+ case VK_FORMAT_R32_UINT:
+ case VK_FORMAT_R32_SINT:
+ case VK_FORMAT_R32_SFLOAT:
((uint32_t *) value)[0] = color[0];
break;
- case VK_FMT_R32G32_UINT:
- case VK_FMT_R32G32_SINT:
- case VK_FMT_R32G32_SFLOAT:
+ case VK_FORMAT_R32G32_UINT:
+ case VK_FORMAT_R32G32_SINT:
+ case VK_FORMAT_R32G32_SFLOAT:
((uint32_t *) value)[0] = color[0];
((uint32_t *) value)[1] = color[1];
break;
- case VK_FMT_R32G32B32_UINT:
- case VK_FMT_R32G32B32_SINT:
- case VK_FMT_R32G32B32_SFLOAT:
+ case VK_FORMAT_R32G32B32_UINT:
+ case VK_FORMAT_R32G32B32_SINT:
+ case VK_FORMAT_R32G32B32_SFLOAT:
((uint32_t *) value)[0] = color[0];
((uint32_t *) value)[1] = color[1];
((uint32_t *) value)[2] = color[2];
break;
- case VK_FMT_R32G32B32A32_UINT:
- case VK_FMT_R32G32B32A32_SINT:
- case VK_FMT_R32G32B32A32_SFLOAT:
+ case VK_FORMAT_R32G32B32A32_UINT:
+ case VK_FORMAT_R32G32B32A32_SINT:
+ case VK_FORMAT_R32G32B32A32_SFLOAT:
((uint32_t *) value)[0] = color[0];
((uint32_t *) value)[1] = color[1];
((uint32_t *) value)[2] = color[2];
((uint32_t *) value)[3] = color[3];
break;
- case VK_FMT_D16_UNORM_S8_UINT:
+ case VK_FORMAT_D16_UNORM_S8_UINT:
((uint16_t *) value)[0] = (uint16_t) color[0];
((char *) value)[2] = (uint8_t) color[1];
break;
- case VK_FMT_D32_SFLOAT_S8_UINT:
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
((uint32_t *) value)[0] = (uint32_t) color[0];
((char *) value)[4] = (uint8_t) color[1];
break;
- case VK_FMT_R9G9B9E5_UFLOAT:
+ case VK_FORMAT_R9G9B9E5_UFLOAT:
((uint32_t *) value)[0] = (color[0] & 0x1ff) << 0 |
(color[1] & 0x1ff) << 9 |
(color[2] & 0x1ff) << 18 |
(color[3] & 0x1f) << 27;
break;
- case VK_FMT_BC1_RGB_UNORM:
- case VK_FMT_BC1_RGB_SRGB:
- case VK_FMT_BC4_UNORM:
- case VK_FMT_BC4_SNORM:
+ case VK_FORMAT_BC1_RGB_UNORM:
+ case VK_FORMAT_BC1_RGB_SRGB:
+ case VK_FORMAT_BC4_UNORM:
+ case VK_FORMAT_BC4_SNORM:
memcpy(value, color, 8);
break;
- case VK_FMT_BC2_UNORM:
- case VK_FMT_BC2_SRGB:
- case VK_FMT_BC3_UNORM:
- case VK_FMT_BC3_SRGB:
- case VK_FMT_BC5_UNORM:
- case VK_FMT_BC5_SNORM:
- case VK_FMT_BC6H_UFLOAT:
- case VK_FMT_BC6H_SFLOAT:
- case VK_FMT_BC7_UNORM:
- case VK_FMT_BC7_SRGB:
+ case VK_FORMAT_BC2_UNORM:
+ case VK_FORMAT_BC2_SRGB:
+ case VK_FORMAT_BC3_UNORM:
+ case VK_FORMAT_BC3_SRGB:
+ case VK_FORMAT_BC5_UNORM:
+ case VK_FORMAT_BC5_SNORM:
+ case VK_FORMAT_BC6H_UFLOAT:
+ case VK_FORMAT_BC6H_SFLOAT:
+ case VK_FORMAT_BC7_UNORM:
+ case VK_FORMAT_BC7_SRGB:
memcpy(value, color, 16);
break;
- case VK_FMT_R8G8B8_UNORM:
- case VK_FMT_R8G8B8_SNORM:
- case VK_FMT_R8G8B8_USCALED:
- case VK_FMT_R8G8B8_SSCALED:
- case VK_FMT_R8G8B8_UINT:
- case VK_FMT_R8G8B8_SINT:
- case VK_FMT_R8G8B8_SRGB:
+ case VK_FORMAT_R8G8B8_UNORM:
+ case VK_FORMAT_R8G8B8_SNORM:
+ case VK_FORMAT_R8G8B8_USCALED:
+ case VK_FORMAT_R8G8B8_SSCALED:
+ case VK_FORMAT_R8G8B8_UINT:
+ case VK_FORMAT_R8G8B8_SINT:
+ case VK_FORMAT_R8G8B8_SRGB:
((uint8_t *) value)[0] = (uint8_t) color[0];
((uint8_t *) value)[1] = (uint8_t) color[1];
((uint8_t *) value)[2] = (uint8_t) color[2];
break;
- case VK_FMT_R16G16B16_UNORM:
- case VK_FMT_R16G16B16_SNORM:
- case VK_FMT_R16G16B16_USCALED:
- case VK_FMT_R16G16B16_SSCALED:
- case VK_FMT_R16G16B16_UINT:
- case VK_FMT_R16G16B16_SINT:
- case VK_FMT_R16G16B16_SFLOAT:
+ case VK_FORMAT_R16G16B16_UNORM:
+ case VK_FORMAT_R16G16B16_SNORM:
+ case VK_FORMAT_R16G16B16_USCALED:
+ case VK_FORMAT_R16G16B16_SSCALED:
+ case VK_FORMAT_R16G16B16_UINT:
+ case VK_FORMAT_R16G16B16_SINT:
+ case VK_FORMAT_R16G16B16_SFLOAT:
((uint16_t *) value)[0] = (uint16_t) color[0];
((uint16_t *) value)[1] = (uint16_t) color[1];
((uint16_t *) value)[2] = (uint16_t) color[2];
break;
- case VK_FMT_B10G10R10A2_UNORM:
- case VK_FMT_B10G10R10A2_SNORM:
- case VK_FMT_B10G10R10A2_USCALED:
- case VK_FMT_B10G10R10A2_SSCALED:
- case VK_FMT_B10G10R10A2_UINT:
- case VK_FMT_B10G10R10A2_SINT:
+ case VK_FORMAT_B10G10R10A2_UNORM:
+ case VK_FORMAT_B10G10R10A2_SNORM:
+ case VK_FORMAT_B10G10R10A2_USCALED:
+ case VK_FORMAT_B10G10R10A2_SSCALED:
+ case VK_FORMAT_B10G10R10A2_UINT:
+ case VK_FORMAT_B10G10R10A2_SINT:
((uint32_t *) value)[0] = (color[2] & 0x3ff) << 0 |
(color[1] & 0x3ff) << 10 |
(color[0] & 0x3ff) << 20 |
(color[3] & 0x3) << 30;
break;
- case VK_FMT_R64_SFLOAT:
+ case VK_FORMAT_R64_SFLOAT:
/* higher 32 bits always 0 */
((uint64_t *) value)[0] = color[0];
break;
- case VK_FMT_R64G64_SFLOAT:
+ case VK_FORMAT_R64G64_SFLOAT:
((uint64_t *) value)[0] = color[0];
((uint64_t *) value)[1] = color[1];
break;
- case VK_FMT_R64G64B64_SFLOAT:
+ case VK_FORMAT_R64G64B64_SFLOAT:
((uint64_t *) value)[0] = color[0];
((uint64_t *) value)[1] = color[1];
((uint64_t *) value)[2] = color[2];
break;
- case VK_FMT_R64G64B64A64_SFLOAT:
+ case VK_FORMAT_R64G64B64A64_SFLOAT:
((uint64_t *) value)[0] = color[0];
((uint64_t *) value)[1] = color[1];
((uint64_t *) value)[2] = color[2];
static inline bool icd_format_is_undef(VkFormat format)
{
- return (format == VK_FMT_UNDEFINED);
+ return (format == VK_FORMAT_UNDEFINED);
}
bool icd_format_is_ds(VkFormat format);
alloc_cb = &default_alloc_cb;
instance = alloc_cb->pfnAlloc(alloc_cb->pUserData, sizeof(*instance), 0,
- VK_SYSTEM_ALLOC_API_OBJECT);
+ VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
if (!instance)
return NULL;
name = (app_info->pAppName) ? app_info->pAppName : "unnamed";
len = strlen(name);
instance->name = alloc_cb->pfnAlloc(alloc_cb->pUserData, len + 1, 0,
- VK_SYSTEM_ALLOC_INTERNAL);
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL);
if (!instance->name) {
alloc_cb->pfnFree(alloc_cb->pUserData, instance);
return NULL;
if (!logger) {
logger = icd_instance_alloc(instance, sizeof(*logger), 0,
- VK_SYSTEM_ALLOC_DEBUG);
+ VK_SYSTEM_ALLOC_TYPE_DEBUG);
if (!logger)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
logger->func = func;
logger->next = instance->loggers;
uint32_t *count;
switch (type) {
- case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
+ case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
{
s = sizeof(VkMemoryRequirements);
*size = s;
memset(data, 0, s);
break;
}
- case VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
+ case VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT:
*size = sizeof(uint32_t);
if (data == NULL)
return ret;
gpu = malloc(sizeof(*gpu));
if (!gpu)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
memset(gpu, 0, sizeof(*gpu));
// Initialize pointer to loader's dispatch table with ICD_LOADER_MAGIC
queue = (struct nulldrv_queue *) nulldrv_base_create(dev, sizeof(*queue),
VK_DBG_OBJECT_QUEUE);
if (!queue)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
queue->dev = dev;
ooxx = malloc(sizeof(*ooxx));
if (!ooxx)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
memset(ooxx, 0, sizeof(*ooxx));
dev = (struct nulldrv_dev *) nulldrv_base_create(NULL, sizeof(*dev),
VK_DBG_OBJECT_DEVICE);
if (!dev)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
for (i = 0; i < info->extensionCount; i++) {
const enum nulldrv_ext_type ext = nulldrv_gpu_lookup_extension(gpu,
return VK_SUCCESS;
}
-static struct nulldrv_gpu *nulldrv_gpu(VkPhysicalGpu gpu)
+static struct nulldrv_gpu *nulldrv_gpu(VkPhysicalDevice gpu)
{
return (struct nulldrv_gpu *) gpu;
}
view = (struct nulldrv_rt_view *) nulldrv_base_create(dev, sizeof(*view),
VK_DBG_OBJECT_COLOR_TARGET_VIEW);
if (!view)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*view_ret = view;
fence = (struct nulldrv_fence *) nulldrv_base_create(dev, sizeof(*fence),
VK_DBG_OBJECT_FENCE);
if (!fence)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*fence_ret = fence;
VkResult ret = VK_SUCCESS;
switch (type) {
- case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
+ case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
{
VkMemoryRequirements *mem_req = data;
img = (struct nulldrv_img *) nulldrv_base_create(dev, sizeof(*img),
VK_DBG_OBJECT_IMAGE);
if (!img)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
img->type = info->imageType;
img->depth = info->extent.depth;
mem = (struct nulldrv_mem *) nulldrv_base_create(dev, sizeof(*mem),
VK_DBG_OBJECT_GPU_MEMORY);
if (!mem)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
mem->bo = malloc(info->allocationSize);
if (!mem->bo) {
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
mem->size = info->allocationSize;
view = (struct nulldrv_ds_view *) nulldrv_base_create(dev, sizeof(*view),
VK_DBG_OBJECT_DEPTH_STENCIL_VIEW);
if (!view)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
view->img = img;
sampler = (struct nulldrv_sampler *) nulldrv_base_create(dev,
sizeof(*sampler), VK_DBG_OBJECT_SAMPLER);
if (!sampler)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*sampler_ret = sampler;
view = (struct nulldrv_img_view *) nulldrv_base_create(dev, sizeof(*view),
VK_DBG_OBJECT_IMAGE_VIEW);
if (!view)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
view->img = img;
view->min_lod = info->minLod;
return mem->bo;
}
-static struct nulldrv_mem *nulldrv_mem(VkGpuMemory mem)
+static struct nulldrv_mem *nulldrv_mem(VkDeviceMemory mem)
{
return (struct nulldrv_mem *) mem;
}
VkResult ret = VK_SUCCESS;
switch (type) {
- case VK_INFO_TYPE_MEMORY_REQUIREMENTS:
+ case VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS:
{
VkMemoryRequirements *mem_req = data;
buf = (struct nulldrv_buf *) nulldrv_base_create(dev, sizeof(*buf),
VK_DBG_OBJECT_BUFFER);
if (!buf)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
buf->size = info->size;
buf->usage = info->usage;
nulldrv_base_create(dev, sizeof(*layout),
VK_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT);
if (!layout)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*layout_ret = layout;
nulldrv_base_create(dev, sizeof(*chain),
VK_DBG_OBJECT_DESCRIPTOR_SET_LAYOUT_CHAIN);
if (!chain)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*chain_ret = chain;
sh = (struct nulldrv_shader *) nulldrv_base_create(dev, sizeof(*sh),
VK_DBG_OBJECT_SHADER);
if (!sh)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*sh_ret = sh;
nulldrv_base_create(dev, sizeof(*pipeline),
VK_DBG_OBJECT_GRAPHICS_PIPELINE);
if (!pipeline)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*pipeline_ret = pipeline;
state = (struct nulldrv_dynamic_vp *) nulldrv_base_create(dev,
sizeof(*state), VK_DBG_OBJECT_VIEWPORT_STATE);
if (!state)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*state_ret = state;
state = (struct nulldrv_dynamic_rs *) nulldrv_base_create(dev,
sizeof(*state), VK_DBG_OBJECT_RASTER_STATE);
if (!state)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*state_ret = state;
state = (struct nulldrv_dynamic_cb *) nulldrv_base_create(dev,
sizeof(*state), VK_DBG_OBJECT_COLOR_BLEND_STATE);
if (!state)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*state_ret = state;
state = (struct nulldrv_dynamic_ds *) nulldrv_base_create(dev,
sizeof(*state), VK_DBG_OBJECT_DEPTH_STENCIL_STATE);
if (!state)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*state_ret = state;
cmd = (struct nulldrv_cmd *) nulldrv_base_create(dev, sizeof(*cmd),
VK_DBG_OBJECT_CMD_BUFFER);
if (!cmd)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*cmd_ret = cmd;
nulldrv_base_create(dev, sizeof(*pool),
VK_DBG_OBJECT_DESCRIPTOR_POOL);
if (!pool)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
pool->dev = dev;
nulldrv_base_create(dev, sizeof(*set),
VK_DBG_OBJECT_DESCRIPTOR_SET);
if (!set)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
set->ooxx = dev->desc_ooxx;
set->layout = layout;
fb = (struct nulldrv_framebuffer *) nulldrv_base_create(dev, sizeof(*fb),
VK_DBG_OBJECT_FRAMEBUFFER);
if (!fb)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*fb_ret = fb;
rp = (struct nulldrv_render_pass *) nulldrv_base_create(dev, sizeof(*rp),
VK_DBG_OBJECT_RENDER_PASS);
if (!rp)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
*rp_ret = rp;
view = (struct nulldrv_buf_view *) nulldrv_base_create(dev, sizeof(*view),
VK_DBG_OBJECT_BUFFER_VIEW);
if (!view)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
view->buf = buf;
uint32_t startCounter,
uint32_t counterCount,
VkBuffer srcBuffer,
- VkGpuSize srcOffset)
+ VkDeviceSize srcOffset)
{
NULLDRV_LOG_FUNC;
}
uint32_t startCounter,
uint32_t counterCount,
VkBuffer destBuffer,
- VkGpuSize destOffset)
+ VkDeviceSize destOffset)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdUpdateBuffer(
VkCmdBuffer cmdBuffer,
VkBuffer destBuffer,
- VkGpuSize destOffset,
- VkGpuSize dataSize,
+ VkDeviceSize destOffset,
+ VkDeviceSize dataSize,
const uint32_t* pData)
{
NULLDRV_LOG_FUNC;
ICD_EXPORT void VKAPI vkCmdFillBuffer(
VkCmdBuffer cmdBuffer,
VkBuffer destBuffer,
- VkGpuSize destOffset,
- VkGpuSize fillSize,
+ VkDeviceSize destOffset,
+ VkDeviceSize fillSize,
uint32_t data)
{
NULLDRV_LOG_FUNC;
uint32_t startQuery,
uint32_t queryCount,
VkBuffer destBuffer,
- VkGpuSize destOffset,
- VkGpuSize destStride,
+ VkDeviceSize destOffset,
+ VkDeviceSize destStride,
VkFlags flags)
{
NULLDRV_LOG_FUNC;
VkCmdBuffer cmdBuffer,
VkTimestampType timestampType,
VkBuffer destBuffer,
- VkGpuSize destOffset)
+ VkDeviceSize destOffset)
{
NULLDRV_LOG_FUNC;
}
uint32_t startBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
- const VkGpuSize* pOffsets)
+ const VkDeviceSize* pOffsets)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT void VKAPI vkCmdBindIndexBuffer(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset,
+ VkDeviceSize offset,
VkIndexType indexType)
{
NULLDRV_LOG_FUNC;
ICD_EXPORT void VKAPI vkCmdDrawIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset,
+ VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
ICD_EXPORT void VKAPI vkCmdDrawIndexedIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset,
+ VkDeviceSize offset,
uint32_t count,
uint32_t stride)
{
ICD_EXPORT void VKAPI vkCmdDispatchIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset)
+ VkDeviceSize offset)
{
NULLDRV_LOG_FUNC;
}
-ICD_EXPORT void VKAPI vkCmdWaitEvents(
- VkCmdBuffer cmdBuffer,
- const VkEventWaitInfo* pWaitInfo)
+ICD_EXPORT void VKAPI vkCmdWaitEvents(
+ VkCmdBuffer cmdBuffer,
+ VkWaitEvent waitEvent,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ uint32_t memBarrierCount,
+ const void** ppMemBarriers)
{
NULLDRV_LOG_FUNC;
}
-ICD_EXPORT void VKAPI vkCmdPipelineBarrier(
- VkCmdBuffer cmdBuffer,
- const VkPipelineBarrier* pBarrier)
+ICD_EXPORT void VKAPI vkCmdPipelineBarrier(
+ VkCmdBuffer cmdBuffer,
+ VkWaitEvent waitEvent,
+ uint32_t pipeEventCount,
+ const VkPipeEvent* pPipeEvents,
+ uint32_t memBarrierCount,
+ const void** ppMemBarriers)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT VkResult VKAPI vkCreateDevice(
- VkPhysicalGpu gpu_,
+ VkPhysicalDevice gpu_,
const VkDeviceCreateInfo* pCreateInfo,
VkDevice* pDevice)
{
return VK_SUCCESS;
}
-ICD_EXPORT VkResult VKAPI vkGetGpuInfo(
- VkPhysicalGpu gpu_,
- VkPhysicalGpuInfoType infoType,
+ICD_EXPORT VkResult VKAPI vkGetPhysicalDeviceInfo(
+ VkPhysicalDevice gpu_,
+ VkPhysicalDeviceInfoType infoType,
size_t* pDataSize,
void* pData)
{
}
VkResult VKAPI vkGetPhysicalDeviceExtensionInfo(
- VkPhysicalGpu gpu,
+ VkPhysicalDevice gpu,
VkExtensionInfoType infoType,
uint32_t extensionIndex,
size_t* pDataSize,
return VK_SUCCESS;
}
-ICD_EXPORT VkResult VKAPI vkGetMultiGpuCompatibility(
- VkPhysicalGpu gpu0_,
- VkPhysicalGpu gpu1_,
- VkGpuCompatibilityInfo* pInfo)
+ICD_EXPORT VkResult VKAPI vkGetMultiDeviceCompatibility(
+ VkPhysicalDevice gpu0_,
+ VkPhysicalDevice gpu1_,
+ VkPhysicalDeviceCompatibilityInfo* pInfo)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
VkDevice device,
const VkPeerImageOpenInfo* pOpenInfo,
VkImage* pImage,
- VkGpuMemory* pMem)
+ VkDeviceMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
VkResult ret = VK_SUCCESS;
switch (infoType) {
- case VK_INFO_TYPE_SUBRESOURCE_LAYOUT:
+ case VK_SUBRESOURCE_INFO_TYPE_LAYOUT:
{
VkSubresourceLayout *layout = (VkSubresourceLayout *) pData;
ICD_EXPORT VkResult VKAPI vkAllocMemory(
VkDevice device,
const VkMemoryAllocInfo* pAllocInfo,
- VkGpuMemory* pMem)
+ VkDeviceMemory* pMem)
{
NULLDRV_LOG_FUNC;
struct nulldrv_dev *dev = nulldrv_dev(device);
}
ICD_EXPORT VkResult VKAPI vkFreeMemory(
- VkGpuMemory mem_)
+ VkDeviceMemory mem_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
}
ICD_EXPORT VkResult VKAPI vkSetMemoryPriority(
- VkGpuMemory mem_,
+ VkDeviceMemory mem_,
VkMemoryPriority priority)
{
NULLDRV_LOG_FUNC;
}
ICD_EXPORT VkResult VKAPI vkMapMemory(
- VkGpuMemory mem_,
+ VkDeviceMemory mem_,
VkFlags flags,
void** ppData)
{
}
ICD_EXPORT VkResult VKAPI vkUnmapMemory(
- VkGpuMemory mem_)
+ VkDeviceMemory mem_)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
VkDevice device,
const void* pSysMem,
size_t memSize,
- VkGpuMemory* pMem)
+ VkDeviceMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
ICD_EXPORT VkResult VKAPI vkOpenSharedMemory(
VkDevice device,
const VkMemoryOpenInfo* pOpenInfo,
- VkGpuMemory* pMem)
+ VkDeviceMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
ICD_EXPORT VkResult VKAPI vkOpenPeerMemory(
VkDevice device,
const VkPeerMemoryOpenInfo* pOpenInfo,
- VkGpuMemory* pMem)
+ VkDeviceMemory* pMem)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
inst = (struct nulldrv_instance *) nulldrv_base_create(NULL, sizeof(*inst),
VK_DBG_OBJECT_INSTANCE);
if (!inst)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
inst->obj.base.get_info = NULL;
return VK_SUCCESS;
}
-ICD_EXPORT VkResult VKAPI vkEnumerateGpus(
+ICD_EXPORT VkResult VKAPI vkEnumeratePhysicalDevices(
VkInstance instance,
- uint32_t maxGpus,
uint32_t* pGpuCount,
- VkPhysicalGpu* pGpus)
+ VkPhysicalDevice* pGpus)
{
NULLDRV_LOG_FUNC;
VkResult ret;
*pGpuCount = 1;
ret = nulldrv_gpu_add(0, 0, 0, &gpu);
if (ret == VK_SUCCESS)
- pGpus[0] = (VkPhysicalGpu) gpu;
+ pGpus[0] = (VkPhysicalDevice) gpu;
return ret;
}
ICD_EXPORT VkResult VKAPI vkEnumerateLayers(
- VkPhysicalGpu gpu,
+ VkPhysicalDevice gpu,
size_t maxLayerCount,
size_t maxStringSize,
size_t* pOutLayerCount,
VkQueue queue,
VkObject object,
uint32_t allocationIdx,
- VkGpuMemory mem_,
- VkGpuSize memOffset)
+ VkDeviceMemory mem_,
+ VkDeviceSize memOffset)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
VkQueue queue,
VkObject object,
uint32_t allocationIdx,
- VkGpuSize rangeOffset,
- VkGpuSize rangeSize,
- VkGpuMemory mem,
- VkGpuSize memOffset)
+ VkDeviceSize rangeOffset,
+ VkDeviceSize rangeSize,
+ VkDeviceMemory mem,
+ VkDeviceSize memOffset)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
ICD_EXPORT VkResult VKAPI vkQueueBindImageMemoryRange(
VkQueue queue,
VkImage image,
- uint32_t allocationIdx,
- const VkImageMemoryBindInfo* pBindInfo,
- VkGpuMemory mem,
- VkGpuSize memOffset)
+ uint32_t allocationIdx,
+ const VkImageMemoryBindInfo* pBindInfo,
+ VkDeviceMemory mem,
+ VkDeviceSize memOffset)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
uint32_t startQuery,
uint32_t queryCount,
size_t* pDataSize,
- void* pData)
+ void* pData,
+ VkQueryResultFlags flags)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
ICD_EXPORT VkResult VKAPI vkQueueAddMemReferences(
VkQueue queue,
uint32_t count,
- const VkGpuMemory* pMems)
+ const VkDeviceMemory* pMems)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
ICD_EXPORT VkResult VKAPI vkQueueRemoveMemReferences(
VkQueue queue,
uint32_t count,
- const VkGpuMemory* pMems)
+ const VkDeviceMemory* pMems)
{
NULLDRV_LOG_FUNC;
return VK_SUCCESS;
struct nulldrv_mem {
struct nulldrv_base base;
struct nulldrv_bo *bo;
- VkGpuSize size;
+ VkDeviceSize size;
};
struct nulldrv_ds_view {
struct nulldrv_buf {
struct nulldrv_obj obj;
- VkGpuSize size;
+ VkDeviceSize size;
VkFlags usage;
};
PFN_vkCreateInstance CreateInstance;
PFN_vkDestroyInstance DestroyInstance;
PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices;
- PFN_vkGetGpuInfo GetGpuInfo;
+ PFN_vkGetPhysicalDeviceInfo GetPhysicalDeviceInfo;
PFN_vkCreateDevice CreateDevice;
PFN_vkDestroyDevice DestroyDevice;
PFN_vkGetGlobalExtensionInfo GetGlobalExtensionInfo;
PFN_vkMapMemory MapMemory;
PFN_vkUnmapMemory UnmapMemory;
PFN_vkPinSystemMemory PinSystemMemory;
- PFN_vkGetMultiGpuCompatibility GetMultiGpuCompatibility;
+ PFN_vkGetMultiDeviceCompatibility GetMultiDeviceCompatibility;
PFN_vkOpenSharedMemory OpenSharedMemory;
PFN_vkOpenSharedSemaphore OpenSharedSemaphore;
PFN_vkOpenPeerMemory OpenPeerMemory;
bool32_t flip;
} VK_WSI_X11_PRESENT_INFO;
-typedef VkResult (VKAPI *PFN_vkWsiX11AssociateConnection)(VkPhysicalGpu gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo);
+typedef VkResult (VKAPI *PFN_vkWsiX11AssociateConnection)(VkPhysicalDevice gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo);
typedef VkResult (VKAPI *PFN_vkWsiX11GetMSC)(VkDevice device, xcb_window_t window, xcb_randr_crtc_t crtc, uint64_t* pMsc);
-typedef VkResult (VKAPI *PFN_vkWsiX11CreatePresentableImage)(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VkImage* pImage, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkWsiX11CreatePresentableImage)(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VkImage* pImage, VkDeviceMemory* pMem);
typedef VkResult (VKAPI *PFN_vkWsiX11QueuePresent)(VkQueue queue, const VK_WSI_X11_PRESENT_INFO* pPresentInfo, VkFence fence);
/**
* is supported.
*/
VkResult VKAPI vkWsiX11AssociateConnection(
- VkPhysicalGpu gpu,
+ VkPhysicalDevice gpu,
const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo);
/**
uint64_t* pMsc);
/**
- * Create an VkImage that can be presented. An VkGpuMemory is created
+ * Create a VkImage that can be presented. A VkDeviceMemory is created
* and bound automatically. The memory returned can only be used in
* vkQueue[Add|Remove]MemReference. Destroying the memory or binding another memory to the
* image is not allowed.
VkDevice device,
const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
VkImage* pImage,
- VkGpuMemory* pMem);
+ VkDeviceMemory* pMem);
/**
* Present an image to an X11 window. The presentation always occurs after
#endif
#endif // !defined(VK_NO_STDINT_H)
-typedef uint64_t VkGpuSize;
+typedef uint64_t VkDeviceSize;
typedef uint32_t bool32_t;
typedef uint32_t VkSampleMask;
#endif // __cplusplus
VK_DEFINE_HANDLE(VkInstance)
-VK_DEFINE_HANDLE(VkPhysicalGpu)
+VK_DEFINE_HANDLE(VkPhysicalDevice)
VK_DEFINE_HANDLE(VkBaseObject)
VK_DEFINE_SUBCLASS_HANDLE(VkDevice, VkBaseObject)
VK_DEFINE_SUBCLASS_HANDLE(VkQueue, VkBaseObject)
-VK_DEFINE_SUBCLASS_HANDLE(VkGpuMemory, VkBaseObject)
+VK_DEFINE_SUBCLASS_HANDLE(VkDeviceMemory, VkBaseObject)
VK_DEFINE_SUBCLASS_HANDLE(VkObject, VkBaseObject)
VK_DEFINE_SUBCLASS_HANDLE(VkBuffer, VkObject)
VK_DEFINE_SUBCLASS_HANDLE(VkBufferView, VkObject)
VK_DEFINE_SUBCLASS_HANDLE(VkFramebuffer, VkObject)
VK_DEFINE_SUBCLASS_HANDLE(VkRenderPass, VkObject)
-#define VK_MAX_PHYSICAL_GPU_NAME 256
+#define VK_MAX_PHYSICAL_DEVICE_NAME 256
#define VK_MAX_EXTENSION_NAME 256
#define VK_LOD_CLAMP_NONE MAX_FLOAT
#define VK_LAST_MIP_OR_SLICE 0xffffffff
+#define VK_WHOLE_SIZE UINT64_MAX
+
#define VK_TRUE 1
#define VK_FALSE 0
// to represent them. This may or may not be necessary on some compilers. The
// option to compile it out may allow compilers that warn about missing enumerants
// in switch statements to be silenced.
-#define VK_MAX_ENUM(T) T##_MAX_ENUM = 0x7FFFFFFF
+// Using this macro is not needed for flag bit enums because those aren't used
+// as storage type anywhere.
+#define VK_MAX_ENUM(Prefix) VK_##Prefix##_MAX_ENUM = 0x7FFFFFFF
+
+// This macro defines the BEGIN_RANGE, END_RANGE, NUM, and MAX_ENUM constants for
+// the enumerations.
+#define VK_ENUM_RANGE(Prefix, First, Last) \
+ VK_##Prefix##_BEGIN_RANGE = VK_##Prefix##_##First, \
+ VK_##Prefix##_END_RANGE = VK_##Prefix##_##Last, \
+ VK_NUM_##Prefix = (VK_##Prefix##_END_RANGE - VK_##Prefix##_BEGIN_RANGE + 1), \
+ VK_MAX_ENUM(Prefix)
+
+// This is a helper macro to define the value of flag bit enum values.
+#define VK_BIT(bit) (1 << (bit))
// ------------------------------------------------------------------------------------------------
// Enumerations
-
typedef enum VkMemoryPriority_
{
- VK_MEMORY_PRIORITY_UNUSED = 0x0,
- VK_MEMORY_PRIORITY_VERY_LOW = 0x1,
- VK_MEMORY_PRIORITY_LOW = 0x2,
- VK_MEMORY_PRIORITY_NORMAL = 0x3,
- VK_MEMORY_PRIORITY_HIGH = 0x4,
- VK_MEMORY_PRIORITY_VERY_HIGH = 0x5,
-
- VK_MEMORY_PRIORITY_BEGIN_RANGE = VK_MEMORY_PRIORITY_UNUSED,
- VK_MEMORY_PRIORITY_END_RANGE = VK_MEMORY_PRIORITY_VERY_HIGH,
- VK_NUM_MEMORY_PRIORITY = (VK_MEMORY_PRIORITY_END_RANGE - VK_MEMORY_PRIORITY_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkMemoryPriority)
+ VK_MEMORY_PRIORITY_UNUSED = 0x00000000,
+ VK_MEMORY_PRIORITY_VERY_LOW = 0x00000001,
+ VK_MEMORY_PRIORITY_LOW = 0x00000002,
+ VK_MEMORY_PRIORITY_NORMAL = 0x00000003,
+ VK_MEMORY_PRIORITY_HIGH = 0x00000004,
+ VK_MEMORY_PRIORITY_VERY_HIGH = 0x00000005,
+
+ VK_ENUM_RANGE(MEMORY_PRIORITY, UNUSED, VERY_HIGH)
} VkMemoryPriority;
typedef enum VkImageLayout_
VK_IMAGE_LAYOUT_CLEAR_OPTIMAL = 0x00000006, // Optimal layout when image is used only for clear operations
VK_IMAGE_LAYOUT_TRANSFER_SOURCE_OPTIMAL = 0x00000007, // Optimal layout when image is used only as source of transfer operations
VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL = 0x00000008, // Optimal layout when image is used only as destination of transfer operations
-
- VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_TRANSFER_DESTINATION_OPTIMAL,
- VK_NUM_IMAGE_LAYOUT = (VK_IMAGE_LAYOUT_END_RANGE - VK_IMAGE_LAYOUT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkImageLayout)
+
+ VK_ENUM_RANGE(IMAGE_LAYOUT, UNDEFINED, TRANSFER_DESTINATION_OPTIMAL)
} VkImageLayout;
typedef enum VkPipeEvent_
{
- VK_PIPE_EVENT_TOP_OF_PIPE = 0x00000001, // Set event before the GPU starts processing subsequent command
+ VK_PIPE_EVENT_TOP_OF_PIPE = 0x00000001, // Set event before the device starts processing subsequent command
VK_PIPE_EVENT_VERTEX_PROCESSING_COMPLETE = 0x00000002, // Set event when all pending vertex processing is complete
VK_PIPE_EVENT_LOCAL_FRAGMENT_PROCESSING_COMPLETE = 0x00000003, // Set event when all pending fragment shader executions are complete, within each fragment location
VK_PIPE_EVENT_FRAGMENT_PROCESSING_COMPLETE = 0x00000004, // Set event when all pending fragment shader executions are complete
VK_PIPE_EVENT_GRAPHICS_PIPELINE_COMPLETE = 0x00000005, // Set event when all pending graphics operations are complete
VK_PIPE_EVENT_COMPUTE_PIPELINE_COMPLETE = 0x00000006, // Set event when all pending compute operations are complete
VK_PIPE_EVENT_TRANSFER_COMPLETE = 0x00000007, // Set event when all pending transfer operations are complete
- VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE = 0x00000008, // Set event when all pending GPU work is complete
+ VK_PIPE_EVENT_COMMANDS_COMPLETE = 0x00000008, // Set event when all pending work is complete
- VK_PIPE_EVENT_BEGIN_RANGE = VK_PIPE_EVENT_TOP_OF_PIPE,
- VK_PIPE_EVENT_END_RANGE = VK_PIPE_EVENT_GPU_COMMANDS_COMPLETE,
- VK_NUM_PIPE_EVENT = (VK_PIPE_EVENT_END_RANGE - VK_PIPE_EVENT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkPipeEvent)
+ VK_ENUM_RANGE(PIPE_EVENT, TOP_OF_PIPE, COMMANDS_COMPLETE)
} VkPipeEvent;
typedef enum VkWaitEvent_
{
- VK_WAIT_EVENT_TOP_OF_PIPE = 0x00000001, // Wait event before the GPU starts processing subsequent commands
+ VK_WAIT_EVENT_TOP_OF_PIPE = 0x00000001, // Wait event before the device starts processing subsequent commands
VK_WAIT_EVENT_BEFORE_RASTERIZATION = 0x00000002, // Wait event before rasterizing subsequent primitives
- VK_WAIT_EVENT_BEGIN_RANGE = VK_WAIT_EVENT_TOP_OF_PIPE,
- VK_WAIT_EVENT_END_RANGE = VK_WAIT_EVENT_BEFORE_RASTERIZATION,
- VK_NUM_WAIT_EVENT = (VK_WAIT_EVENT_END_RANGE - VK_WAIT_EVENT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkWaitEvent)
+ VK_ENUM_RANGE(WAIT_EVENT, TOP_OF_PIPE, BEFORE_RASTERIZATION)
} VkWaitEvent;
-typedef enum VkMemoryOutputFlags_
-{
- VK_MEMORY_OUTPUT_CPU_WRITE_BIT = 0x00000001, // Controls output coherency of CPU writes
- VK_MEMORY_OUTPUT_SHADER_WRITE_BIT = 0x00000002, // Controls output coherency of generic shader writes
- VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT = 0x00000004, // Controls output coherency of color attachment writes
- VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000008, // Controls output coherency of depth/stencil attachment writes
- VK_MEMORY_OUTPUT_TRANSFER_BIT = 0x00000010, // Controls output coherency of transfer operations
- VK_MAX_ENUM(VkMemoryOutputFlags)
-} VkMemoryOutputFlags;
-
-typedef enum VkMemoryInputFlags_
-{
- VK_MEMORY_INPUT_CPU_READ_BIT = 0x00000001, // Controls input coherency of CPU reads
- VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT = 0x00000002, // Controls input coherency of indirect command reads
- VK_MEMORY_INPUT_INDEX_FETCH_BIT = 0x00000004, // Controls input coherency of index fetches
- VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT = 0x00000008, // Controls input coherency of vertex attribute fetches
- VK_MEMORY_INPUT_UNIFORM_READ_BIT = 0x00000010, // Controls input coherency of uniform buffer reads
- VK_MEMORY_INPUT_SHADER_READ_BIT = 0x00000020, // Controls input coherency of generic shader reads
- VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT = 0x00000040, // Controls input coherency of color attachment reads
- VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000080, // Controls input coherency of depth/stencil attachment reads
- VK_MEMORY_INPUT_TRANSFER_BIT = 0x00000100, // Controls input coherency of transfer operations
- VK_MAX_ENUM(VkMemoryInputFlags)
-} VkMemoryInputFlags;
-
typedef enum VkAttachmentLoadOp_
{
VK_ATTACHMENT_LOAD_OP_LOAD = 0x00000000,
VK_ATTACHMENT_LOAD_OP_CLEAR = 0x00000001,
VK_ATTACHMENT_LOAD_OP_DONT_CARE = 0x00000002,
- VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD,
- VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
- VK_NUM_ATTACHMENT_LOAD_OP = (VK_ATTACHMENT_LOAD_OP_END_RANGE - VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkAttachmentLoadOp)
+ VK_ENUM_RANGE(ATTACHMENT_LOAD_OP, LOAD, DONT_CARE)
} VkAttachmentLoadOp;
typedef enum VkAttachmentStoreOp_
VK_ATTACHMENT_STORE_OP_RESOLVE_MSAA = 0x00000001,
VK_ATTACHMENT_STORE_OP_DONT_CARE = 0x00000002,
- VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE,
- VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE,
- VK_NUM_ATTACHMENT_STORE_OP = (VK_ATTACHMENT_STORE_OP_END_RANGE - VK_ATTACHMENT_STORE_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkAttachmentStoreOp)
+ VK_ENUM_RANGE(ATTACHMENT_STORE_OP, STORE, DONT_CARE)
} VkAttachmentStoreOp;
typedef enum VkImageType_
{
- VK_IMAGE_1D = 0x00000000,
- VK_IMAGE_2D = 0x00000001,
- VK_IMAGE_3D = 0x00000002,
+ VK_IMAGE_TYPE_1D = 0x00000000,
+ VK_IMAGE_TYPE_2D = 0x00000001,
+ VK_IMAGE_TYPE_3D = 0x00000002,
- VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_1D,
- VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_3D,
- VK_NUM_IMAGE_TYPE = (VK_IMAGE_TYPE_END_RANGE - VK_IMAGE_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkImageType)
+ VK_ENUM_RANGE(IMAGE_TYPE, 1D, 3D)
} VkImageType;
typedef enum VkImageTiling_
{
- VK_LINEAR_TILING = 0x00000000,
- VK_OPTIMAL_TILING = 0x00000001,
+ VK_IMAGE_TILING_LINEAR = 0x00000000,
+ VK_IMAGE_TILING_OPTIMAL = 0x00000001,
- VK_IMAGE_TILING_BEGIN_RANGE = VK_LINEAR_TILING,
- VK_IMAGE_TILING_END_RANGE = VK_OPTIMAL_TILING,
- VK_NUM_IMAGE_TILING = (VK_IMAGE_TILING_END_RANGE - VK_IMAGE_TILING_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkImageTiling)
+ VK_ENUM_RANGE(IMAGE_TILING, LINEAR, OPTIMAL)
} VkImageTiling;
typedef enum VkImageViewType_
{
- VK_IMAGE_VIEW_1D = 0x00000000,
- VK_IMAGE_VIEW_2D = 0x00000001,
- VK_IMAGE_VIEW_3D = 0x00000002,
- VK_IMAGE_VIEW_CUBE = 0x00000003,
+ VK_IMAGE_VIEW_TYPE_1D = 0x00000000,
+ VK_IMAGE_VIEW_TYPE_2D = 0x00000001,
+ VK_IMAGE_VIEW_TYPE_3D = 0x00000002,
+ VK_IMAGE_VIEW_TYPE_CUBE = 0x00000003,
- VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_1D,
- VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_CUBE,
- VK_NUM_IMAGE_VIEW_TYPE = (VK_IMAGE_VIEW_TYPE_END_RANGE - VK_IMAGE_VIEW_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkImageViewType)
+ VK_ENUM_RANGE(IMAGE_VIEW_TYPE, 1D, CUBE)
} VkImageViewType;
typedef enum VkImageAspect_
VK_IMAGE_ASPECT_DEPTH = 0x00000001,
VK_IMAGE_ASPECT_STENCIL = 0x00000002,
- VK_IMAGE_ASPECT_BEGIN_RANGE = VK_IMAGE_ASPECT_COLOR,
- VK_IMAGE_ASPECT_END_RANGE = VK_IMAGE_ASPECT_STENCIL,
- VK_NUM_IMAGE_ASPECT = (VK_IMAGE_ASPECT_END_RANGE - VK_IMAGE_ASPECT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkImageAspect)
+ VK_ENUM_RANGE(IMAGE_ASPECT, COLOR, STENCIL)
} VkImageAspect;
+typedef enum VkBufferViewType_
+{
+ VK_BUFFER_VIEW_TYPE_RAW = 0x00000000, // Raw buffer without special structure (UBO, SSBO)
+ VK_BUFFER_VIEW_TYPE_FORMATTED = 0x00000001, // Buffer with format (TBO, IBO)
+
+ VK_ENUM_RANGE(BUFFER_VIEW_TYPE, RAW, FORMATTED)
+} VkBufferViewType;
+
typedef enum VkChannelSwizzle_
{
VK_CHANNEL_SWIZZLE_ZERO = 0x00000000,
VK_CHANNEL_SWIZZLE_B = 0x00000004,
VK_CHANNEL_SWIZZLE_A = 0x00000005,
- VK_CHANNEL_SWIZZLE_BEGIN_RANGE = VK_CHANNEL_SWIZZLE_ZERO,
- VK_CHANNEL_SWIZZLE_END_RANGE = VK_CHANNEL_SWIZZLE_A,
- VK_NUM_CHANNEL_SWIZZLE = (VK_CHANNEL_SWIZZLE_END_RANGE - VK_CHANNEL_SWIZZLE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkChannelSwizzle)
+ VK_ENUM_RANGE(CHANNEL_SWIZZLE, ZERO, A)
} VkChannelSwizzle;
typedef enum VkDescriptorType_
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 0x00000008,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 0x00000009,
- VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
- VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
- VK_NUM_DESCRIPTOR_TYPE = (VK_DESCRIPTOR_TYPE_END_RANGE - VK_DESCRIPTOR_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkDescriptorType)
+ VK_ENUM_RANGE(DESCRIPTOR_TYPE, SAMPLER, STORAGE_BUFFER_DYNAMIC)
} VkDescriptorType;
typedef enum VkDescriptorPoolUsage_
VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT = 0x00000000,
VK_DESCRIPTOR_POOL_USAGE_DYNAMIC = 0x00000001,
- VK_DESCRIPTOR_POOL_USAGE_BEGIN_RANGE = VK_DESCRIPTOR_POOL_USAGE_ONE_SHOT,
- VK_DESCRIPTOR_POOL_USAGE_END_RANGE = VK_DESCRIPTOR_POOL_USAGE_DYNAMIC,
- VK_NUM_DESCRIPTOR_POOL_USAGE = (VK_DESCRIPTOR_POOL_USAGE_END_RANGE - VK_DESCRIPTOR_POOL_USAGE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkDescriptorPoolUsage)
+ VK_ENUM_RANGE(DESCRIPTOR_POOL_USAGE, ONE_SHOT, DYNAMIC)
} VkDescriptorPoolUsage;
typedef enum VkDescriptorUpdateMode_
{
- VK_DESCRIPTOR_UDPATE_MODE_COPY = 0x00000000,
+ VK_DESCRIPTOR_UPDATE_MODE_COPY = 0x00000000,
VK_DESCRIPTOR_UPDATE_MODE_FASTEST = 0x00000001,
- VK_DESCRIPTOR_UPDATE_MODE_BEGIN_RANGE = VK_DESCRIPTOR_UDPATE_MODE_COPY,
- VK_DESCRIPTOR_UPDATE_MODE_END_RANGE = VK_DESCRIPTOR_UPDATE_MODE_FASTEST,
- VK_NUM_DESCRIPTOR_UPDATE_MODE = (VK_DESCRIPTOR_UPDATE_MODE_END_RANGE - VK_DESCRIPTOR_UPDATE_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkDescriptorUpdateMode)
+ VK_ENUM_RANGE(DESCRIPTOR_UPDATE_MODE, COPY, FASTEST)
} VkDescriptorUpdateMode;
typedef enum VkDescriptorSetUsage_
VK_DESCRIPTOR_SET_USAGE_ONE_SHOT = 0x00000000,
VK_DESCRIPTOR_SET_USAGE_STATIC = 0x00000001,
- VK_DESCRIPTOR_SET_USAGE_BEGIN_RANGE = VK_DESCRIPTOR_SET_USAGE_ONE_SHOT,
- VK_DESCRIPTOR_SET_USAGE_END_RANGE = VK_DESCRIPTOR_SET_USAGE_STATIC,
- VK_NUM_DESCRIPTOR_SET_USAGE = (VK_DESCRIPTOR_SET_USAGE_END_RANGE - VK_DESCRIPTOR_SET_USAGE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkDescriptorSetUsage)
+ VK_ENUM_RANGE(DESCRIPTOR_SET_USAGE, ONE_SHOT, STATIC)
} VkDescriptorSetUsage;
typedef enum VkQueryType_
{
- VK_QUERY_OCCLUSION = 0x00000000,
- VK_QUERY_PIPELINE_STATISTICS = 0x00000001,
+ VK_QUERY_TYPE_OCCLUSION = 0x00000000,
+ VK_QUERY_TYPE_PIPELINE_STATISTICS = 0x00000001,
- VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_OCCLUSION,
- VK_QUERY_TYPE_END_RANGE = VK_QUERY_PIPELINE_STATISTICS,
- VK_NUM_QUERY_TYPE = (VK_QUERY_TYPE_END_RANGE - VK_QUERY_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkQueryType)
+ VK_ENUM_RANGE(QUERY_TYPE, OCCLUSION, PIPELINE_STATISTICS)
} VkQueryType;
typedef enum VkTimestampType_
{
- VK_TIMESTAMP_TOP = 0x00000000,
- VK_TIMESTAMP_BOTTOM = 0x00000001,
+ VK_TIMESTAMP_TYPE_TOP = 0x00000000,
+ VK_TIMESTAMP_TYPE_BOTTOM = 0x00000001,
- VK_TIMESTAMP_TYPE_BEGIN_RANGE = VK_TIMESTAMP_TOP,
- VK_TIMESTAMP_TYPE_END_RANGE = VK_TIMESTAMP_BOTTOM,
- VK_NUM_TIMESTAMP_TYPE = (VK_TIMESTAMP_TYPE_END_RANGE - VK_TIMESTAMP_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkTimestampType)
+ VK_ENUM_RANGE(TIMESTAMP_TYPE, TOP, BOTTOM)
} VkTimestampType;
-typedef enum VkBorderColorType_
+typedef enum VkBorderColor_
{
VK_BORDER_COLOR_OPAQUE_WHITE = 0x00000000,
VK_BORDER_COLOR_TRANSPARENT_BLACK = 0x00000001,
VK_BORDER_COLOR_OPAQUE_BLACK = 0x00000002,
- VK_BORDER_COLOR_TYPE_BEGIN_RANGE = VK_BORDER_COLOR_OPAQUE_WHITE,
- VK_BORDER_COLOR_TYPE_END_RANGE = VK_BORDER_COLOR_OPAQUE_BLACK,
- VK_NUM_BORDER_COLOR_TYPE = (VK_BORDER_COLOR_TYPE_END_RANGE - VK_BORDER_COLOR_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkBorderColorType)
-} VkBorderColorType;
+ VK_ENUM_RANGE(BORDER_COLOR, OPAQUE_WHITE, OPAQUE_BLACK)
+} VkBorderColor;
typedef enum VkPipelineBindPoint_
{
VK_PIPELINE_BIND_POINT_COMPUTE = 0x00000000,
VK_PIPELINE_BIND_POINT_GRAPHICS = 0x00000001,
- VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
- VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
- VK_NUM_PIPELINE_BIND_POINT = (VK_PIPELINE_BIND_POINT_END_RANGE - VK_PIPELINE_BIND_POINT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkPipelineBindPoint)
+ VK_ENUM_RANGE(PIPELINE_BIND_POINT, COMPUTE, GRAPHICS)
} VkPipelineBindPoint;
typedef enum VkStateBindPoint_
{
- VK_STATE_BIND_VIEWPORT = 0x00000000,
- VK_STATE_BIND_RASTER = 0x00000001,
- VK_STATE_BIND_COLOR_BLEND = 0x00000002,
- VK_STATE_BIND_DEPTH_STENCIL = 0x00000003,
+ VK_STATE_BIND_POINT_VIEWPORT = 0x00000000,
+ VK_STATE_BIND_POINT_RASTER = 0x00000001,
+ VK_STATE_BIND_POINT_COLOR_BLEND = 0x00000002,
+ VK_STATE_BIND_POINT_DEPTH_STENCIL = 0x00000003,
- VK_STATE_BIND_POINT_BEGIN_RANGE = VK_STATE_BIND_VIEWPORT,
- VK_STATE_BIND_POINT_END_RANGE = VK_STATE_BIND_DEPTH_STENCIL,
- VK_NUM_STATE_BIND_POINT = (VK_STATE_BIND_POINT_END_RANGE - VK_STATE_BIND_POINT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkStateBindPoint)
+ VK_ENUM_RANGE(STATE_BIND_POINT, VIEWPORT, DEPTH_STENCIL)
} VkStateBindPoint;
typedef enum VkPrimitiveTopology_
{
- VK_TOPOLOGY_POINT_LIST = 0x00000000,
- VK_TOPOLOGY_LINE_LIST = 0x00000001,
- VK_TOPOLOGY_LINE_STRIP = 0x00000002,
- VK_TOPOLOGY_TRIANGLE_LIST = 0x00000003,
- VK_TOPOLOGY_TRIANGLE_STRIP = 0x00000004,
- VK_TOPOLOGY_TRIANGLE_FAN = 0x00000005,
- VK_TOPOLOGY_LINE_LIST_ADJ = 0x00000006,
- VK_TOPOLOGY_LINE_STRIP_ADJ = 0x00000007,
- VK_TOPOLOGY_TRIANGLE_LIST_ADJ = 0x00000008,
- VK_TOPOLOGY_TRIANGLE_STRIP_ADJ = 0x00000009,
- VK_TOPOLOGY_PATCH = 0x0000000a,
-
- VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_TOPOLOGY_POINT_LIST,
- VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_TOPOLOGY_PATCH,
- VK_NUM_PRIMITIVE_TOPOLOGY = (VK_PRIMITIVE_TOPOLOGY_END_RANGE - VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkPrimitiveTopology)
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0x00000000,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 0x00000001,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 0x00000002,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 0x00000003,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 0x00000004,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 0x00000005,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ = 0x00000006,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ = 0x00000007,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ = 0x00000008,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ = 0x00000009,
+ VK_PRIMITIVE_TOPOLOGY_PATCH = 0x0000000a,
+
+ VK_ENUM_RANGE(PRIMITIVE_TOPOLOGY, POINT_LIST, PATCH)
} VkPrimitiveTopology;
typedef enum VkIndexType_
{
- VK_INDEX_8 = 0x00000000,
- VK_INDEX_16 = 0x00000001,
- VK_INDEX_32 = 0x00000002,
+ VK_INDEX_TYPE_UINT8 = 0x00000000,
+ VK_INDEX_TYPE_UINT16 = 0x00000001,
+ VK_INDEX_TYPE_UINT32 = 0x00000002,
- VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_8,
- VK_INDEX_TYPE_END_RANGE = VK_INDEX_32,
- VK_NUM_INDEX_TYPE = (VK_INDEX_TYPE_END_RANGE - VK_INDEX_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkIndexType)
+ VK_ENUM_RANGE(INDEX_TYPE, UINT8, UINT32)
} VkIndexType;
typedef enum VkTexFilter_
{
- VK_TEX_FILTER_NEAREST = 0,
- VK_TEX_FILTER_LINEAR = 1,
+ VK_TEX_FILTER_NEAREST = 0x00000000,
+ VK_TEX_FILTER_LINEAR = 0x00000001,
- VK_TEX_FILTER_BEGIN_RANGE = VK_TEX_FILTER_NEAREST,
- VK_TEX_FILTER_END_RANGE = VK_TEX_FILTER_LINEAR,
- VK_NUM_TEX_FILTER = (VK_TEX_FILTER_END_RANGE - VK_TEX_FILTER_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkTexFilter)
+ VK_ENUM_RANGE(TEX_FILTER, NEAREST, LINEAR)
} VkTexFilter;
typedef enum VkTexMipmapMode_
{
- VK_TEX_MIPMAP_BASE = 0, // Always choose base level
- VK_TEX_MIPMAP_NEAREST = 1, // Choose nearest mip level
- VK_TEX_MIPMAP_LINEAR = 2, // Linear filter between mip levels
+ VK_TEX_MIPMAP_MODE_BASE = 0x00000000, // Always choose base level
+ VK_TEX_MIPMAP_MODE_NEAREST = 0x00000001, // Choose nearest mip level
+ VK_TEX_MIPMAP_MODE_LINEAR = 0x00000002, // Linear filter between mip levels
- VK_TEX_MIPMAP_BEGIN_RANGE = VK_TEX_MIPMAP_BASE,
- VK_TEX_MIPMAP_END_RANGE = VK_TEX_MIPMAP_LINEAR,
- VK_NUM_TEX_MIPMAP = (VK_TEX_MIPMAP_END_RANGE - VK_TEX_MIPMAP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkTexMipmapMode)
+ VK_ENUM_RANGE(TEX_MIPMAP_MODE, BASE, LINEAR)
} VkTexMipmapMode;
typedef enum VkTexAddress_
VK_TEX_ADDRESS_MIRROR_ONCE = 0x00000003,
VK_TEX_ADDRESS_CLAMP_BORDER = 0x00000004,
- VK_TEX_ADDRESS_BEGIN_RANGE = VK_TEX_ADDRESS_WRAP,
- VK_TEX_ADDRESS_END_RANGE = VK_TEX_ADDRESS_CLAMP_BORDER,
- VK_NUM_TEX_ADDRESS = (VK_TEX_ADDRESS_END_RANGE - VK_TEX_ADDRESS_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkTexAddress)
+ VK_ENUM_RANGE(TEX_ADDRESS, WRAP, CLAMP_BORDER)
} VkTexAddress;
-typedef enum VkCompareFunc_
+typedef enum VkCompareOp_
{
- VK_COMPARE_NEVER = 0x00000000,
- VK_COMPARE_LESS = 0x00000001,
- VK_COMPARE_EQUAL = 0x00000002,
- VK_COMPARE_LESS_EQUAL = 0x00000003,
- VK_COMPARE_GREATER = 0x00000004,
- VK_COMPARE_NOT_EQUAL = 0x00000005,
- VK_COMPARE_GREATER_EQUAL = 0x00000006,
- VK_COMPARE_ALWAYS = 0x00000007,
+ VK_COMPARE_OP_NEVER = 0x00000000,
+ VK_COMPARE_OP_LESS = 0x00000001,
+ VK_COMPARE_OP_EQUAL = 0x00000002,
+ VK_COMPARE_OP_LESS_EQUAL = 0x00000003,
+ VK_COMPARE_OP_GREATER = 0x00000004,
+ VK_COMPARE_OP_NOT_EQUAL = 0x00000005,
+ VK_COMPARE_OP_GREATER_EQUAL = 0x00000006,
+ VK_COMPARE_OP_ALWAYS = 0x00000007,
- VK_COMPARE_FUNC_BEGIN_RANGE = VK_COMPARE_NEVER,
- VK_COMPARE_FUNC_END_RANGE = VK_COMPARE_ALWAYS,
- VK_NUM_COMPARE_FUNC = (VK_COMPARE_FUNC_END_RANGE - VK_COMPARE_FUNC_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkCompareFunc)
-} VkCompareFunc;
+ VK_ENUM_RANGE(COMPARE_OP, NEVER, ALWAYS)
+} VkCompareOp;
typedef enum VkFillMode_
{
- VK_FILL_POINTS = 0x00000000,
- VK_FILL_WIREFRAME = 0x00000001,
- VK_FILL_SOLID = 0x00000002,
+ VK_FILL_MODE_POINTS = 0x00000000,
+ VK_FILL_MODE_WIREFRAME = 0x00000001,
+ VK_FILL_MODE_SOLID = 0x00000002,
- VK_FILL_MODE_BEGIN_RANGE = VK_FILL_POINTS,
- VK_FILL_MODE_END_RANGE = VK_FILL_SOLID,
- VK_NUM_FILL_MODE = (VK_FILL_MODE_END_RANGE - VK_FILL_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkFillMode)
+ VK_ENUM_RANGE(FILL_MODE, POINTS, SOLID)
} VkFillMode;
typedef enum VkCullMode_
{
- VK_CULL_NONE = 0x00000000,
- VK_CULL_FRONT = 0x00000001,
- VK_CULL_BACK = 0x00000002,
- VK_CULL_FRONT_AND_BACK = 0x00000003,
+ VK_CULL_MODE_NONE = 0x00000000,
+ VK_CULL_MODE_FRONT = 0x00000001,
+ VK_CULL_MODE_BACK = 0x00000002,
+ VK_CULL_MODE_FRONT_AND_BACK = 0x00000003,
- VK_CULL_MODE_BEGIN_RANGE = VK_CULL_NONE,
- VK_CULL_MODE_END_RANGE = VK_CULL_FRONT_AND_BACK,
- VK_NUM_CULL_MODE = (VK_CULL_MODE_END_RANGE - VK_CULL_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkCullMode)
+ VK_ENUM_RANGE(CULL_MODE, NONE, FRONT_AND_BACK)
} VkCullMode;
-typedef enum VkFaceOrientation_
+typedef enum VkFrontFace_
{
VK_FRONT_FACE_CCW = 0x00000000,
VK_FRONT_FACE_CW = 0x00000001,
- VK_FACE_ORIENTATION_BEGIN_RANGE = VK_FRONT_FACE_CCW,
- VK_FACE_ORIENTATION_END_RANGE = VK_FRONT_FACE_CW,
- VK_NUM_FACE_ORIENTATION = (VK_FACE_ORIENTATION_END_RANGE - VK_FACE_ORIENTATION_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkFaceOrientation)
-} VkFaceOrientation;
+ VK_ENUM_RANGE(FRONT_FACE, CCW, CW)
+} VkFrontFace;
-typedef enum VkProvokingVertexConvention_
+typedef enum VkProvokingVertex_
{
VK_PROVOKING_VERTEX_FIRST = 0x00000000,
VK_PROVOKING_VERTEX_LAST = 0x00000001,
- VK_PROVOKING_VERTEX_BEGIN_RANGE = VK_PROVOKING_VERTEX_FIRST,
- VK_PROVOKING_VERTEX_END_RANGE = VK_PROVOKING_VERTEX_LAST,
- VK_NUM_PROVOKING_VERTEX_CONVENTION = (VK_PROVOKING_VERTEX_END_RANGE - VK_PROVOKING_VERTEX_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkProvokingVertexConvention)
-} VkProvokingVertexConvention;
+ VK_ENUM_RANGE(PROVOKING_VERTEX, FIRST, LAST)
+} VkProvokingVertex;
typedef enum VkCoordinateOrigin_
{
VK_COORDINATE_ORIGIN_UPPER_LEFT = 0x00000000,
VK_COORDINATE_ORIGIN_LOWER_LEFT = 0x00000001,
- VK_COORDINATE_ORIGIN_BEGIN_RANGE = VK_COORDINATE_ORIGIN_UPPER_LEFT,
- VK_COORDINATE_ORIGIN_END_RANGE = VK_COORDINATE_ORIGIN_LOWER_LEFT,
- VK_NUM_COORDINATE_ORIGIN = (VK_COORDINATE_ORIGIN_END_RANGE - VK_COORDINATE_ORIGIN_END_RANGE + 1),
- VK_MAX_ENUM(VkCoordinateOrigin)
+ VK_ENUM_RANGE(COORDINATE_ORIGIN, UPPER_LEFT, LOWER_LEFT)
} VkCoordinateOrigin;
typedef enum VkDepthMode_
VK_DEPTH_MODE_ZERO_TO_ONE = 0x00000000,
VK_DEPTH_MODE_NEGATIVE_ONE_TO_ONE = 0x00000001,
- VK_DEPTH_MODE_BEGIN_RANGE = VK_DEPTH_MODE_ZERO_TO_ONE,
- VK_DEPTH_MODE_END_RANGE = VK_DEPTH_MODE_NEGATIVE_ONE_TO_ONE,
- VK_NUM_DEPTH_MODE = (VK_DEPTH_MODE_END_RANGE - VK_DEPTH_MODE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkDepthMode)
+ VK_ENUM_RANGE(DEPTH_MODE, ZERO_TO_ONE, NEGATIVE_ONE_TO_ONE)
} VkDepthMode;
typedef enum VkBlend_
VK_BLEND_SRC1_ALPHA = 0x00000011,
VK_BLEND_ONE_MINUS_SRC1_ALPHA = 0x00000012,
- VK_BLEND_BEGIN_RANGE = VK_BLEND_ZERO,
- VK_BLEND_END_RANGE = VK_BLEND_ONE_MINUS_SRC1_ALPHA,
- VK_NUM_BLEND = (VK_BLEND_END_RANGE - VK_BLEND_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkBlend)
+ VK_ENUM_RANGE(BLEND, ZERO, ONE_MINUS_SRC1_ALPHA)
} VkBlend;
-typedef enum VkBlendFunc_
+typedef enum VkBlendOp_
{
- VK_BLEND_FUNC_ADD = 0x00000000,
- VK_BLEND_FUNC_SUBTRACT = 0x00000001,
- VK_BLEND_FUNC_REVERSE_SUBTRACT = 0x00000002,
- VK_BLEND_FUNC_MIN = 0x00000003,
- VK_BLEND_FUNC_MAX = 0x00000004,
+ VK_BLEND_OP_ADD = 0x00000000,
+ VK_BLEND_OP_SUBTRACT = 0x00000001,
+ VK_BLEND_OP_REVERSE_SUBTRACT = 0x00000002,
+ VK_BLEND_OP_MIN = 0x00000003,
+ VK_BLEND_OP_MAX = 0x00000004,
- VK_BLEND_FUNC_BEGIN_RANGE = VK_BLEND_FUNC_ADD,
- VK_BLEND_FUNC_END_RANGE = VK_BLEND_FUNC_MAX,
- VK_NUM_BLEND_FUNC = (VK_BLEND_FUNC_END_RANGE - VK_BLEND_FUNC_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkBlendFunc)
-} VkBlendFunc;
+ VK_ENUM_RANGE(BLEND_OP, ADD, MAX)
+} VkBlendOp;
typedef enum VkStencilOp_
{
VK_STENCIL_OP_INC_WRAP = 0x00000006,
VK_STENCIL_OP_DEC_WRAP = 0x00000007,
- VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP,
- VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DEC_WRAP,
- VK_NUM_STENCIL_OP = (VK_STENCIL_OP_END_RANGE - VK_STENCIL_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkStencilOp)
+ VK_ENUM_RANGE(STENCIL_OP, KEEP, DEC_WRAP)
} VkStencilOp;
typedef enum VkLogicOp_
VK_LOGIC_OP_NAND = 0x0000000e,
VK_LOGIC_OP_SET = 0x0000000f,
- VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_COPY,
- VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET,
- VK_NUM_LOGIC_OP = (VK_LOGIC_OP_END_RANGE - VK_LOGIC_OP_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkLogicOp)
+ VK_ENUM_RANGE(LOGIC_OP, COPY, SET)
} VkLogicOp;
typedef enum VkSystemAllocType_
{
- VK_SYSTEM_ALLOC_API_OBJECT = 0x00000000,
- VK_SYSTEM_ALLOC_INTERNAL = 0x00000001,
- VK_SYSTEM_ALLOC_INTERNAL_TEMP = 0x00000002,
- VK_SYSTEM_ALLOC_INTERNAL_SHADER = 0x00000003,
- VK_SYSTEM_ALLOC_DEBUG = 0x00000004,
+ VK_SYSTEM_ALLOC_TYPE_API_OBJECT = 0x00000000,
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL = 0x00000001,
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL_TEMP = 0x00000002,
+ VK_SYSTEM_ALLOC_TYPE_INTERNAL_SHADER = 0x00000003,
+ VK_SYSTEM_ALLOC_TYPE_DEBUG = 0x00000004,
- VK_SYSTEM_ALLOC_BEGIN_RANGE = VK_SYSTEM_ALLOC_API_OBJECT,
- VK_SYSTEM_ALLOC_END_RANGE = VK_SYSTEM_ALLOC_DEBUG,
- VK_NUM_SYSTEM_ALLOC_TYPE = (VK_SYSTEM_ALLOC_END_RANGE - VK_SYSTEM_ALLOC_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkSystemAllocType)
+ VK_ENUM_RANGE(SYSTEM_ALLOC_TYPE, API_OBJECT, DEBUG)
} VkSystemAllocType;
-typedef enum VkPhysicalGpuType_
+typedef enum VkPhysicalDeviceType_
{
- VK_GPU_TYPE_OTHER = 0x00000000,
- VK_GPU_TYPE_INTEGRATED = 0x00000001,
- VK_GPU_TYPE_DISCRETE = 0x00000002,
- VK_GPU_TYPE_VIRTUAL = 0x00000003,
+ VK_PHYSICAL_DEVICE_TYPE_OTHER = 0x00000000,
+ VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 0x00000001,
+ VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 0x00000002,
+ VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 0x00000003,
+ VK_PHYSICAL_DEVICE_TYPE_CPU = 0x00000004,
- VK_PHYSICAL_GPU_TYPE_BEGIN_RANGE = VK_GPU_TYPE_OTHER,
- VK_PHYSICAL_GPU_TYPE_END_RANGE = VK_GPU_TYPE_VIRTUAL,
- VK_NUM_PHYSICAL_GPU_TYPE = (VK_PHYSICAL_GPU_TYPE_END_RANGE - VK_PHYSICAL_GPU_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkPhysicalGpuType)
-} VkPhysicalGpuType;
+ VK_ENUM_RANGE(PHYSICAL_DEVICE_TYPE, OTHER, CPU)
+} VkPhysicalDeviceType;
-typedef enum VkPhysicalGpuInfoType_
+typedef enum VkPhysicalDeviceInfoType_
{
- // Info type for vkGetGpuInfo()
- VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES = 0x00000000,
- VK_INFO_TYPE_PHYSICAL_GPU_PERFORMANCE = 0x00000001,
- VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES = 0x00000002,
- VK_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES = 0x00000003,
+ // Info type for vkGetPhysicalDeviceInfo()
+ VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES = 0x00000000,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_PERFORMANCE = 0x00000001,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES = 0x00000002,
+ VK_PHYSICAL_DEVICE_INFO_TYPE_MEMORY_PROPERTIES = 0x00000003,
- VK_INFO_TYPE_PHYSICAL_GPU_BEGIN_RANGE = VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES,
- VK_INFO_TYPE_PHYSICAL_GPU_END_RANGE = VK_INFO_TYPE_PHYSICAL_GPU_MEMORY_PROPERTIES,
- VK_NUM_INFO_TYPE_PHYSICAL_GPU = (VK_INFO_TYPE_PHYSICAL_GPU_END_RANGE - VK_INFO_TYPE_PHYSICAL_GPU_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkPhysicalGpuInfoType)
-} VkPhysicalGpuInfoType;
+ VK_ENUM_RANGE(PHYSICAL_DEVICE_INFO_TYPE, PROPERTIES, MEMORY_PROPERTIES)
+} VkPhysicalDeviceInfoType;
typedef enum VkExtensionInfoType_
{
typedef enum VkFormatInfoType_
{
// Info type for vkGetFormatInfo()
- VK_INFO_TYPE_FORMAT_PROPERTIES = 0x00000000,
+ VK_FORMAT_INFO_TYPE_PROPERTIES = 0x00000000,
- VK_INFO_TYPE_FORMAT_BEGIN_RANGE = VK_INFO_TYPE_FORMAT_PROPERTIES,
- VK_INFO_TYPE_FORMAT_END_RANGE = VK_INFO_TYPE_FORMAT_PROPERTIES,
- VK_NUM_INFO_TYPE_FORMAT = (VK_INFO_TYPE_FORMAT_END_RANGE - VK_INFO_TYPE_FORMAT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkFormatInfoType)
+ VK_ENUM_RANGE(FORMAT_INFO_TYPE, PROPERTIES, PROPERTIES)
} VkFormatInfoType;
typedef enum VkSubresourceInfoType_
{
// Info type for vkGetImageSubresourceInfo()
- VK_INFO_TYPE_SUBRESOURCE_LAYOUT = 0x00000000,
+ VK_SUBRESOURCE_INFO_TYPE_LAYOUT = 0x00000000,
- VK_INFO_TYPE_SUBRESOURCE_BEGIN_RANGE = VK_INFO_TYPE_SUBRESOURCE_LAYOUT,
- VK_INFO_TYPE_SUBRESOURCE_END_RANGE = VK_INFO_TYPE_SUBRESOURCE_LAYOUT,
- VK_NUM_INFO_TYPE_SUBRESOURCE = (VK_INFO_TYPE_SUBRESOURCE_END_RANGE - VK_INFO_TYPE_SUBRESOURCE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkSubresourceInfoType)
+ VK_ENUM_RANGE(SUBRESOURCE_INFO_TYPE, LAYOUT, LAYOUT)
} VkSubresourceInfoType;
typedef enum VkObjectInfoType_
{
// Info type for vkGetObjectInfo()
- VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT = 0x00000000,
- VK_INFO_TYPE_MEMORY_REQUIREMENTS = 0x00000001,
+ VK_OBJECT_INFO_TYPE_MEMORY_ALLOCATION_COUNT = 0x00000000,
+ VK_OBJECT_INFO_TYPE_MEMORY_REQUIREMENTS = 0x00000001,
- VK_INFO_TYPE_BEGIN_RANGE = VK_INFO_TYPE_MEMORY_ALLOCATION_COUNT,
- VK_INFO_TYPE_END_RANGE = VK_INFO_TYPE_MEMORY_REQUIREMENTS,
- VK_NUM_INFO_TYPE = (VK_INFO_TYPE_END_RANGE - VK_INFO_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkObjectInfoType)
+ VK_ENUM_RANGE(OBJECT_INFO_TYPE, MEMORY_ALLOCATION_COUNT, MEMORY_REQUIREMENTS)
} VkObjectInfoType;
+typedef enum VkVertexInputStepRate_
+{
+ VK_VERTEX_INPUT_STEP_RATE_VERTEX = 0x0,
+ VK_VERTEX_INPUT_STEP_RATE_INSTANCE = 0x1,
+ VK_VERTEX_INPUT_STEP_RATE_DRAW = 0x2, //Optional
+
+ VK_ENUM_RANGE(VERTEX_INPUT_STEP_RATE, VERTEX, DRAW)
+} VkVertexInputStepRate;
+
+// ------------------------------------------------------------------------------------------------
+// Vulkan format definitions
+typedef enum VkFormat_
+{
+ VK_FORMAT_UNDEFINED = 0x00000000,
+ VK_FORMAT_R4G4_UNORM = 0x00000001,
+ VK_FORMAT_R4G4_USCALED = 0x00000002,
+ VK_FORMAT_R4G4B4A4_UNORM = 0x00000003,
+ VK_FORMAT_R4G4B4A4_USCALED = 0x00000004,
+ VK_FORMAT_R5G6B5_UNORM = 0x00000005,
+ VK_FORMAT_R5G6B5_USCALED = 0x00000006,
+ VK_FORMAT_R5G5B5A1_UNORM = 0x00000007,
+ VK_FORMAT_R5G5B5A1_USCALED = 0x00000008,
+ VK_FORMAT_R8_UNORM = 0x00000009,
+ VK_FORMAT_R8_SNORM = 0x0000000A,
+ VK_FORMAT_R8_USCALED = 0x0000000B,
+ VK_FORMAT_R8_SSCALED = 0x0000000C,
+ VK_FORMAT_R8_UINT = 0x0000000D,
+ VK_FORMAT_R8_SINT = 0x0000000E,
+ VK_FORMAT_R8_SRGB = 0x0000000F,
+ VK_FORMAT_R8G8_UNORM = 0x00000010,
+ VK_FORMAT_R8G8_SNORM = 0x00000011,
+ VK_FORMAT_R8G8_USCALED = 0x00000012,
+ VK_FORMAT_R8G8_SSCALED = 0x00000013,
+ VK_FORMAT_R8G8_UINT = 0x00000014,
+ VK_FORMAT_R8G8_SINT = 0x00000015,
+ VK_FORMAT_R8G8_SRGB = 0x00000016,
+ VK_FORMAT_R8G8B8_UNORM = 0x00000017,
+ VK_FORMAT_R8G8B8_SNORM = 0x00000018,
+ VK_FORMAT_R8G8B8_USCALED = 0x00000019,
+ VK_FORMAT_R8G8B8_SSCALED = 0x0000001A,
+ VK_FORMAT_R8G8B8_UINT = 0x0000001B,
+ VK_FORMAT_R8G8B8_SINT = 0x0000001C,
+ VK_FORMAT_R8G8B8_SRGB = 0x0000001D,
+ VK_FORMAT_R8G8B8A8_UNORM = 0x0000001E,
+ VK_FORMAT_R8G8B8A8_SNORM = 0x0000001F,
+ VK_FORMAT_R8G8B8A8_USCALED = 0x00000020,
+ VK_FORMAT_R8G8B8A8_SSCALED = 0x00000021,
+ VK_FORMAT_R8G8B8A8_UINT = 0x00000022,
+ VK_FORMAT_R8G8B8A8_SINT = 0x00000023,
+ VK_FORMAT_R8G8B8A8_SRGB = 0x00000024,
+ VK_FORMAT_R10G10B10A2_UNORM = 0x00000025,
+ VK_FORMAT_R10G10B10A2_SNORM = 0x00000026,
+ VK_FORMAT_R10G10B10A2_USCALED = 0x00000027,
+ VK_FORMAT_R10G10B10A2_SSCALED = 0x00000028,
+ VK_FORMAT_R10G10B10A2_UINT = 0x00000029,
+ VK_FORMAT_R10G10B10A2_SINT = 0x0000002A,
+ VK_FORMAT_R16_UNORM = 0x0000002B,
+ VK_FORMAT_R16_SNORM = 0x0000002C,
+ VK_FORMAT_R16_USCALED = 0x0000002D,
+ VK_FORMAT_R16_SSCALED = 0x0000002E,
+ VK_FORMAT_R16_UINT = 0x0000002F,
+ VK_FORMAT_R16_SINT = 0x00000030,
+ VK_FORMAT_R16_SFLOAT = 0x00000031,
+ VK_FORMAT_R16G16_UNORM = 0x00000032,
+ VK_FORMAT_R16G16_SNORM = 0x00000033,
+ VK_FORMAT_R16G16_USCALED = 0x00000034,
+ VK_FORMAT_R16G16_SSCALED = 0x00000035,
+ VK_FORMAT_R16G16_UINT = 0x00000036,
+ VK_FORMAT_R16G16_SINT = 0x00000037,
+ VK_FORMAT_R16G16_SFLOAT = 0x00000038,
+ VK_FORMAT_R16G16B16_UNORM = 0x00000039,
+ VK_FORMAT_R16G16B16_SNORM = 0x0000003A,
+ VK_FORMAT_R16G16B16_USCALED = 0x0000003B,
+ VK_FORMAT_R16G16B16_SSCALED = 0x0000003C,
+ VK_FORMAT_R16G16B16_UINT = 0x0000003D,
+ VK_FORMAT_R16G16B16_SINT = 0x0000003E,
+ VK_FORMAT_R16G16B16_SFLOAT = 0x0000003F,
+ VK_FORMAT_R16G16B16A16_UNORM = 0x00000040,
+ VK_FORMAT_R16G16B16A16_SNORM = 0x00000041,
+ VK_FORMAT_R16G16B16A16_USCALED = 0x00000042,
+ VK_FORMAT_R16G16B16A16_SSCALED = 0x00000043,
+ VK_FORMAT_R16G16B16A16_UINT = 0x00000044,
+ VK_FORMAT_R16G16B16A16_SINT = 0x00000045,
+ VK_FORMAT_R16G16B16A16_SFLOAT = 0x00000046,
+ VK_FORMAT_R32_UINT = 0x00000047,
+ VK_FORMAT_R32_SINT = 0x00000048,
+ VK_FORMAT_R32_SFLOAT = 0x00000049,
+ VK_FORMAT_R32G32_UINT = 0x0000004A,
+ VK_FORMAT_R32G32_SINT = 0x0000004B,
+ VK_FORMAT_R32G32_SFLOAT = 0x0000004C,
+ VK_FORMAT_R32G32B32_UINT = 0x0000004D,
+ VK_FORMAT_R32G32B32_SINT = 0x0000004E,
+ VK_FORMAT_R32G32B32_SFLOAT = 0x0000004F,
+ VK_FORMAT_R32G32B32A32_UINT = 0x00000050,
+ VK_FORMAT_R32G32B32A32_SINT = 0x00000051,
+ VK_FORMAT_R32G32B32A32_SFLOAT = 0x00000052,
+ VK_FORMAT_R64_SFLOAT = 0x00000053,
+ VK_FORMAT_R64G64_SFLOAT = 0x00000054,
+ VK_FORMAT_R64G64B64_SFLOAT = 0x00000055,
+ VK_FORMAT_R64G64B64A64_SFLOAT = 0x00000056,
+ VK_FORMAT_R11G11B10_UFLOAT = 0x00000057,
+ VK_FORMAT_R9G9B9E5_UFLOAT = 0x00000058,
+ VK_FORMAT_D16_UNORM = 0x00000059,
+ VK_FORMAT_D24_UNORM = 0x0000005A,
+ VK_FORMAT_D32_SFLOAT = 0x0000005B,
+ VK_FORMAT_S8_UINT = 0x0000005C,
+ VK_FORMAT_D16_UNORM_S8_UINT = 0x0000005D,
+ VK_FORMAT_D24_UNORM_S8_UINT = 0x0000005E,
+ VK_FORMAT_D32_SFLOAT_S8_UINT = 0x0000005F,
+ VK_FORMAT_BC1_RGB_UNORM = 0x00000060,
+ VK_FORMAT_BC1_RGB_SRGB = 0x00000061,
+ VK_FORMAT_BC1_RGBA_UNORM = 0x00000062,
+ VK_FORMAT_BC1_RGBA_SRGB = 0x00000063,
+ VK_FORMAT_BC2_UNORM = 0x00000064,
+ VK_FORMAT_BC2_SRGB = 0x00000065,
+ VK_FORMAT_BC3_UNORM = 0x00000066,
+ VK_FORMAT_BC3_SRGB = 0x00000067,
+ VK_FORMAT_BC4_UNORM = 0x00000068,
+ VK_FORMAT_BC4_SNORM = 0x00000069,
+ VK_FORMAT_BC5_UNORM = 0x0000006A,
+ VK_FORMAT_BC5_SNORM = 0x0000006B,
+ VK_FORMAT_BC6H_UFLOAT = 0x0000006C,
+ VK_FORMAT_BC6H_SFLOAT = 0x0000006D,
+ VK_FORMAT_BC7_UNORM = 0x0000006E,
+ VK_FORMAT_BC7_SRGB = 0x0000006F,
+ VK_FORMAT_ETC2_R8G8B8_UNORM = 0x00000070,
+ VK_FORMAT_ETC2_R8G8B8_SRGB = 0x00000071,
+ VK_FORMAT_ETC2_R8G8B8A1_UNORM = 0x00000072,
+ VK_FORMAT_ETC2_R8G8B8A1_SRGB = 0x00000073,
+ VK_FORMAT_ETC2_R8G8B8A8_UNORM = 0x00000074,
+ VK_FORMAT_ETC2_R8G8B8A8_SRGB = 0x00000075,
+ VK_FORMAT_EAC_R11_UNORM = 0x00000076,
+ VK_FORMAT_EAC_R11_SNORM = 0x00000077,
+ VK_FORMAT_EAC_R11G11_UNORM = 0x00000078,
+ VK_FORMAT_EAC_R11G11_SNORM = 0x00000079,
+ VK_FORMAT_ASTC_4x4_UNORM = 0x0000007A,
+ VK_FORMAT_ASTC_4x4_SRGB = 0x0000007B,
+ VK_FORMAT_ASTC_5x4_UNORM = 0x0000007C,
+ VK_FORMAT_ASTC_5x4_SRGB = 0x0000007D,
+ VK_FORMAT_ASTC_5x5_UNORM = 0x0000007E,
+ VK_FORMAT_ASTC_5x5_SRGB = 0x0000007F,
+ VK_FORMAT_ASTC_6x5_UNORM = 0x00000080,
+ VK_FORMAT_ASTC_6x5_SRGB = 0x00000081,
+ VK_FORMAT_ASTC_6x6_UNORM = 0x00000082,
+ VK_FORMAT_ASTC_6x6_SRGB = 0x00000083,
+ VK_FORMAT_ASTC_8x5_UNORM = 0x00000084,
+ VK_FORMAT_ASTC_8x5_SRGB = 0x00000085,
+ VK_FORMAT_ASTC_8x6_UNORM = 0x00000086,
+ VK_FORMAT_ASTC_8x6_SRGB = 0x00000087,
+ VK_FORMAT_ASTC_8x8_UNORM = 0x00000088,
+ VK_FORMAT_ASTC_8x8_SRGB = 0x00000089,
+ VK_FORMAT_ASTC_10x5_UNORM = 0x0000008A,
+ VK_FORMAT_ASTC_10x5_SRGB = 0x0000008B,
+ VK_FORMAT_ASTC_10x6_UNORM = 0x0000008C,
+ VK_FORMAT_ASTC_10x6_SRGB = 0x0000008D,
+ VK_FORMAT_ASTC_10x8_UNORM = 0x0000008E,
+ VK_FORMAT_ASTC_10x8_SRGB = 0x0000008F,
+ VK_FORMAT_ASTC_10x10_UNORM = 0x00000090,
+ VK_FORMAT_ASTC_10x10_SRGB = 0x00000091,
+ VK_FORMAT_ASTC_12x10_UNORM = 0x00000092,
+ VK_FORMAT_ASTC_12x10_SRGB = 0x00000093,
+ VK_FORMAT_ASTC_12x12_UNORM = 0x00000094,
+ VK_FORMAT_ASTC_12x12_SRGB = 0x00000095,
+ VK_FORMAT_B4G4R4A4_UNORM = 0x00000096,
+ VK_FORMAT_B5G5R5A1_UNORM = 0x00000097,
+ VK_FORMAT_B5G6R5_UNORM = 0x00000098,
+ VK_FORMAT_B5G6R5_USCALED = 0x00000099,
+ VK_FORMAT_B8G8R8_UNORM = 0x0000009A,
+ VK_FORMAT_B8G8R8_SNORM = 0x0000009B,
+ VK_FORMAT_B8G8R8_USCALED = 0x0000009C,
+ VK_FORMAT_B8G8R8_SSCALED = 0x0000009D,
+ VK_FORMAT_B8G8R8_UINT = 0x0000009E,
+ VK_FORMAT_B8G8R8_SINT = 0x0000009F,
+ VK_FORMAT_B8G8R8_SRGB = 0x000000A0,
+ VK_FORMAT_B8G8R8A8_UNORM = 0x000000A1,
+ VK_FORMAT_B8G8R8A8_SNORM = 0x000000A2,
+ VK_FORMAT_B8G8R8A8_USCALED = 0x000000A3,
+ VK_FORMAT_B8G8R8A8_SSCALED = 0x000000A4,
+ VK_FORMAT_B8G8R8A8_UINT = 0x000000A5,
+ VK_FORMAT_B8G8R8A8_SINT = 0x000000A6,
+ VK_FORMAT_B8G8R8A8_SRGB = 0x000000A7,
+ VK_FORMAT_B10G10R10A2_UNORM = 0x000000A8,
+ VK_FORMAT_B10G10R10A2_SNORM = 0x000000A9,
+ VK_FORMAT_B10G10R10A2_USCALED = 0x000000AA,
+ VK_FORMAT_B10G10R10A2_SSCALED = 0x000000AB,
+ VK_FORMAT_B10G10R10A2_UINT = 0x000000AC,
+ VK_FORMAT_B10G10R10A2_SINT = 0x000000AD,
+
+ VK_ENUM_RANGE(FORMAT, UNDEFINED, B10G10R10A2_SINT)
+} VkFormat;
+
+// Shader stage enumerant
+typedef enum VkShaderStage_
+{
+ VK_SHADER_STAGE_VERTEX = 0,
+ VK_SHADER_STAGE_TESS_CONTROL = 1,
+ VK_SHADER_STAGE_TESS_EVALUATION = 2,
+ VK_SHADER_STAGE_GEOMETRY = 3,
+ VK_SHADER_STAGE_FRAGMENT = 4,
+ VK_SHADER_STAGE_COMPUTE = 5,
+
+ VK_ENUM_RANGE(SHADER_STAGE, VERTEX, COMPUTE)
+} VkShaderStage;
+
+// Structure type enumerant
+typedef enum VkStructureType_
+{
+ VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 1,
+ VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO = 2,
+ VK_STRUCTURE_TYPE_MEMORY_OPEN_INFO = 3,
+ VK_STRUCTURE_TYPE_PEER_MEMORY_OPEN_INFO = 4,
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_ATTACH_INFO = 5,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO = 6,
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 7,
+ VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO = 8,
+ VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO = 9,
+ VK_STRUCTURE_TYPE_SHADER_CREATE_INFO = 10,
+ VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 11,
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 12,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 13,
+ VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO = 14,
+ VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO = 15,
+ VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO = 16,
+ VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO = 17,
+ VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO = 18,
+ VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 19,
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 20,
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 21,
+ VK_STRUCTURE_TYPE_SEMAPHORE_OPEN_INFO = 22,
+ VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 23,
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 24,
+ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 25,
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO = 26,
+ VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO = 27,
+ VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO = 28,
+ VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO = 29,
+ VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO = 30,
+ VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO = 31,
+ VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO = 32,
+ VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO = 33,
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 34,
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 35,
+ VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 36,
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37,
+ VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO = 38,
+ VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO = 39,
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 40,
+ VK_STRUCTURE_TYPE_LAYER_CREATE_INFO = 41,
+ VK_STRUCTURE_TYPE_MEMORY_BARRIER = 42,
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 43,
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 44,
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 45,
+ VK_STRUCTURE_TYPE_UPDATE_SAMPLERS = 46,
+ VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES = 47,
+ VK_STRUCTURE_TYPE_UPDATE_IMAGES = 48,
+ VK_STRUCTURE_TYPE_UPDATE_BUFFERS = 49,
+ VK_STRUCTURE_TYPE_UPDATE_AS_COPY = 50,
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 51,
+
+ VK_ENUM_RANGE(STRUCTURE_TYPE, APPLICATION_INFO, INSTANCE_CREATE_INFO)
+} VkStructureType;
// ------------------------------------------------------------------------------------------------
// Error and return codes
VK_ERROR_UNKNOWN = -(0x00000001),
VK_ERROR_UNAVAILABLE = -(0x00000002),
VK_ERROR_INITIALIZATION_FAILED = -(0x00000003),
- VK_ERROR_OUT_OF_MEMORY = -(0x00000004),
- VK_ERROR_OUT_OF_GPU_MEMORY = -(0x00000005),
+ VK_ERROR_OUT_OF_HOST_MEMORY = -(0x00000004),
+ VK_ERROR_OUT_OF_DEVICE_MEMORY = -(0x00000005),
VK_ERROR_DEVICE_ALREADY_CREATED = -(0x00000006),
VK_ERROR_DEVICE_LOST = -(0x00000007),
VK_ERROR_INVALID_POINTER = -(0x00000008),
VK_ERROR_MEMORY_NOT_BOUND = -(0x00000020),
VK_ERROR_INCOMPATIBLE_QUEUE = -(0x00000021),
VK_ERROR_NOT_SHAREABLE = -(0x00000022),
- VK_MAX_ENUM(VkResult)
-} VkResult;
-
-// ------------------------------------------------------------------------------------------------
-// Vulkan format definitions
-
-typedef enum VkVertexInputStepRate_
-{
- VK_VERTEX_INPUT_STEP_RATE_VERTEX = 0x0,
- VK_VERTEX_INPUT_STEP_RATE_INSTANCE = 0x1,
- VK_VERTEX_INPUT_STEP_RATE_DRAW = 0x2, //Optional
- VK_VERTEX_INPUT_STEP_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_STEP_RATE_VERTEX,
- VK_VERTEX_INPUT_STEP_RATE_END_RANGE = VK_VERTEX_INPUT_STEP_RATE_DRAW,
- VK_NUM_VERTEX_INPUT_STEP_RATE = (VK_VERTEX_INPUT_STEP_RATE_END_RANGE - VK_VERTEX_INPUT_STEP_RATE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkVertexInputStepRate)
-} VkVertexInputStepRate;
-
-typedef enum VkFormat_
-{
- VK_FMT_UNDEFINED = 0x00000000,
- VK_FMT_R4G4_UNORM = 0x00000001,
- VK_FMT_R4G4_USCALED = 0x00000002,
- VK_FMT_R4G4B4A4_UNORM = 0x00000003,
- VK_FMT_R4G4B4A4_USCALED = 0x00000004,
- VK_FMT_R5G6B5_UNORM = 0x00000005,
- VK_FMT_R5G6B5_USCALED = 0x00000006,
- VK_FMT_R5G5B5A1_UNORM = 0x00000007,
- VK_FMT_R5G5B5A1_USCALED = 0x00000008,
- VK_FMT_R8_UNORM = 0x00000009,
- VK_FMT_R8_SNORM = 0x0000000A,
- VK_FMT_R8_USCALED = 0x0000000B,
- VK_FMT_R8_SSCALED = 0x0000000C,
- VK_FMT_R8_UINT = 0x0000000D,
- VK_FMT_R8_SINT = 0x0000000E,
- VK_FMT_R8_SRGB = 0x0000000F,
- VK_FMT_R8G8_UNORM = 0x00000010,
- VK_FMT_R8G8_SNORM = 0x00000011,
- VK_FMT_R8G8_USCALED = 0x00000012,
- VK_FMT_R8G8_SSCALED = 0x00000013,
- VK_FMT_R8G8_UINT = 0x00000014,
- VK_FMT_R8G8_SINT = 0x00000015,
- VK_FMT_R8G8_SRGB = 0x00000016,
- VK_FMT_R8G8B8_UNORM = 0x00000017,
- VK_FMT_R8G8B8_SNORM = 0x00000018,
- VK_FMT_R8G8B8_USCALED = 0x00000019,
- VK_FMT_R8G8B8_SSCALED = 0x0000001A,
- VK_FMT_R8G8B8_UINT = 0x0000001B,
- VK_FMT_R8G8B8_SINT = 0x0000001C,
- VK_FMT_R8G8B8_SRGB = 0x0000001D,
- VK_FMT_R8G8B8A8_UNORM = 0x0000001E,
- VK_FMT_R8G8B8A8_SNORM = 0x0000001F,
- VK_FMT_R8G8B8A8_USCALED = 0x00000020,
- VK_FMT_R8G8B8A8_SSCALED = 0x00000021,
- VK_FMT_R8G8B8A8_UINT = 0x00000022,
- VK_FMT_R8G8B8A8_SINT = 0x00000023,
- VK_FMT_R8G8B8A8_SRGB = 0x00000024,
- VK_FMT_R10G10B10A2_UNORM = 0x00000025,
- VK_FMT_R10G10B10A2_SNORM = 0x00000026,
- VK_FMT_R10G10B10A2_USCALED = 0x00000027,
- VK_FMT_R10G10B10A2_SSCALED = 0x00000028,
- VK_FMT_R10G10B10A2_UINT = 0x00000029,
- VK_FMT_R10G10B10A2_SINT = 0x0000002A,
- VK_FMT_R16_UNORM = 0x0000002B,
- VK_FMT_R16_SNORM = 0x0000002C,
- VK_FMT_R16_USCALED = 0x0000002D,
- VK_FMT_R16_SSCALED = 0x0000002E,
- VK_FMT_R16_UINT = 0x0000002F,
- VK_FMT_R16_SINT = 0x00000030,
- VK_FMT_R16_SFLOAT = 0x00000031,
- VK_FMT_R16G16_UNORM = 0x00000032,
- VK_FMT_R16G16_SNORM = 0x00000033,
- VK_FMT_R16G16_USCALED = 0x00000034,
- VK_FMT_R16G16_SSCALED = 0x00000035,
- VK_FMT_R16G16_UINT = 0x00000036,
- VK_FMT_R16G16_SINT = 0x00000037,
- VK_FMT_R16G16_SFLOAT = 0x00000038,
- VK_FMT_R16G16B16_UNORM = 0x00000039,
- VK_FMT_R16G16B16_SNORM = 0x0000003A,
- VK_FMT_R16G16B16_USCALED = 0x0000003B,
- VK_FMT_R16G16B16_SSCALED = 0x0000003C,
- VK_FMT_R16G16B16_UINT = 0x0000003D,
- VK_FMT_R16G16B16_SINT = 0x0000003E,
- VK_FMT_R16G16B16_SFLOAT = 0x0000003F,
- VK_FMT_R16G16B16A16_UNORM = 0x00000040,
- VK_FMT_R16G16B16A16_SNORM = 0x00000041,
- VK_FMT_R16G16B16A16_USCALED = 0x00000042,
- VK_FMT_R16G16B16A16_SSCALED = 0x00000043,
- VK_FMT_R16G16B16A16_UINT = 0x00000044,
- VK_FMT_R16G16B16A16_SINT = 0x00000045,
- VK_FMT_R16G16B16A16_SFLOAT = 0x00000046,
- VK_FMT_R32_UINT = 0x00000047,
- VK_FMT_R32_SINT = 0x00000048,
- VK_FMT_R32_SFLOAT = 0x00000049,
- VK_FMT_R32G32_UINT = 0x0000004A,
- VK_FMT_R32G32_SINT = 0x0000004B,
- VK_FMT_R32G32_SFLOAT = 0x0000004C,
- VK_FMT_R32G32B32_UINT = 0x0000004D,
- VK_FMT_R32G32B32_SINT = 0x0000004E,
- VK_FMT_R32G32B32_SFLOAT = 0x0000004F,
- VK_FMT_R32G32B32A32_UINT = 0x00000050,
- VK_FMT_R32G32B32A32_SINT = 0x00000051,
- VK_FMT_R32G32B32A32_SFLOAT = 0x00000052,
- VK_FMT_R64_SFLOAT = 0x00000053,
- VK_FMT_R64G64_SFLOAT = 0x00000054,
- VK_FMT_R64G64B64_SFLOAT = 0x00000055,
- VK_FMT_R64G64B64A64_SFLOAT = 0x00000056,
- VK_FMT_R11G11B10_UFLOAT = 0x00000057,
- VK_FMT_R9G9B9E5_UFLOAT = 0x00000058,
- VK_FMT_D16_UNORM = 0x00000059,
- VK_FMT_D24_UNORM = 0x0000005A,
- VK_FMT_D32_SFLOAT = 0x0000005B,
- VK_FMT_S8_UINT = 0x0000005C,
- VK_FMT_D16_UNORM_S8_UINT = 0x0000005D,
- VK_FMT_D24_UNORM_S8_UINT = 0x0000005E,
- VK_FMT_D32_SFLOAT_S8_UINT = 0x0000005F,
- VK_FMT_BC1_RGB_UNORM = 0x00000060,
- VK_FMT_BC1_RGB_SRGB = 0x00000061,
- VK_FMT_BC1_RGBA_UNORM = 0x00000062,
- VK_FMT_BC1_RGBA_SRGB = 0x00000063,
- VK_FMT_BC2_UNORM = 0x00000064,
- VK_FMT_BC2_SRGB = 0x00000065,
- VK_FMT_BC3_UNORM = 0x00000066,
- VK_FMT_BC3_SRGB = 0x00000067,
- VK_FMT_BC4_UNORM = 0x00000068,
- VK_FMT_BC4_SNORM = 0x00000069,
- VK_FMT_BC5_UNORM = 0x0000006A,
- VK_FMT_BC5_SNORM = 0x0000006B,
- VK_FMT_BC6H_UFLOAT = 0x0000006C,
- VK_FMT_BC6H_SFLOAT = 0x0000006D,
- VK_FMT_BC7_UNORM = 0x0000006E,
- VK_FMT_BC7_SRGB = 0x0000006F,
- VK_FMT_ETC2_R8G8B8_UNORM = 0x00000070,
- VK_FMT_ETC2_R8G8B8_SRGB = 0x00000071,
- VK_FMT_ETC2_R8G8B8A1_UNORM = 0x00000072,
- VK_FMT_ETC2_R8G8B8A1_SRGB = 0x00000073,
- VK_FMT_ETC2_R8G8B8A8_UNORM = 0x00000074,
- VK_FMT_ETC2_R8G8B8A8_SRGB = 0x00000075,
- VK_FMT_EAC_R11_UNORM = 0x00000076,
- VK_FMT_EAC_R11_SNORM = 0x00000077,
- VK_FMT_EAC_R11G11_UNORM = 0x00000078,
- VK_FMT_EAC_R11G11_SNORM = 0x00000079,
- VK_FMT_ASTC_4x4_UNORM = 0x0000007A,
- VK_FMT_ASTC_4x4_SRGB = 0x0000007B,
- VK_FMT_ASTC_5x4_UNORM = 0x0000007C,
- VK_FMT_ASTC_5x4_SRGB = 0x0000007D,
- VK_FMT_ASTC_5x5_UNORM = 0x0000007E,
- VK_FMT_ASTC_5x5_SRGB = 0x0000007F,
- VK_FMT_ASTC_6x5_UNORM = 0x00000080,
- VK_FMT_ASTC_6x5_SRGB = 0x00000081,
- VK_FMT_ASTC_6x6_UNORM = 0x00000082,
- VK_FMT_ASTC_6x6_SRGB = 0x00000083,
- VK_FMT_ASTC_8x5_UNORM = 0x00000084,
- VK_FMT_ASTC_8x5_SRGB = 0x00000085,
- VK_FMT_ASTC_8x6_UNORM = 0x00000086,
- VK_FMT_ASTC_8x6_SRGB = 0x00000087,
- VK_FMT_ASTC_8x8_UNORM = 0x00000088,
- VK_FMT_ASTC_8x8_SRGB = 0x00000089,
- VK_FMT_ASTC_10x5_UNORM = 0x0000008A,
- VK_FMT_ASTC_10x5_SRGB = 0x0000008B,
- VK_FMT_ASTC_10x6_UNORM = 0x0000008C,
- VK_FMT_ASTC_10x6_SRGB = 0x0000008D,
- VK_FMT_ASTC_10x8_UNORM = 0x0000008E,
- VK_FMT_ASTC_10x8_SRGB = 0x0000008F,
- VK_FMT_ASTC_10x10_UNORM = 0x00000090,
- VK_FMT_ASTC_10x10_SRGB = 0x00000091,
- VK_FMT_ASTC_12x10_UNORM = 0x00000092,
- VK_FMT_ASTC_12x10_SRGB = 0x00000093,
- VK_FMT_ASTC_12x12_UNORM = 0x00000094,
- VK_FMT_ASTC_12x12_SRGB = 0x00000095,
- VK_FMT_B4G4R4A4_UNORM = 0x00000096,
- VK_FMT_B5G5R5A1_UNORM = 0x00000097,
- VK_FMT_B5G6R5_UNORM = 0x00000098,
- VK_FMT_B5G6R5_USCALED = 0x00000099,
- VK_FMT_B8G8R8_UNORM = 0x0000009A,
- VK_FMT_B8G8R8_SNORM = 0x0000009B,
- VK_FMT_B8G8R8_USCALED = 0x0000009C,
- VK_FMT_B8G8R8_SSCALED = 0x0000009D,
- VK_FMT_B8G8R8_UINT = 0x0000009E,
- VK_FMT_B8G8R8_SINT = 0x0000009F,
- VK_FMT_B8G8R8_SRGB = 0x000000A0,
- VK_FMT_B8G8R8A8_UNORM = 0x000000A1,
- VK_FMT_B8G8R8A8_SNORM = 0x000000A2,
- VK_FMT_B8G8R8A8_USCALED = 0x000000A3,
- VK_FMT_B8G8R8A8_SSCALED = 0x000000A4,
- VK_FMT_B8G8R8A8_UINT = 0x000000A5,
- VK_FMT_B8G8R8A8_SINT = 0x000000A6,
- VK_FMT_B8G8R8A8_SRGB = 0x000000A7,
- VK_FMT_B10G10R10A2_UNORM = 0x000000A8,
- VK_FMT_B10G10R10A2_SNORM = 0x000000A9,
- VK_FMT_B10G10R10A2_USCALED = 0x000000AA,
- VK_FMT_B10G10R10A2_SSCALED = 0x000000AB,
- VK_FMT_B10G10R10A2_UINT = 0x000000AC,
- VK_FMT_B10G10R10A2_SINT = 0x000000AD,
-
- VK_FMT_BEGIN_RANGE = VK_FMT_UNDEFINED,
- VK_FMT_END_RANGE = VK_FMT_B10G10R10A2_SINT,
- VK_NUM_FMT = (VK_FMT_END_RANGE - VK_FMT_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkFormat)
-} VkFormat;
-
-// Shader stage enumerant
-typedef enum VkPipelineShaderStage_
-{
- VK_SHADER_STAGE_VERTEX = 0,
- VK_SHADER_STAGE_TESS_CONTROL = 1,
- VK_SHADER_STAGE_TESS_EVALUATION = 2,
- VK_SHADER_STAGE_GEOMETRY = 3,
- VK_SHADER_STAGE_FRAGMENT = 4,
- VK_SHADER_STAGE_COMPUTE = 5,
-
- VK_SHADER_STAGE_BEGIN_RANGE = VK_SHADER_STAGE_VERTEX,
- VK_SHADER_STAGE_END_RANGE = VK_SHADER_STAGE_COMPUTE,
- VK_NUM_SHADER_STAGE = (VK_SHADER_STAGE_END_RANGE - VK_SHADER_STAGE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkPipelineShaderStage)
-} VkPipelineShaderStage;
-
-typedef enum VkShaderStageFlags_
-{
- VK_SHADER_STAGE_FLAGS_VERTEX_BIT = 0x00000001,
- VK_SHADER_STAGE_FLAGS_TESS_CONTROL_BIT = 0x00000002,
- VK_SHADER_STAGE_FLAGS_TESS_EVALUATION_BIT = 0x00000004,
- VK_SHADER_STAGE_FLAGS_GEOMETRY_BIT = 0x00000008,
- VK_SHADER_STAGE_FLAGS_FRAGMENT_BIT = 0x00000010,
- VK_SHADER_STAGE_FLAGS_COMPUTE_BIT = 0x00000020,
-
- VK_SHADER_STAGE_FLAGS_ALL = 0x7FFFFFFF,
- VK_MAX_ENUM(VkShaderStageFlags)
-} VkShaderStageFlags;
-
-// Structure type enumerant
-typedef enum VkStructureType_
-{
- VK_STRUCTURE_TYPE_APPLICATION_INFO = 0,
- VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 1,
- VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO = 2,
- VK_STRUCTURE_TYPE_MEMORY_OPEN_INFO = 4,
- VK_STRUCTURE_TYPE_PEER_MEMORY_OPEN_INFO = 5,
- VK_STRUCTURE_TYPE_BUFFER_VIEW_ATTACH_INFO = 6,
- VK_STRUCTURE_TYPE_IMAGE_VIEW_ATTACH_INFO = 7,
- VK_STRUCTURE_TYPE_EVENT_WAIT_INFO = 8,
- VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 9,
- VK_STRUCTURE_TYPE_COLOR_ATTACHMENT_VIEW_CREATE_INFO = 10,
- VK_STRUCTURE_TYPE_DEPTH_STENCIL_VIEW_CREATE_INFO = 11,
- VK_STRUCTURE_TYPE_SHADER_CREATE_INFO = 12,
- VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 13,
- VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 14,
- VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 15,
- VK_STRUCTURE_TYPE_DYNAMIC_VP_STATE_CREATE_INFO = 16,
- VK_STRUCTURE_TYPE_DYNAMIC_RS_STATE_CREATE_INFO = 17,
- VK_STRUCTURE_TYPE_DYNAMIC_CB_STATE_CREATE_INFO = 18,
- VK_STRUCTURE_TYPE_DYNAMIC_DS_STATE_CREATE_INFO = 19,
- VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO = 20,
- VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 21,
- VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 22,
- VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 23,
- VK_STRUCTURE_TYPE_SEMAPHORE_OPEN_INFO = 24,
- VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 25,
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 26,
- VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 27,
- VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_CREATE_INFO = 28,
- VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO = 29,
- VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO = 30,
- VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO = 31,
- VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO = 32,
- VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO = 33,
- VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO = 34,
- VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO = 35,
- VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 36,
- VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 37,
- VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 38,
- VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 39,
- VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO = 40,
- VK_STRUCTURE_TYPE_CMD_BUFFER_GRAPHICS_BEGIN_INFO = 41,
- VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 42,
- VK_STRUCTURE_TYPE_LAYER_CREATE_INFO = 43,
- VK_STRUCTURE_TYPE_PIPELINE_BARRIER = 44,
- VK_STRUCTURE_TYPE_MEMORY_BARRIER = 45,
- VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 46,
- VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 47,
- VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 48,
- VK_STRUCTURE_TYPE_UPDATE_SAMPLERS = 49,
- VK_STRUCTURE_TYPE_UPDATE_SAMPLER_TEXTURES = 50,
- VK_STRUCTURE_TYPE_UPDATE_IMAGES = 51,
- VK_STRUCTURE_TYPE_UPDATE_BUFFERS = 52,
- VK_STRUCTURE_TYPE_UPDATE_AS_COPY = 53,
- VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 54,
-
- VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
- VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
-
- VK_NUM_STRUCTURE_TYPE = (VK_STRUCTURE_TYPE_END_RANGE - VK_STRUCTURE_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkStructureType)
-} VkStructureType;
+ VK_MAX_ENUM(RESULT)
+} VkResult;
// ------------------------------------------------------------------------------------------------
// Flags
// Device creation flags
-typedef enum VkDeviceCreateFlags_
+typedef VkFlags VkDeviceCreateFlags;
+typedef enum VkDeviceCreateFlagBits_
{
- VK_DEVICE_CREATE_VALIDATION_BIT = 0x00000001,
- VK_DEVICE_CREATE_MGPU_IQ_MATCH_BIT = 0x00000002,
- VK_MAX_ENUM(VkDeviceCreateFlags)
-} VkDeviceCreateFlags;
+ VK_DEVICE_CREATE_VALIDATION_BIT = VK_BIT(0),
+ VK_DEVICE_CREATE_MULTI_DEVICE_IQ_MATCH_BIT = VK_BIT(1),
+} VkDeviceCreateFlagBits;
// Queue capabilities
-typedef enum VkQueueFlags_
-{
- VK_QUEUE_GRAPHICS_BIT = 0x00000001, // Queue supports graphics operations
- VK_QUEUE_COMPUTE_BIT = 0x00000002, // Queue supports compute operations
- VK_QUEUE_DMA_BIT = 0x00000004, // Queue supports DMA operations
- VK_QUEUE_MEMMGR_BIT = 0x00000008, // Queue supports memory management operations
- VK_QUEUE_EXTENDED_BIT = 0x40000000, // Extended queue
- VK_MAX_ENUM(VkQueueFlags)
-} VkQueueFlags;
-
-// memory properties passed into vkAllocMemory().
-typedef enum VkMemoryPropertyFlags_
-{
- VK_MEMORY_PROPERTY_GPU_ONLY = 0x00000000, // If not set, then allocate memory on device (GPU)
- VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT = 0x00000001,
- VK_MEMORY_PROPERTY_CPU_GPU_COHERENT_BIT = 0x00000002,
- VK_MEMORY_PROPERTY_CPU_UNCACHED_BIT = 0x00000004,
- VK_MEMORY_PROPERTY_CPU_WRITE_COMBINED_BIT = 0x00000008,
- VK_MEMORY_PROPERTY_PREFER_CPU_LOCAL = 0x00000010, // all else being equal, prefer CPU access
- VK_MEMORY_PROPERTY_SHAREABLE_BIT = 0x00000020,
- VK_MAX_ENUM(VkMemoryPropertyFlags)
-} VkMemoryPropertyFlags;
-
-// Buffer and buffer allocation usage flags
-typedef enum VkBufferUsageFlags_
-{
- VK_BUFFER_USAGE_GENERAL = 0x00000000, // No special usage
- VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT = 0x00000001, // Can be used as a source of transfer operations
- VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT = 0x00000002, // Can be used as a destination of transfer operations
- VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004, // Can be used as TBO
- VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008, // Can be used as IBO
- VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010, // Can be used as UBO
- VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020, // Can be used as SSBO
- VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040, // Can be used as source of fixed function index fetch (index buffer)
- VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080, // Can be used as source of fixed function vertex fetch (VBO)
- VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100, // Can be the source of indirect parameters (e.g. indirect buffer, parameter buffer)
- VK_MAX_ENUM(VkBufferUsageFlags)
-} VkBufferUsageFlags;
-
-// Buffer flags
-typedef enum VkBufferCreateFlags_
-{
- VK_BUFFER_CREATE_SHAREABLE_BIT = 0x00000001,
- VK_BUFFER_CREATE_SPARSE_BIT = 0x00000002,
- VK_MAX_ENUM(VkBufferCreateFlags)
-} VkBufferCreateFlags;
-
-typedef enum VkBufferViewType_
-{
- VK_BUFFER_VIEW_RAW = 0x00000000, // Raw buffer without special structure (UBO, SSBO)
- VK_BUFFER_VIEW_FORMATTED = 0x00000001, // Buffer with format (TBO, IBO)
-
- VK_BUFFER_VIEW_TYPE_BEGIN_RANGE = VK_BUFFER_VIEW_RAW,
- VK_BUFFER_VIEW_TYPE_END_RANGE = VK_BUFFER_VIEW_FORMATTED,
- VK_NUM_BUFFER_VIEW_TYPE = (VK_BUFFER_VIEW_TYPE_END_RANGE - VK_BUFFER_VIEW_TYPE_BEGIN_RANGE + 1),
- VK_MAX_ENUM(VkBufferViewType)
-} VkBufferViewType;
-
-// Image and image allocation usage flags
-typedef enum VkImageUsageFlags_
-{
- VK_IMAGE_USAGE_GENERAL = 0x00000000, // no special usage
- VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT = 0x00000001, // Can be used as a source of transfer operations
- VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT = 0x00000002, // Can be used as a destination of transfer operations
- VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004, // Can be sampled from (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)
- VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008, // Can be used as storage image (STORAGE_IMAGE descriptor type)
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010, // Can be used as framebuffer color attachment
- VK_IMAGE_USAGE_DEPTH_STENCIL_BIT = 0x00000020, // Can be used as framebuffer depth/stencil attachment
- VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040, // Image data not needed outside of rendering
- VK_MAX_ENUM(VkImageUsageFlags)
-} VkImageUsageFlags;
-
-// Image flags
-typedef enum VkImageCreateFlags_
-{
- VK_IMAGE_CREATE_INVARIANT_DATA_BIT = 0x00000001,
- VK_IMAGE_CREATE_CLONEABLE_BIT = 0x00000002,
- VK_IMAGE_CREATE_SHAREABLE_BIT = 0x00000004,
- VK_IMAGE_CREATE_SPARSE_BIT = 0x00000008,
- VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000010, // Allows image views to have different format than the base image
- VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000020, // Allows creating image views with cube type from the created image
- VK_MAX_ENUM(VkImageCreateFlags)
-} VkImageCreateFlags;
+typedef VkFlags VkQueueFlags;
+typedef enum VkQueueFlagBits_
+{
+ VK_QUEUE_GRAPHICS_BIT = VK_BIT(0), // Queue supports graphics operations
+ VK_QUEUE_COMPUTE_BIT = VK_BIT(1), // Queue supports compute operations
+ VK_QUEUE_DMA_BIT = VK_BIT(2), // Queue supports DMA operations
+ VK_QUEUE_MEMMGR_BIT = VK_BIT(3), // Queue supports memory management operations
+ VK_QUEUE_EXTENDED_BIT = VK_BIT(30), // Extended queue
+} VkQueueFlagBits;
+
+// Memory properties passed into vkAllocMemory().
+typedef VkFlags VkMemoryPropertyFlags;
+typedef enum VkMemoryPropertyFlagBits_
+{
+ VK_MEMORY_PROPERTY_DEVICE_ONLY = 0, // If no other property bits are set, memory is allocated on the device
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = VK_BIT(0), // Memory should be mappable by host
+ VK_MEMORY_PROPERTY_HOST_DEVICE_COHERENT_BIT = VK_BIT(1), // Memory should be coherent between host and device accesses
+ VK_MEMORY_PROPERTY_HOST_UNCACHED_BIT = VK_BIT(2), // Memory should not be cached by the host
+ VK_MEMORY_PROPERTY_HOST_WRITE_COMBINED_BIT = VK_BIT(3), // Memory should support host write combining
+ VK_MEMORY_PROPERTY_PREFER_HOST_LOCAL = VK_BIT(4), // If set, prefer host access
+ VK_MEMORY_PROPERTY_SHAREABLE_BIT = VK_BIT(5),
+} VkMemoryPropertyFlagBits;
+
+// Memory output flags passed to resource transition commands
+typedef VkFlags VkMemoryOutputFlags;
+typedef enum VkMemoryOutputFlagBits_
+{
+ VK_MEMORY_OUTPUT_CPU_WRITE_BIT = VK_BIT(0), // Controls output coherency of CPU writes
+ VK_MEMORY_OUTPUT_SHADER_WRITE_BIT = VK_BIT(1), // Controls output coherency of generic shader writes
+ VK_MEMORY_OUTPUT_COLOR_ATTACHMENT_BIT = VK_BIT(2), // Controls output coherency of color attachment writes
+ VK_MEMORY_OUTPUT_DEPTH_STENCIL_ATTACHMENT_BIT = VK_BIT(3), // Controls output coherency of depth/stencil attachment writes
+ VK_MEMORY_OUTPUT_TRANSFER_BIT = VK_BIT(4), // Controls output coherency of transfer operations
+} VkMemoryOutputFlagBits;
+
+// Memory input flags passed to resource transition commands
+typedef VkFlags VkMemoryInputFlags;
+typedef enum VkMemoryInputFlagBits_
+{
+ VK_MEMORY_INPUT_CPU_READ_BIT = VK_BIT(0), // Controls input coherency of CPU reads
+ VK_MEMORY_INPUT_INDIRECT_COMMAND_BIT = VK_BIT(1), // Controls input coherency of indirect command reads
+ VK_MEMORY_INPUT_INDEX_FETCH_BIT = VK_BIT(2), // Controls input coherency of index fetches
+ VK_MEMORY_INPUT_VERTEX_ATTRIBUTE_FETCH_BIT = VK_BIT(3), // Controls input coherency of vertex attribute fetches
+ VK_MEMORY_INPUT_UNIFORM_READ_BIT = VK_BIT(4), // Controls input coherency of uniform buffer reads
+ VK_MEMORY_INPUT_SHADER_READ_BIT = VK_BIT(5), // Controls input coherency of generic shader reads
+ VK_MEMORY_INPUT_COLOR_ATTACHMENT_BIT = VK_BIT(6), // Controls input coherency of color attachment reads
+ VK_MEMORY_INPUT_DEPTH_STENCIL_ATTACHMENT_BIT = VK_BIT(7), // Controls input coherency of depth/stencil attachment reads
+ VK_MEMORY_INPUT_TRANSFER_BIT = VK_BIT(8), // Controls input coherency of transfer operations
+} VkMemoryInputFlagBits;
+
+// Buffer usage flags
+typedef VkFlags VkBufferUsageFlags;
+typedef enum VkBufferUsageFlagBits_
+{
+ VK_BUFFER_USAGE_GENERAL = 0, // No special usage
+ VK_BUFFER_USAGE_TRANSFER_SOURCE_BIT = VK_BIT(0), // Can be used as a source of transfer operations
+ VK_BUFFER_USAGE_TRANSFER_DESTINATION_BIT = VK_BIT(1), // Can be used as a destination of transfer operations
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = VK_BIT(2), // Can be used as TBO
+ VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = VK_BIT(3), // Can be used as IBO
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = VK_BIT(4), // Can be used as UBO
+ VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = VK_BIT(5), // Can be used as SSBO
+ VK_BUFFER_USAGE_INDEX_BUFFER_BIT = VK_BIT(6), // Can be used as source of fixed function index fetch (index buffer)
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = VK_BIT(7), // Can be used as source of fixed function vertex fetch (VBO)
+ VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = VK_BIT(8), // Can be the source of indirect parameters (e.g. indirect buffer, parameter buffer)
+} VkBufferUsageFlagBits;
+
+// Buffer creation flags
+typedef VkFlags VkBufferCreateFlags;
+typedef enum VkBufferCreateFlagBits_
+{
+ VK_BUFFER_CREATE_SHAREABLE_BIT = VK_BIT(0), // Buffer should be shareable
+ VK_BUFFER_CREATE_SPARSE_BIT = VK_BIT(1), // Buffer should support sparse backing
+} VkBufferCreateFlagBits;
+
+// Shader stage flags
+typedef VkFlags VkShaderStageFlags;
+typedef enum VkShaderStageFlagBits_
+{
+ VK_SHADER_STAGE_VERTEX_BIT = VK_BIT(0),
+ VK_SHADER_STAGE_TESS_CONTROL_BIT = VK_BIT(1),
+ VK_SHADER_STAGE_TESS_EVALUATION_BIT = VK_BIT(2),
+ VK_SHADER_STAGE_GEOMETRY_BIT = VK_BIT(3),
+ VK_SHADER_STAGE_FRAGMENT_BIT = VK_BIT(4),
+ VK_SHADER_STAGE_COMPUTE_BIT = VK_BIT(5),
+
+ VK_SHADER_STAGE_ALL = 0x7FFFFFFF,
+} VkShaderStageFlagBits;
+
+// Image usage flags
+typedef VkFlags VkImageUsageFlags;
+typedef enum VkImageUsageFlagBits_
+{
+ VK_IMAGE_USAGE_GENERAL = 0, // No special usage
+ VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT = VK_BIT(0), // Can be used as a source of transfer operations
+ VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT = VK_BIT(1), // Can be used as a destination of transfer operations
+ VK_IMAGE_USAGE_SAMPLED_BIT = VK_BIT(2), // Can be sampled from (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)
+ VK_IMAGE_USAGE_STORAGE_BIT = VK_BIT(3), // Can be used as storage image (STORAGE_IMAGE descriptor type)
+ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = VK_BIT(4), // Can be used as framebuffer color attachment
+ VK_IMAGE_USAGE_DEPTH_STENCIL_BIT = VK_BIT(5), // Can be used as framebuffer depth/stencil attachment
+ VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = VK_BIT(6), // Image data not needed outside of rendering
+} VkImageUsageFlagBits;
+
+// Image creation flags
+typedef VkFlags VkImageCreateFlags;
+typedef enum VkImageCreateFlagBits_
+{
+ VK_IMAGE_CREATE_INVARIANT_DATA_BIT = VK_BIT(0),
+ VK_IMAGE_CREATE_CLONEABLE_BIT = VK_BIT(1),
+ VK_IMAGE_CREATE_SHAREABLE_BIT = VK_BIT(2), // Image should be shareable
+ VK_IMAGE_CREATE_SPARSE_BIT = VK_BIT(3), // Image should support sparse backing
+ VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = VK_BIT(4), // Allows image views to have different format than the base image
+ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = VK_BIT(5), // Allows creating image views with cube type from the created image
+} VkImageCreateFlagBits;
// Depth-stencil view creation flags
-typedef enum VkDepthStencilViewCreateFlags_
+typedef VkFlags VkDepthStencilViewCreateFlags;
+typedef enum VkDepthStencilViewCreateFlagBits_
{
- VK_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_DEPTH_BIT = 0x00000001,
- VK_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_STENCIL_BIT = 0x00000002,
- VK_MAX_ENUM(VkDepthStencilViewCreateFlags)
-} VkDepthStencilViewCreateFlags;
+ VK_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_DEPTH_BIT = VK_BIT(0),
+ VK_DEPTH_STENCIL_VIEW_CREATE_READ_ONLY_STENCIL_BIT = VK_BIT(1),
+} VkDepthStencilViewCreateFlagBits;
// Pipeline creation flags
-typedef enum VkPipelineCreateFlags_
+typedef VkFlags VkPipelineCreateFlags;
+typedef enum VkPipelineCreateFlagBits_
{
- VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
- VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
- VK_MAX_ENUM(VkPipelineCreateFlags)
-} VkPipelineCreateFlags;
+ VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = VK_BIT(0),
+ VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = VK_BIT(1),
+} VkPipelineCreateFlagBits;
+
+// Channel flags
+typedef VkFlags VkChannelFlags;
+typedef enum VkChannelFlagBits_
+{
+ VK_CHANNEL_R_BIT = VK_BIT(0),
+ VK_CHANNEL_G_BIT = VK_BIT(1),
+ VK_CHANNEL_B_BIT = VK_BIT(2),
+ VK_CHANNEL_A_BIT = VK_BIT(3),
+} VkChannelFlagBits;
// Fence creation flags
-typedef enum VkFenceCreateFlags_
+typedef VkFlags VkFenceCreateFlags;
+typedef enum VkFenceCreateFlagBits_
{
- VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
- VK_MAX_ENUM(VkFenceCreateFlags)
-} VkFenceCreateFlags;
+ VK_FENCE_CREATE_SIGNALED_BIT = VK_BIT(0),
+} VkFenceCreateFlagBits;
// Semaphore creation flags
-typedef enum VkSemaphoreCreateFlags_
+typedef VkFlags VkSemaphoreCreateFlags;
+typedef enum VkSemaphoreCreateFlagBits_
{
- VK_SEMAPHORE_CREATE_SHAREABLE_BIT = 0x00000001,
- VK_MAX_ENUM(VkSemaphoreCreateFlags)
-} VkSemaphoreCreateFlags;
+ VK_SEMAPHORE_CREATE_SHAREABLE_BIT = VK_BIT(0),
+} VkSemaphoreCreateFlagBits;
// Format capability flags
-typedef enum VkFormatFeatureFlags_
-{
- VK_FORMAT_SAMPLED_IMAGE_BIT = 0x00000001, // Format can be used for sampled images (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)
- VK_FORMAT_STORAGE_IMAGE_BIT = 0x00000002, // Format can be used for storage images (STORAGE_IMAGE descriptor type)
- VK_FORMAT_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004, // Format supports atomic operations in case it's used for storage images
- VK_FORMAT_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008, // Format can be used for uniform texel buffers (TBOs)
- VK_FORMAT_STORAGE_TEXEL_BUFFER_BIT = 0x00000010, // Format can be used for storage texel buffers (IBOs)
- VK_FORMAT_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020, // Format supports atomic operations in case it's used for storage texel buffers
- VK_FORMAT_VERTEX_BUFFER_BIT = 0x00000040, // Format can be used for vertex buffers (VBOs)
- VK_FORMAT_COLOR_ATTACHMENT_BIT = 0x00000080, // Format can be used for color attachment images
- VK_FORMAT_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100, // Format supports blending in case it's used for color attachment images
- VK_FORMAT_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200, // Format can be used for depth/stencil attachment images
- VK_FORMAT_CONVERSION_BIT = 0x00000400, // Format can be used as the source or destination of format converting blits
- VK_MAX_ENUM(VkFormatFeatureFlags)
-} VkFormatFeatureFlags;
-
-// Query flags
-typedef enum VkQueryControlFlags_
-{
- VK_QUERY_IMPRECISE_DATA_BIT = 0x00000001,
- VK_MAX_ENUM(VkQueryControlFlags)
-} VkQueryControlFlags;
+typedef VkFlags VkFormatFeatureFlags;
+typedef enum VkFormatFeatureFlagBits_
+{
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = VK_BIT(0), // Format can be used for sampled images (SAMPLED_IMAGE and COMBINED_IMAGE_SAMPLER descriptor types)
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = VK_BIT(1), // Format can be used for storage images (STORAGE_IMAGE descriptor type)
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = VK_BIT(2), // Format supports atomic operations in case it's used for storage images
+ VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = VK_BIT(3), // Format can be used for uniform texel buffers (TBOs)
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = VK_BIT(4), // Format can be used for storage texel buffers (IBOs)
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = VK_BIT(5), // Format supports atomic operations in case it's used for storage texel buffers
+ VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = VK_BIT(6), // Format can be used for vertex buffers (VBOs)
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = VK_BIT(7), // Format can be used for color attachment images
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = VK_BIT(8), // Format supports blending in case it's used for color attachment images
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = VK_BIT(9), // Format can be used for depth/stencil attachment images
+ VK_FORMAT_FEATURE_CONVERSION_BIT = VK_BIT(10), // Format can be used as the source or destination of format converting blits
+} VkFormatFeatureFlagBits;
+
+// Query control flags
+typedef VkFlags VkQueryControlFlags;
+typedef enum VkQueryControlFlagBits_
+{
+ VK_QUERY_CONTROL_CONSERVATIVE_BIT = VK_BIT(0), // Allow conservative results to be collected by the query
+} VkQueryControlFlagBits;
// Query result flags
-typedef enum VkQueryResultFlags_
-{
- VK_QUERY_RESULT_32_BIT = 0x00000000, // Results of the queries are written to the destination buffer as 32-bit values
- VK_QUERY_RESULT_64_BIT = 0x00000001, // Results of the queries are written to the destination buffer as 64-bit values
- // Duplicate enum result messes with validation
-// VK_QUERY_RESULT_NO_WAIT_BIT = 0x00000000, // Results of the queries aren't waited on before proceeding with the result copy
- VK_QUERY_RESULT_WAIT_BIT = 0x00000002, // Results of the queries are waited on before proceeding with the result copy
- VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004, // Besides the results of the query, the availability of the results is also written
- VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008 // Copy the partial results of the query even if the final results aren't available
-} VkQueryResultFlags;
-
-// GPU compatibility flags
-typedef enum VkGpuCompatibilityFlags_
-{
- VK_GPU_COMPAT_ASIC_FEATURES_BIT = 0x00000001,
- VK_GPU_COMPAT_IQ_MATCH_BIT = 0x00000002,
- VK_GPU_COMPAT_PEER_TRANSFER_BIT = 0x00000004,
- VK_GPU_COMPAT_SHARED_MEMORY_BIT = 0x00000008,
- VK_GPU_COMPAT_SHARED_SYNC_BIT = 0x00000010,
- VK_GPU_COMPAT_SHARED_GPU0_DISPLAY_BIT = 0x00000020,
- VK_GPU_COMPAT_SHARED_GPU1_DISPLAY_BIT = 0x00000040,
- VK_MAX_ENUM(VkGpuCompatibilityFlags)
-} VkGpuCompatibilityFlags;
-
-// Command buffer building flags
-typedef enum VkCmdBufferBuildFlags_
-{
- VK_CMD_BUFFER_OPTIMIZE_GPU_SMALL_BATCH_BIT = 0x00000001,
- VK_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT = 0x00000002,
- VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT = 0x00000004,
- VK_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT = 0x00000008,
- VK_MAX_ENUM(VkCmdBufferBuildFlags)
-} VkCmdBufferBuildFlags;
+typedef VkFlags VkQueryResultFlags;
+typedef enum VkQueryResultFlagBits_
+{
+ VK_QUERY_RESULT_32_BIT = 0, // Results of the queries are written to the destination buffer as 32-bit values
+ VK_QUERY_RESULT_64_BIT = VK_BIT(0), // Results of the queries are written to the destination buffer as 64-bit values
+ // VK_QUERY_RESULT_NO_WAIT_BIT = 0, // Results of the queries aren't waited on before proceeding with the result copy
+ VK_QUERY_RESULT_WAIT_BIT = VK_BIT(1), // Results of the queries are waited on before proceeding with the result copy
+ VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = VK_BIT(2), // Besides the results of the query, the availability of the results is also written
+ VK_QUERY_RESULT_PARTIAL_BIT = VK_BIT(3), // Copy the partial results of the query even if the final results aren't available
+} VkQueryResultFlagBits;
+
+// Physical device compatibility flags
+typedef VkFlags VkPhysicalDeviceCompatibilityFlags;
+typedef enum VkPhysicalDeviceCompatibilityFlagBits_
+{
+ VK_PHYSICAL_DEVICE_COMPATIBILITY_FEATURES_BIT = VK_BIT(0),
+ VK_PHYSICAL_DEVICE_COMPATIBILITY_IQ_MATCH_BIT = VK_BIT(1),
+ VK_PHYSICAL_DEVICE_COMPATIBILITY_PEER_TRANSFER_BIT = VK_BIT(2),
+ VK_PHYSICAL_DEVICE_COMPATIBILITY_SHARED_MEMORY_BIT = VK_BIT(3),
+ VK_PHYSICAL_DEVICE_COMPATIBILITY_SHARED_SYNC_BIT = VK_BIT(4),
+ VK_PHYSICAL_DEVICE_COMPATIBILITY_SHARED_DEVICE0_DISPLAY_BIT = VK_BIT(5),
+ VK_PHYSICAL_DEVICE_COMPATIBILITY_SHARED_DEVICE1_DISPLAY_BIT = VK_BIT(6),
+} VkPhysicalDeviceCompatibilityFlagBits;
+
+// Shader creation flags
+typedef VkFlags VkShaderCreateFlags;
+
+// Event creation flags
+typedef VkFlags VkEventCreateFlags;
+
+// Command buffer creation flags
+typedef VkFlags VkCmdBufferCreateFlags;
+
+// Command buffer optimization flags
+typedef VkFlags VkCmdBufferOptimizeFlags;
+typedef enum VkCmdBufferOptimizeFlagBits_
+{
+ VK_CMD_BUFFER_OPTIMIZE_SMALL_BATCH_BIT = VK_BIT(0),
+ VK_CMD_BUFFER_OPTIMIZE_PIPELINE_SWITCH_BIT = VK_BIT(1),
+ VK_CMD_BUFFER_OPTIMIZE_ONE_TIME_SUBMIT_BIT = VK_BIT(2),
+ VK_CMD_BUFFER_OPTIMIZE_DESCRIPTOR_SET_SWITCH_BIT = VK_BIT(3),
+} VkCmdBufferOptimizeFlagBits;
+
+// Memory mapping flags
+typedef VkFlags VkMemoryMapFlags;
// ------------------------------------------------------------------------------------------------
// Vulkan structures
VkChannelSwizzle a;
} VkChannelMapping;
-typedef struct VkPhysicalGpuProperties_
+typedef struct VkPhysicalDeviceProperties_
{
uint32_t apiVersion;
uint32_t driverVersion;
uint32_t vendorId;
uint32_t deviceId;
- VkPhysicalGpuType gpuType;
- char gpuName[VK_MAX_PHYSICAL_GPU_NAME];
- VkGpuSize maxInlineMemoryUpdateSize;
+ VkPhysicalDeviceType deviceType;
+ char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME];
+ VkDeviceSize maxInlineMemoryUpdateSize;
uint32_t maxBoundDescriptorSets;
uint32_t maxThreadGroupSize;
uint64_t timestampFrequency;
uint32_t maxDescriptorSets; // at least 2?
uint32_t maxViewports; // at least 16?
uint32_t maxColorAttachments; // at least 8?
-} VkPhysicalGpuProperties;
+} VkPhysicalDeviceProperties;
-typedef struct VkPhysicalGpuPerformance_
+typedef struct VkPhysicalDevicePerformance_
{
- float maxGpuClock;
+ float maxDeviceClock;
float aluPerClock;
float texPerClock;
float primsPerClock;
float pixelsPerClock;
-} VkPhysicalGpuPerformance;
+} VkPhysicalDevicePerformance;
-typedef struct VkGpuCompatibilityInfo_
+typedef struct VkPhysicalDeviceCompatibilityInfo_
{
- VkFlags compatibilityFlags; // VkGpuCompatibilityFlags
-} VkGpuCompatibilityInfo;
+ VkPhysicalDeviceCompatibilityFlags compatibilityFlags;
+} VkPhysicalDeviceCompatibilityInfo;
typedef struct VkExtensionProperties_
{
const VkDeviceQueueCreateInfo* pRequestedQueues;
uint32_t extensionCount;
const char*const* ppEnabledExtensionNames;
- VkFlags flags; // VkDeviceCreateFlags
+ VkDeviceCreateFlags flags; // Device creation flags
} VkDeviceCreateInfo;
typedef struct VkInstanceCreateInfo_
const char *const* ppActiveLayerNames; // layer name from the layer's vkEnumerateLayers())
} VkLayerCreateInfo;
-typedef struct VkPhysicalGpuQueueProperties_
+typedef struct VkPhysicalDeviceQueueProperties_
{
- VkFlags queueFlags; // VkQueueFlags
+ VkQueueFlags queueFlags; // Queue flags
uint32_t queueCount;
uint32_t maxAtomicCounters;
bool32_t supportsTimestamps;
uint32_t maxMemReferences; // Tells how many memory references can be active for the given queue
-} VkPhysicalGpuQueueProperties;
+} VkPhysicalDeviceQueueProperties;
-typedef struct VkPhysicalGpuMemoryProperties_
+typedef struct VkPhysicalDeviceMemoryProperties_
{
bool32_t supportsMigration;
bool32_t supportsPinning;
-} VkPhysicalGpuMemoryProperties;
+} VkPhysicalDeviceMemoryProperties;
typedef struct VkMemoryAllocInfo_
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_ALLOC_INFO
const void* pNext; // Pointer to next structure
- VkGpuSize allocationSize; // Size of memory allocation
- VkFlags memProps; // VkMemoryPropertyFlags
+ VkDeviceSize allocationSize; // Size of memory allocation
+ VkMemoryPropertyFlags memProps; // Memory property flags
VkMemoryPriority memPriority;
} VkMemoryAllocInfo;
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_OPEN_INFO
const void* pNext; // Pointer to next structure
- VkGpuMemory sharedMem;
+ VkDeviceMemory sharedMem;
} VkMemoryOpenInfo;
typedef struct VkPeerMemoryOpenInfo_
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PEER_MEMORY_OPEN_INFO
const void* pNext; // Pointer to next structure
- VkGpuMemory originalMem;
+ VkDeviceMemory originalMem;
} VkPeerMemoryOpenInfo;
typedef struct VkMemoryRequirements_
{
- VkGpuSize size; // Specified in bytes
- VkGpuSize alignment; // Specified in bytes
- VkGpuSize granularity; // Granularity on which vkQueueBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size)
+ VkDeviceSize size; // Specified in bytes
+ VkDeviceSize alignment; // Specified in bytes
+ VkDeviceSize granularity; // Granularity on which vkBindObjectMemoryRange can bind sub-ranges of memory specified in bytes (usually the page size)
VkMemoryPropertyFlags memPropsAllowed; // Allowed memory property flags
VkMemoryPropertyFlags memPropsRequired; // Required memory property flags
} VkMemoryRequirements;
typedef struct VkFormatProperties_
{
- VkFlags linearTilingFeatures; // VkFormatFeatureFlags
- VkFlags optimalTilingFeatures; // VkFormatFeatureFlags
+ VkFormatFeatureFlags linearTilingFeatures; // Format features in case of linear tiling
+ VkFormatFeatureFlags optimalTilingFeatures; // Format features in case of optimal tiling
} VkFormatProperties;
typedef struct VkBufferViewAttachInfo_
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO
const void* pNext; // Pointer to next structure.
- VkGpuSize size; // Specified in bytes
- VkFlags usage; // VkBufferUsageFlags
- VkFlags flags; // VkBufferCreateFlags
+ VkDeviceSize size; // Specified in bytes
+ VkBufferUsageFlags usage; // Buffer usage flags
+ VkBufferCreateFlags flags; // Buffer creation flags
} VkBufferCreateInfo;
typedef struct VkBufferViewCreateInfo_
VkBuffer buffer;
VkBufferViewType viewType;
VkFormat format; // Optionally specifies format of elements
- VkGpuSize offset; // Specified in bytes
- VkGpuSize range; // View size specified in bytes
+ VkDeviceSize offset; // Specified in bytes
+ VkDeviceSize range; // View size specified in bytes
} VkBufferViewCreateInfo;
typedef struct VkImageSubresource_
uint32_t arraySize;
} VkImageSubresourceRange;
-typedef struct VkEventWaitInfo_
-{
- VkStructureType sType; // Must be VK_STRUCTURE_TYPE_EVENT_WAIT_INFO
- const void* pNext; // Pointer to next structure.
-
- uint32_t eventCount; // Number of events to wait on
- const VkEvent* pEvents; // Array of event objects to wait on
-
- VkWaitEvent waitEvent; // Pipeline event where the wait should happen
-
- uint32_t memBarrierCount; // Number of memory barriers
- const void** ppMemBarriers; // Array of pointers to memory barriers (any of them can be either VkMemoryBarrier, VkBufferMemoryBarrier, or VkImageMemoryBarrier)
-} VkEventWaitInfo;
-
-typedef struct VkPipelineBarrier_
-{
- VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_BARRIER
- const void* pNext; // Pointer to next structure.
-
- uint32_t eventCount; // Number of events to wait on
- const VkPipeEvent* pEvents; // Array of pipeline events to wait on
-
- VkWaitEvent waitEvent; // Pipeline event where the wait should happen
-
- uint32_t memBarrierCount; // Number of memory barriers
- const void** ppMemBarriers; // Array of pointers to memory barriers (any of them can be either VkMemoryBarrier, VkBufferMemoryBarrier, or VkImageMemoryBarrier)
-} VkPipelineBarrier;
-
typedef struct VkMemoryBarrier_
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_MEMORY_BARRIER
const void* pNext; // Pointer to next structure.
- VkFlags outputMask; // Outputs the barrier should sync (see VkMemoryOutputFlags)
- VkFlags inputMask; // Inputs the barrier should sync to (see VkMemoryInputFlags)
+ VkMemoryOutputFlags outputMask; // Outputs the barrier should sync
+ VkMemoryInputFlags inputMask; // Inputs the barrier should sync to
} VkMemoryBarrier;
typedef struct VkBufferMemoryBarrier_
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER
const void* pNext; // Pointer to next structure.
- VkFlags outputMask; // Outputs the barrier should sync (see VkMemoryOutputFlags)
- VkFlags inputMask; // Inputs the barrier should sync to (see VkMemoryInputFlags)
+ VkMemoryOutputFlags outputMask; // Outputs the barrier should sync
+ VkMemoryInputFlags inputMask; // Inputs the barrier should sync to
VkBuffer buffer; // Buffer to sync
- VkGpuSize offset; // Offset within the buffer to sync
- VkGpuSize size; // Amount of bytes to sync
+ VkDeviceSize offset; // Offset within the buffer to sync
+ VkDeviceSize size; // Amount of bytes to sync
} VkBufferMemoryBarrier;
typedef struct VkImageMemoryBarrier_
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER
const void* pNext; // Pointer to next structure.
- VkFlags outputMask; // Outputs the barrier should sync (see VkMemoryOutputFlags)
- VkFlags inputMask; // Inputs the barrier should sync to (see VkMemoryInputFlags)
+ VkMemoryOutputFlags outputMask; // Outputs the barrier should sync
+ VkMemoryInputFlags inputMask; // Inputs the barrier should sync to
VkImageLayout oldLayout; // Current layout of the image
VkImageLayout newLayout; // New layout to transition the image to
uint32_t arraySize;
uint32_t samples;
VkImageTiling tiling;
- VkFlags usage; // VkImageUsageFlags
- VkFlags flags; // VkImageCreateFlags
+ VkImageUsageFlags usage; // Image usage flags
+ VkImageCreateFlags flags; // Image creation flags
} VkImageCreateInfo;
typedef struct VkPeerImageOpenInfo_
typedef struct VkSubresourceLayout_
{
- VkGpuSize offset; // Specified in bytes
- VkGpuSize size; // Specified in bytes
- VkGpuSize rowPitch; // Specified in bytes
- VkGpuSize depthPitch; // Specified in bytes
+ VkDeviceSize offset; // Specified in bytes
+ VkDeviceSize size; // Specified in bytes
+ VkDeviceSize rowPitch; // Specified in bytes
+ VkDeviceSize depthPitch; // Specified in bytes
} VkSubresourceLayout;
typedef struct VkImageViewCreateInfo_
uint32_t arraySize;
VkImage msaaResolveImage;
VkImageSubresourceRange msaaResolveSubResource;
- VkFlags flags; // VkDepthStencilViewCreateFlags
+ VkDepthStencilViewCreateFlags flags; // Depth stencil attachment view flags
} VkDepthStencilViewCreateInfo;
typedef struct VkColorAttachmentBindInfo_
typedef struct VkBufferCopy_
{
- VkGpuSize srcOffset; // Specified in bytes
- VkGpuSize destOffset; // Specified in bytes
- VkGpuSize copySize; // Specified in bytes
+ VkDeviceSize srcOffset; // Specified in bytes
+ VkDeviceSize destOffset; // Specified in bytes
+ VkDeviceSize copySize; // Specified in bytes
} VkBufferCopy;
typedef struct VkImageMemoryBindInfo_
typedef struct VkBufferImageCopy_
{
- VkGpuSize bufferOffset; // Specified in bytes
+ VkDeviceSize bufferOffset; // Specified in bytes
VkImageSubresource imageSubresource;
VkOffset3D imageOffset; // Specified in pixels for both compressed and uncompressed images
VkExtent3D imageExtent; // Specified in pixels for both compressed and uncompressed images
const void* pNext; // Pointer to next structure
size_t codeSize; // Specified in bytes
const void* pCode;
- VkFlags flags; // Reserved
+ VkShaderCreateFlags flags; // Reserved
} VkShaderCreateInfo;
typedef struct VkDescriptorSetLayoutBinding_
{
- VkDescriptorType descriptorType;
- uint32_t count;
- VkFlags stageFlags; // VkShaderStageFlags
- const VkSampler* pImmutableSamplers;
+ VkDescriptorType descriptorType; // Type of the descriptors in this binding
+ uint32_t count; // Number of descriptors in this binding
+ VkShaderStageFlags stageFlags; // Shader stages this binding is visible to
+ const VkSampler* pImmutableSamplers; // Immutable samplers (used if descriptor type is SAMPLER or COMBINED_IMAGE_SAMPLER, is either NULL or contains <count> number of elements)
} VkDescriptorSetLayoutBinding;
typedef struct VkDescriptorSetLayoutCreateInfo_
{
- VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
- const void* pNext; // Pointer to next structure
- uint32_t count; // Number of bindings in the descriptor set layout
- const VkDescriptorSetLayoutBinding* pBinding; // Array of descriptor set layout bindings
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
+ const void* pNext; // Pointer to next structure
+ uint32_t count; // Number of bindings in the descriptor set layout
+ const VkDescriptorSetLayoutBinding* pBinding; // Array of descriptor set layout bindings
} VkDescriptorSetLayoutCreateInfo;
typedef struct VkDescriptorTypeCount_
typedef struct VkPipelineShader_
{
- VkPipelineShaderStage stage;
+ VkShaderStage stage;
VkShader shader;
uint32_t linkConstBufferCount;
const VkLinkConstBuffer* pLinkConstBufferInfo;
typedef struct VkComputePipelineCreateInfo_
{
- VkStructureType sType; // Must be VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
- const void* pNext; // Pointer to next structure
+ VkStructureType sType; // Must be VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
+ const void* pNext; // Pointer to next structure
VkPipelineShader cs;
- VkFlags flags; // VkPipelineCreateFlags
+ VkPipelineCreateFlags flags; // Pipeline creation flags
VkDescriptorSetLayoutChain setLayoutChain;
uint32_t localSizeX;
uint32_t localSizeY;
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO
const void* pNext; // Pointer to next structure
- uint32_t numViewports;
+ uint32_t viewportCount;
VkCoordinateOrigin clipOrigin; // optional (GL45)
VkDepthMode depthMode; // optional (GL45)
} VkPipelineVpStateCreateInfo;
bool32_t rasterizerDiscardEnable;
bool32_t programPointSize; // optional (GL45)
VkCoordinateOrigin pointOrigin; // optional (GL45)
- VkProvokingVertexConvention provokingVertex; // optional (GL45)
+ VkProvokingVertex provokingVertex; // optional (GL45)
VkFillMode fillMode; // optional (GL45)
VkCullMode cullMode;
- VkFaceOrientation frontFace;
+ VkFrontFace frontFace;
} VkPipelineRsStateCreateInfo;
typedef struct VkPipelineMsStateCreateInfo_
VkFormat format;
VkBlend srcBlendColor;
VkBlend destBlendColor;
- VkBlendFunc blendFuncColor;
+ VkBlendOp blendOpColor;
VkBlend srcBlendAlpha;
VkBlend destBlendAlpha;
- VkBlendFunc blendFuncAlpha;
- uint8_t channelWriteMask;
+ VkBlendOp blendOpAlpha;
+ VkChannelFlags channelWriteMask;
} VkPipelineCbAttachmentState;
typedef struct VkPipelineCbStateCreateInfo_
VkStencilOp stencilFailOp;
VkStencilOp stencilPassOp;
VkStencilOp stencilDepthFailOp;
- VkCompareFunc stencilFunc;
+ VkCompareOp stencilCompareOp;
} VkStencilOpState;
typedef struct VkPipelineDsStateCreateInfo_
VkFormat format;
bool32_t depthTestEnable;
bool32_t depthWriteEnable;
- VkCompareFunc depthFunc;
+ VkCompareOp depthCompareOp;
bool32_t depthBoundsEnable; // optional (depth_bounds_test)
bool32_t stencilTestEnable;
VkStencilOpState front;
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VkFlags flags; // VkPipelineCreateFlags
+ VkPipelineCreateFlags flags; // Pipeline creation flags
VkDescriptorSetLayoutChain pSetLayoutChain;
} VkGraphicsPipelineCreateInfo;
VkTexAddress addressW;
float mipLodBias;
uint32_t maxAnisotropy;
- VkCompareFunc compareFunc;
+ VkCompareOp compareOp;
float minLod;
float maxLod;
- VkBorderColorType borderColorType;
+ VkBorderColor borderColor;
} VkSamplerCreateInfo;
typedef struct VkDynamicVpStateCreateInfo_
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t queueNodeIndex;
- VkFlags flags;
+ VkCmdBufferCreateFlags flags; // Command buffer creation flags
} VkCmdBufferCreateInfo;
typedef struct VkCmdBufferBeginInfo_
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_CMD_BUFFER_BEGIN_INFO
const void* pNext; // Pointer to next structure
- VkFlags flags; // VkCmdBufferBuildFlags
+ VkCmdBufferOptimizeFlags flags; // Command buffer optimization flags
} VkCmdBufferBeginInfo;
typedef struct VkRenderPassBegin_
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_EVENT_CREATE_INFO
const void* pNext; // Pointer to next structure
- VkFlags flags; // Reserved
+ VkEventCreateFlags flags; // Event creation flags
} VkEventCreateInfo;
typedef struct VkFenceCreateInfo_
{
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_FENCE_CREATE_INFO
const void* pNext; // Pointer to next structure
- VkFenceCreateFlags flags; // VkFenceCreateFlags
+ VkFenceCreateFlags flags; // Fence creation flags
} VkFenceCreateInfo;
typedef struct VkSemaphoreCreateInfo_
VkStructureType sType; // Must be VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO
const void* pNext; // Pointer to next structure
uint32_t initialCount;
- VkFlags flags; // VkSemaphoreCreateFlags
+ VkSemaphoreCreateFlags flags; // Semaphore creation flags
} VkSemaphoreCreateInfo;
typedef struct VkSemaphoreOpenInfo_
// API functions
typedef VkResult (VKAPI *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, VkInstance* pInstance);
typedef VkResult (VKAPI *PFN_vkDestroyInstance)(VkInstance instance);
-typedef VkResult (VKAPI *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalGpu* pPhysicalDevices);
-typedef VkResult (VKAPI *PFN_vkGetGpuInfo)(VkPhysicalGpu gpu, VkPhysicalGpuInfoType infoType, size_t* pDataSize, void* pData);
-typedef void * (VKAPI *PFN_vkGetProcAddr)(VkPhysicalGpu gpu, const char * pName);
-typedef VkResult (VKAPI *PFN_vkCreateDevice)(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice);
+typedef VkResult (VKAPI *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef VkResult (VKAPI *PFN_vkGetPhysicalDeviceInfo)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceInfoType infoType, size_t* pDataSize, void* pData);
+typedef void * (VKAPI *PFN_vkGetProcAddr)(VkPhysicalDevice physicalDevice, const char * pName);
+typedef VkResult (VKAPI *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice);
typedef VkResult (VKAPI *PFN_vkDestroyDevice)(VkDevice device);
typedef VkResult (VKAPI *PFN_vkGetGlobalExtensionInfo)(VkExtensionInfoType infoType, uint32_t extensionIndex, size_t* pDataSize, void* pData);
-typedef VkResult (VKAPI *PFN_vkGetPhysicalDeviceExtensionInfo)(VkPhysicalGpu gpu, VkExtensionInfoType infoType, uint32_t extensionIndex, size_t* pDataSize, void* pData);
-typedef VkResult (VKAPI *PFN_vkEnumerateLayers)(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved);
+typedef VkResult (VKAPI *PFN_vkGetPhysicalDeviceExtensionInfo)(VkPhysicalDevice physicalDevice, VkExtensionInfoType infoType, uint32_t extensionIndex, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI *PFN_vkGetExtensionSupport)(VkPhysicalDevice physicalDevice, const char* pExtName);
+typedef VkResult (VKAPI *PFN_vkEnumerateLayers)(VkPhysicalDevice physicalDevice, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved);
typedef VkResult (VKAPI *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue* pQueue);
typedef VkResult (VKAPI *PFN_vkQueueSubmit)(VkQueue queue, uint32_t cmdBufferCount, const VkCmdBuffer* pCmdBuffers, VkFence fence);
-typedef VkResult (VKAPI *PFN_vkQueueAddMemReferences)(VkQueue queue, uint32_t count, const VkGpuMemory* pMems);
-typedef VkResult (VKAPI *PFN_vkQueueRemoveMemReferences)(VkQueue queue, uint32_t count, const VkGpuMemory* pMems);
+typedef VkResult (VKAPI *PFN_vkQueueAddMemReferences)(VkQueue queue, uint32_t count, const VkDeviceMemory* pMems);
+typedef VkResult (VKAPI *PFN_vkQueueRemoveMemReferences)(VkQueue queue, uint32_t count, const VkDeviceMemory* pMems);
typedef VkResult (VKAPI *PFN_vkQueueWaitIdle)(VkQueue queue);
typedef VkResult (VKAPI *PFN_vkDeviceWaitIdle)(VkDevice device);
-typedef VkResult (VKAPI *PFN_vkAllocMemory)(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkGpuMemory* pMem);
-typedef VkResult (VKAPI *PFN_vkFreeMemory)(VkGpuMemory mem);
-typedef VkResult (VKAPI *PFN_vkSetMemoryPriority)(VkGpuMemory mem, VkMemoryPriority priority);
-typedef VkResult (VKAPI *PFN_vkMapMemory)(VkGpuMemory mem, VkFlags flags, void** ppData);
-typedef VkResult (VKAPI *PFN_vkUnmapMemory)(VkGpuMemory mem);
-typedef VkResult (VKAPI *PFN_vkPinSystemMemory)(VkDevice device, const void* pSysMem, size_t memSize, VkGpuMemory* pMem);
-typedef VkResult (VKAPI *PFN_vkGetMultiGpuCompatibility)(VkPhysicalGpu gpu0, VkPhysicalGpu gpu1, VkGpuCompatibilityInfo* pInfo);
-typedef VkResult (VKAPI *PFN_vkOpenSharedMemory)(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkAllocMemory)(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkFreeMemory)(VkDeviceMemory mem);
+typedef VkResult (VKAPI *PFN_vkSetMemoryPriority)(VkDeviceMemory mem, VkMemoryPriority priority);
+typedef VkResult (VKAPI *PFN_vkMapMemory)(VkDeviceMemory mem, VkFlags flags, void** ppData);
+typedef VkResult (VKAPI *PFN_vkUnmapMemory)(VkDeviceMemory mem);
+typedef VkResult (VKAPI *PFN_vkPinSystemMemory)(VkDevice device, const void* pSysMem, size_t memSize, VkDeviceMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkGetMultiDeviceCompatibility)(VkPhysicalDevice physicalDevice0, VkPhysicalDevice physicalDevice1, VkPhysicalDeviceCompatibilityInfo* pInfo);
+typedef VkResult (VKAPI *PFN_vkOpenSharedMemory)(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkDeviceMemory* pMem);
typedef VkResult (VKAPI *PFN_vkOpenSharedSemaphore)(VkDevice device, const VkSemaphoreOpenInfo* pOpenInfo, VkSemaphore* pSemaphore);
-typedef VkResult (VKAPI *PFN_vkOpenPeerMemory)(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem);
-typedef VkResult (VKAPI *PFN_vkOpenPeerImage)(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkOpenPeerMemory)(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkDeviceMemory* pMem);
+typedef VkResult (VKAPI *PFN_vkOpenPeerImage)(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkDeviceMemory* pMem);
typedef VkResult (VKAPI *PFN_vkDestroyObject)(VkObject object);
typedef VkResult (VKAPI *PFN_vkGetObjectInfo)(VkBaseObject object, VkObjectInfoType infoType, size_t* pDataSize, void* pData);
-typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemory)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset);
-typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemoryRange)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset,VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset);
-typedef VkResult (VKAPI *PFN_vkQueueBindImageMemoryRange)(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* pBindInfo, VkGpuMemory mem, VkGpuSize memOffset);
+typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemory)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkDeviceMemory mem, VkDeviceSize offset);
+typedef VkResult (VKAPI *PFN_vkQueueBindObjectMemoryRange)(VkQueue queue, VkObject object, uint32_t allocationIdx, VkDeviceSize rangeOffset,VkDeviceSize rangeSize, VkDeviceMemory mem, VkDeviceSize memOffset);
+typedef VkResult (VKAPI *PFN_vkQueueBindImageMemoryRange)(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* pBindInfo, VkDeviceMemory mem, VkDeviceSize memOffset);
typedef VkResult (VKAPI *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, VkFence* pFence);
typedef VkResult (VKAPI *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, VkFence* pFences);
typedef VkResult (VKAPI *PFN_vkGetFenceStatus)(VkFence fence);
typedef VkResult (VKAPI *PFN_vkSetEvent)(VkEvent event);
typedef VkResult (VKAPI *PFN_vkResetEvent)(VkEvent event);
typedef VkResult (VKAPI *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, VkQueryPool* pQueryPool);
-typedef VkResult (VKAPI *PFN_vkGetQueryPoolResults)(VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI *PFN_vkGetQueryPoolResults)(VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData, VkQueryResultFlags flags);
typedef VkResult (VKAPI *PFN_vkGetFormatInfo)(VkDevice device, VkFormat format, VkFormatInfoType infoType, size_t* pDataSize, void* pData);
typedef VkResult (VKAPI *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, VkBuffer* pBuffer);
typedef VkResult (VKAPI *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, VkBufferView* pView);
typedef VkResult (VKAPI *PFN_vkResetDescriptorPool)(VkDescriptorPool descriptorPool);
typedef VkResult (VKAPI *PFN_vkAllocDescriptorSets)(VkDescriptorPool descriptorPool, VkDescriptorSetUsage setUsage, uint32_t count, const VkDescriptorSetLayout* pSetLayouts, VkDescriptorSet* pDescriptorSets, uint32_t* pCount);
typedef void (VKAPI *PFN_vkClearDescriptorSets)(VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet* pDescriptorSets);
-typedef void (VKAPI *PFN_vkUpdateDescriptors)(VkDescriptorSet descriptorSet, uint32_t updateCount, const void** pUpdateArray);
+typedef void (VKAPI *PFN_vkUpdateDescriptors)(VkDescriptorSet descriptorSet, uint32_t updateCount, const void** ppUpdateArray);
typedef VkResult (VKAPI *PFN_vkCreateDynamicViewportState)(VkDevice device, const VkDynamicVpStateCreateInfo* pCreateInfo, VkDynamicVpState* pState);
typedef VkResult (VKAPI *PFN_vkCreateDynamicRasterState)(VkDevice device, const VkDynamicRsStateCreateInfo* pCreateInfo, VkDynamicRsState* pState);
typedef VkResult (VKAPI *PFN_vkCreateDynamicColorBlendState)(VkDevice device, const VkDynamicCbStateCreateInfo* pCreateInfo, VkDynamicCbState* pState);
typedef void (VKAPI *PFN_vkCmdBindPipeline)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
typedef void (VKAPI *PFN_vkCmdBindDynamicStateObject)(VkCmdBuffer cmdBuffer, VkStateBindPoint stateBindPoint, VkDynamicStateObject state);
typedef void (VKAPI *PFN_vkCmdBindDescriptorSets)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, VkDescriptorSetLayoutChain layoutChain, uint32_t layoutChainSlot, uint32_t count, const VkDescriptorSet* pDescriptorSets, const uint32_t* pUserData);
-typedef void (VKAPI *PFN_vkCmdBindIndexBuffer)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType);
-typedef void (VKAPI *PFN_vkCmdBindVertexBuffers)(VkCmdBuffer cmdBuffer, uint32_t startBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkGpuSize* pOffsets);
+typedef void (VKAPI *PFN_vkCmdBindIndexBuffer)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef void (VKAPI *PFN_vkCmdBindVertexBuffers)(VkCmdBuffer cmdBuffer, uint32_t startBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
typedef void (VKAPI *PFN_vkCmdDraw)(VkCmdBuffer cmdBuffer, uint32_t firstVertex, uint32_t vertexCount, uint32_t firstInstance, uint32_t instanceCount);
typedef void (VKAPI *PFN_vkCmdDrawIndexed)(VkCmdBuffer cmdBuffer, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset, uint32_t firstInstance, uint32_t instanceCount);
-typedef void (VKAPI *PFN_vkCmdDrawIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride);
-typedef void (VKAPI *PFN_vkCmdDrawIndexedIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride);
+typedef void (VKAPI *PFN_vkCmdDrawIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride);
+typedef void (VKAPI *PFN_vkCmdDrawIndexedIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride);
typedef void (VKAPI *PFN_vkCmdDispatch)(VkCmdBuffer cmdBuffer, uint32_t x, uint32_t y, uint32_t z);
-typedef void (VKAPI *PFN_vkCmdDispatchIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset);
+typedef void (VKAPI *PFN_vkCmdDispatchIndirect)(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset);
typedef void (VKAPI *PFN_vkCmdCopyBuffer)(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkBuffer destBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
typedef void (VKAPI *PFN_vkCmdCopyImage)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
typedef void (VKAPI *PFN_vkCmdBlitImage)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageBlit* pRegions);
typedef void (VKAPI *PFN_vkCmdCopyBufferToImage)(VkCmdBuffer cmdBuffer, VkBuffer srcBuffer, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
typedef void (VKAPI *PFN_vkCmdCopyImageToBuffer)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer destBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
typedef void (VKAPI *PFN_vkCmdCloneImageData)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout);
-typedef void (VKAPI *PFN_vkCmdUpdateBuffer)(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData);
-typedef void (VKAPI *PFN_vkCmdFillBuffer)(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data);
+typedef void (VKAPI *PFN_vkCmdUpdateBuffer)(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const uint32_t* pData);
+typedef void (VKAPI *PFN_vkCmdFillBuffer)(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, uint32_t data);
typedef void (VKAPI *PFN_vkCmdClearColorImage)(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, VkClearColor color, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
typedef void (VKAPI *PFN_vkCmdClearDepthStencil)(VkCmdBuffer cmdBuffer, VkImage image, VkImageLayout imageLayout, float depth, uint32_t stencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
typedef void (VKAPI *PFN_vkCmdResolveImage)(VkCmdBuffer cmdBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage destImage, VkImageLayout destImageLayout, uint32_t regionCount, const VkImageResolve* pRegions);
typedef void (VKAPI *PFN_vkCmdSetEvent)(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent);
typedef void (VKAPI *PFN_vkCmdResetEvent)(VkCmdBuffer cmdBuffer, VkEvent event, VkPipeEvent pipeEvent);
-typedef void (VKAPI *PFN_vkCmdWaitEvents)(VkCmdBuffer cmdBuffer, const VkEventWaitInfo* pWaitInfo);
-typedef void (VKAPI *PFN_vkCmdPipelineBarrier)(VkCmdBuffer cmdBuffer, const VkPipelineBarrier* pBarrier);
-typedef void (VKAPI *PFN_vkCmdBeginQuery)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags);
+typedef void (VKAPI *PFN_vkCmdWaitEvents)(VkCmdBuffer cmdBuffer, VkWaitEvent waitEvent, uint32_t eventCount, const VkEvent* pEvents, uint32_t memBarrierCount, const void** ppMemBarriers);
+typedef void (VKAPI *PFN_vkCmdPipelineBarrier)(VkCmdBuffer cmdBuffer, VkWaitEvent waitEvent, uint32_t pipeEventCount, const VkPipeEvent* pPipeEvents, uint32_t memBarrierCount, const void** ppMemBarriers);
+typedef void (VKAPI *PFN_vkCmdBeginQuery)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkQueryControlFlags flags);
typedef void (VKAPI *PFN_vkCmdEndQuery)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot);
typedef void (VKAPI *PFN_vkCmdResetQueryPool)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount);
-typedef void (VKAPI *PFN_vkCmdWriteTimestamp)(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkGpuSize destOffset);
-typedef void (VKAPI *PFN_vkCmdCopyQueryPoolResults)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize destStride, VkFlags flags);
+typedef void (VKAPI *PFN_vkCmdWriteTimestamp)(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset);
+typedef void (VKAPI *PFN_vkCmdCopyQueryPoolResults)(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize destStride, VkQueryResultFlags flags);
typedef void (VKAPI *PFN_vkCmdInitAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, const uint32_t* pData);
-typedef void (VKAPI *PFN_vkCmdLoadAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkGpuSize srcOffset);
-typedef void (VKAPI *PFN_vkCmdSaveAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkGpuSize destOffset);
+typedef void (VKAPI *PFN_vkCmdLoadAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkDeviceSize srcOffset);
+typedef void (VKAPI *PFN_vkCmdSaveAtomicCounters)(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkDeviceSize destOffset);
typedef VkResult (VKAPI *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, VkFramebuffer* pFramebuffer);
typedef VkResult (VKAPI *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, VkRenderPass* pRenderPass);
typedef void (VKAPI *PFN_vkCmdBeginRenderPass)(VkCmdBuffer cmdBuffer, const VkRenderPassBegin* pRenderPassBegin);
#ifdef VK_PROTOTYPES
-// GPU initialization
+// Device initialization
VkResult VKAPI vkCreateInstance(
const VkInstanceCreateInfo* pCreateInfo,
VkResult VKAPI vkEnumeratePhysicalDevices(
VkInstance instance,
uint32_t* pPhysicalDeviceCount,
- VkPhysicalGpu* pPhysicalDevices);
+ VkPhysicalDevice* pPhysicalDevices);
-VkResult VKAPI vkGetGpuInfo(
- VkPhysicalGpu gpu,
- VkPhysicalGpuInfoType infoType,
+VkResult VKAPI vkGetPhysicalDeviceInfo(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceInfoType infoType,
size_t* pDataSize,
void* pData);
void * VKAPI vkGetProcAddr(
- VkPhysicalGpu gpu,
+ VkPhysicalDevice physicalDevice,
const char* pName);
// Device functions
VkResult VKAPI vkCreateDevice(
- VkPhysicalGpu gpu,
+ VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
VkDevice* pDevice);
void* pData);
VkResult VKAPI vkGetPhysicalDeviceExtensionInfo(
- VkPhysicalGpu gpu,
+ VkPhysicalDevice physicalDevice,
VkExtensionInfoType infoType,
uint32_t extensionIndex,
size_t* pDataSize,
// Layer discovery functions
VkResult VKAPI vkEnumerateLayers(
- VkPhysicalGpu gpu,
+ VkPhysicalDevice physicalDevice,
size_t maxLayerCount,
size_t maxStringSize,
size_t* pOutLayerCount,
VkResult VKAPI vkQueueAddMemReferences(
VkQueue queue,
uint32_t count,
- const VkGpuMemory* pMems);
+ const VkDeviceMemory* pMems);
VkResult VKAPI vkQueueRemoveMemReferences(
VkQueue queue,
uint32_t count,
- const VkGpuMemory* pMems);
+ const VkDeviceMemory* pMems);
VkResult VKAPI vkQueueWaitIdle(
VkQueue queue);
VkResult VKAPI vkAllocMemory(
VkDevice device,
const VkMemoryAllocInfo* pAllocInfo,
- VkGpuMemory* pMem);
+ VkDeviceMemory* pMem);
VkResult VKAPI vkFreeMemory(
- VkGpuMemory mem);
+ VkDeviceMemory mem);
VkResult VKAPI vkSetMemoryPriority(
- VkGpuMemory mem,
+ VkDeviceMemory mem,
VkMemoryPriority priority);
VkResult VKAPI vkMapMemory(
- VkGpuMemory mem,
- VkFlags flags, // Reserved
+ VkDeviceMemory mem,
+ VkMemoryMapFlags flags,
void** ppData);
VkResult VKAPI vkUnmapMemory(
- VkGpuMemory mem);
+ VkDeviceMemory mem);
VkResult VKAPI vkPinSystemMemory(
VkDevice device,
const void* pSysMem,
size_t memSize,
- VkGpuMemory* pMem);
+ VkDeviceMemory* pMem);
// Multi-device functions
-VkResult VKAPI vkGetMultiGpuCompatibility(
- VkPhysicalGpu gpu0,
- VkPhysicalGpu gpu1,
- VkGpuCompatibilityInfo* pInfo);
+VkResult VKAPI vkGetMultiDeviceCompatibility(
+ VkPhysicalDevice physicalDevice0,
+ VkPhysicalDevice physicalDevice1,
+ VkPhysicalDeviceCompatibilityInfo* pInfo);
VkResult VKAPI vkOpenSharedMemory(
VkDevice device,
const VkMemoryOpenInfo* pOpenInfo,
- VkGpuMemory* pMem);
+ VkDeviceMemory* pMem);
VkResult VKAPI vkOpenSharedSemaphore(
VkDevice device,
VkResult VKAPI vkOpenPeerMemory(
VkDevice device,
const VkPeerMemoryOpenInfo* pOpenInfo,
- VkGpuMemory* pMem);
+ VkDeviceMemory* pMem);
VkResult VKAPI vkOpenPeerImage(
VkDevice device,
const VkPeerImageOpenInfo* pOpenInfo,
VkImage* pImage,
- VkGpuMemory* pMem);
+ VkDeviceMemory* pMem);
// Generic API object functions
VkQueue queue,
VkObject object,
uint32_t allocationIdx,
- VkGpuMemory mem,
- VkGpuSize memOffset);
+ VkDeviceMemory mem,
+ VkDeviceSize memOffset);
VkResult VKAPI vkQueueBindObjectMemoryRange(
VkQueue queue,
VkObject object,
uint32_t allocationIdx,
- VkGpuSize rangeOffset,
- VkGpuSize rangeSize,
- VkGpuMemory mem,
- VkGpuSize memOffset);
+ VkDeviceSize rangeOffset,
+ VkDeviceSize rangeSize,
+ VkDeviceMemory mem,
+ VkDeviceSize memOffset);
VkResult VKAPI vkQueueBindImageMemoryRange(
VkQueue queue,
VkImage image,
uint32_t allocationIdx,
const VkImageMemoryBindInfo* pBindInfo,
- VkGpuMemory mem,
- VkGpuSize memOffset);
+ VkDeviceMemory mem,
+ VkDeviceSize memOffset);
// Fence functions
uint32_t startQuery,
uint32_t queryCount,
size_t* pDataSize,
- void* pData);
+ void* pData,
+ VkQueryResultFlags flags);
// Format capabilities
void VKAPI vkUpdateDescriptors(
VkDescriptorSet descriptorSet,
uint32_t updateCount,
- const void** pUpdateArray);
+ const void** ppUpdateArray);
// State object functions
uint32_t layoutChainSlot,
uint32_t count,
const VkDescriptorSet* pDescriptorSets,
- const uint32_t* pUserData);
+ const uint32_t * pUserData);
void VKAPI vkCmdBindIndexBuffer(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset,
+ VkDeviceSize offset,
VkIndexType indexType);
void VKAPI vkCmdBindVertexBuffers(
uint32_t startBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
- const VkGpuSize* pOffsets);
+ const VkDeviceSize* pOffsets);
void VKAPI vkCmdDraw(
VkCmdBuffer cmdBuffer,
void VKAPI vkCmdDrawIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset,
+ VkDeviceSize offset,
uint32_t count,
uint32_t stride);
void VKAPI vkCmdDrawIndexedIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset,
+ VkDeviceSize offset,
uint32_t count,
uint32_t stride);
void VKAPI vkCmdDispatchIndirect(
VkCmdBuffer cmdBuffer,
VkBuffer buffer,
- VkGpuSize offset);
+ VkDeviceSize offset);
void VKAPI vkCmdCopyBuffer(
VkCmdBuffer cmdBuffer,
void VKAPI vkCmdUpdateBuffer(
VkCmdBuffer cmdBuffer,
VkBuffer destBuffer,
- VkGpuSize destOffset,
- VkGpuSize dataSize,
+ VkDeviceSize destOffset,
+ VkDeviceSize dataSize,
const uint32_t* pData);
void VKAPI vkCmdFillBuffer(
VkCmdBuffer cmdBuffer,
VkBuffer destBuffer,
- VkGpuSize destOffset,
- VkGpuSize fillSize,
+ VkDeviceSize destOffset,
+ VkDeviceSize fillSize,
uint32_t data);
void VKAPI vkCmdClearColorImage(
void VKAPI vkCmdWaitEvents(
VkCmdBuffer cmdBuffer,
- const VkEventWaitInfo* pWaitInfo);
+ VkWaitEvent waitEvent,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ uint32_t memBarrierCount,
+ const void** ppMemBarriers);
void VKAPI vkCmdPipelineBarrier(
VkCmdBuffer cmdBuffer,
- const VkPipelineBarrier* pBarrier);
+ VkWaitEvent waitEvent,
+ uint32_t pipeEventCount,
+ const VkPipeEvent* pPipeEvents,
+ uint32_t memBarrierCount,
+ const void** ppMemBarriers);
void VKAPI vkCmdBeginQuery(
VkCmdBuffer cmdBuffer,
VkQueryPool queryPool,
uint32_t slot,
- VkFlags flags);
+ VkQueryControlFlags flags);
void VKAPI vkCmdEndQuery(
VkCmdBuffer cmdBuffer,
VkCmdBuffer cmdBuffer,
VkTimestampType timestampType,
VkBuffer destBuffer,
- VkGpuSize destOffset);
+ VkDeviceSize destOffset);
void VKAPI vkCmdCopyQueryPoolResults(
VkCmdBuffer cmdBuffer,
uint32_t startQuery,
uint32_t queryCount,
VkBuffer destBuffer,
- VkGpuSize destOffset,
- VkGpuSize destStride,
+ VkDeviceSize destOffset,
+ VkDeviceSize destStride,
VkFlags flags); // VkQueryResultFlags
void VKAPI vkCmdInitAtomicCounters(
uint32_t startCounter,
uint32_t counterCount,
VkBuffer srcBuffer,
- VkGpuSize srcOffset);
+ VkDeviceSize srcOffset);
void VKAPI vkCmdSaveAtomicCounters(
VkCmdBuffer cmdBuffer,
uint32_t startCounter,
uint32_t counterCount,
VkBuffer destBuffer,
- VkGpuSize destOffset);
+ VkDeviceSize destOffset);
VkResult VKAPI vkCreateFramebuffer(
VkDevice device,
return it->second;
}
- layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalGpu) gpuw->nextObject);
+ layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalDevice) gpuw->nextObject);
return pTable;
}
return VK_SUCCESS;
}
-VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
VkLayerDispatchTable* pTable = tableMap[gpu];
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
if (gpu != NULL)
{
// Example of a layer that is only compatible with Intel's GPUs
VkBaseLayerObject* gpuw = (VkBaseLayerObject*) pReserved;
- PFN_vkGetGpuInfo fpGetGpuInfo;
- VkPhysicalGpuProperties gpuProps;
- size_t dataSize = sizeof(VkPhysicalGpuProperties);
- fpGetGpuInfo = (PFN_vkGetGpuInfo) gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, "vkGetGpuInfo");
- fpGetGpuInfo((VkPhysicalGpu) gpuw->nextObject, VK_INFO_TYPE_PHYSICAL_GPU_PROPERTIES, &dataSize, &gpuProps);
+ PFN_vkGetPhysicalDeviceInfo fpGetGpuInfo;
+ VkPhysicalDeviceProperties gpuProps;
+ size_t dataSize = sizeof(VkPhysicalDeviceProperties);
+ fpGetGpuInfo = (PFN_vkGetPhysicalDeviceInfo) gpuw->pGPA((VkPhysicalDevice) gpuw->nextObject, "vkGetPhysicalDeviceInfo");
+ fpGetGpuInfo((VkPhysicalDevice) gpuw->nextObject, VK_PHYSICAL_DEVICE_INFO_TYPE_PROPERTIES, &dataSize, &gpuProps);
if (gpuProps.vendorId == 0x8086)
{
*pOutLayerCount = 1;
}
}
-VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char* pName)
{
if (gpu == NULL)
return NULL;
VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
+ return gpuw->pGPA((VkPhysicalDevice) gpuw->nextObject, pName);
}
}
fpNextGPA = pCurObj->pGPA;
assert(fpNextGPA);
- layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);
+ layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalDevice) pCurObj->nextObject);
if (!globalLockInitialized)
{
}
}
-VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
pCurObj = (VkBaseLayerObject *) gpu;
loader_platform_thread_once(&g_initOnce, initDrawState);
return VK_SUCCESS;
}
-VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
if (gpu != NULL)
{
VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicViewportState(VkDevice device, const VkDynamicVpStateCreateInfo* pCreateInfo, VkDynamicVpState* pState)
{
VkResult result = nextTable.CreateDynamicViewportState(device, pCreateInfo, pState);
- insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_VIEWPORT);
+ insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_POINT_VIEWPORT);
return result;
}
VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicRasterState(VkDevice device, const VkDynamicRsStateCreateInfo* pCreateInfo, VkDynamicRsState* pState)
{
VkResult result = nextTable.CreateDynamicRasterState(device, pCreateInfo, pState);
- insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_RASTER);
+ insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_POINT_RASTER);
return result;
}
VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicColorBlendState(VkDevice device, const VkDynamicCbStateCreateInfo* pCreateInfo, VkDynamicCbState* pState)
{
VkResult result = nextTable.CreateDynamicColorBlendState(device, pCreateInfo, pState);
- insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_COLOR_BLEND);
+ insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_POINT_COLOR_BLEND);
return result;
}
VK_LAYER_EXPORT VkResult VKAPI vkCreateDynamicDepthStencilState(VkDevice device, const VkDynamicDsStateCreateInfo* pCreateInfo, VkDynamicDsState* pState)
{
VkResult result = nextTable.CreateDynamicDepthStencilState(device, pCreateInfo, pState);
- insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_DEPTH_STENCIL);
+ insertDynamicState(*pState, (GENERIC_HEADER*)pCreateInfo, VK_STATE_BIND_POINT_DEPTH_STENCIL);
return result;
}
nextTable.CmdBindDescriptorSets(cmdBuffer, pipelineBindPoint, layoutChain, layoutChainSlot, count, pDescriptorSets, pUserData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType)
+VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
uint32_t startBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
- const VkGpuSize* pOffsets)
+ const VkDeviceSize* pOffsets)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdDrawIndexed(cmdBuffer, firstIndex, indexCount, vertexOffset, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdDispatch(cmdBuffer, x, y, z);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdCloneImageData(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout);
}
-VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const uint32_t* pData)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data)
+VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, uint32_t data)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdResetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VkCmdBuffer cmdBuffer, const VkEventWaitInfo* pWaitInfo)
+VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VkCmdBuffer cmdBuffer, VkWaitEvent waitEvent, uint32_t eventCount, const VkEvent* pEvents, uint32_t memBarrierCount, const void** ppMemBarriers)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
sprintf(str, "Attempt to use CmdBuffer %p that doesn't exist!", (void*)cmdBuffer);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, cmdBuffer, 0, DRAWSTATE_INVALID_CMD_BUFFER, "DS", str);
}
- nextTable.CmdWaitEvents(cmdBuffer, pWaitInfo);
+ nextTable.CmdWaitEvents(cmdBuffer, waitEvent, eventCount, pEvents, memBarrierCount, ppMemBarriers);
}
-VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VkCmdBuffer cmdBuffer, const VkPipelineBarrier* pBarrier)
+VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VkCmdBuffer cmdBuffer, VkWaitEvent waitEvent, uint32_t pipeEventCount, const VkPipeEvent* pPipeEvents, uint32_t memBarrierCount, const void** ppMemBarriers)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
sprintf(str, "Attempt to use CmdBuffer %p that doesn't exist!", (void*)cmdBuffer);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, cmdBuffer, 0, DRAWSTATE_INVALID_CMD_BUFFER, "DS", str);
}
- nextTable.CmdPipelineBarrier(cmdBuffer, pBarrier);
+ nextTable.CmdPipelineBarrier(cmdBuffer, waitEvent, pipeEventCount, pPipeEvents, memBarrierCount, ppMemBarriers);
}
VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
nextTable.CmdResetQueryPool(cmdBuffer, queryPool, startQuery, queryCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkGpuSize destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdInitAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkGpuSize srcOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkDeviceSize srcOffset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
nextTable.CmdLoadAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, srcBuffer, srcOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkGpuSize destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkDeviceSize destOffset)
{
GLOBAL_CB_NODE* pCB = getCBNode(cmdBuffer);
if (pCB) {
// This layer intercepts callbacks
VK_LAYER_DBG_FUNCTION_NODE* pNewDbgFuncNode = new VK_LAYER_DBG_FUNCTION_NODE;
if (!pNewDbgFuncNode)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
pNewDbgFuncNode->pfnMsgCallback = pfnMsgCallback;
pNewDbgFuncNode->pUserData = pUserData;
pNewDbgFuncNode->pNext = g_pDbgFunctionHead;
#endif // WIN32
}
-VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)
+VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char* funcName)
{
VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);
+ return gpuw->pGPA((VkPhysicalDevice)gpuw->nextObject, funcName);
}
}
#define MAX_BINDING 0xFFFFFFFF
map<VkCmdBuffer, MT_CB_INFO*> cbMap;
-map<VkGpuMemory, MT_MEM_OBJ_INFO*> memObjMap;
+map<VkDeviceMemory, MT_MEM_OBJ_INFO*> memObjMap;
map<VkObject, MT_OBJ_INFO*> objectMap;
map<uint64_t, MT_FENCE_INFO*> fenceMap; // Map fenceId to fence info
map<VkQueue, MT_QUEUE_INFO*> queueMap;
static void addCBInfo(const VkCmdBuffer cb)
{
MT_CB_INFO* pInfo = new MT_CB_INFO;
- memset(pInfo, 0, (sizeof(MT_CB_INFO) - sizeof(list<VkGpuMemory>)));
+ memset(pInfo, 0, (sizeof(MT_CB_INFO) - sizeof(list<VkDeviceMemory>)));
pInfo->cmdBuffer = cb;
cbMap[cb] = pInfo;
}
// Queue is validated by caller
static bool32_t checkMemRef(
VkQueue queue,
- VkGpuMemory mem)
+ VkDeviceMemory mem)
{
bool32_t result = VK_FALSE;
- list<VkGpuMemory>::iterator it;
+ list<VkDeviceMemory>::iterator it;
MT_QUEUE_INFO *pQueueInfo = queueMap[queue];
for (it = pQueueInfo->pMemRefList.begin(); it != pQueueInfo->pMemRefList.end(); ++it) {
if ((*it) == mem) {
result = VK_FALSE;
} else {
// Validate that all actual references are accounted for in pMemRefs
- for (list<VkGpuMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
// Search for each memref in queues memreflist.
if (checkMemRef(queue, *it)) {
char str[1024];
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
-static MT_MEM_OBJ_INFO* getMemObjInfo(const VkGpuMemory mem)
+static MT_MEM_OBJ_INFO* getMemObjInfo(const VkDeviceMemory mem)
{
MT_MEM_OBJ_INFO* pMemObjInfo = NULL;
return pMemObjInfo;
}
-static void addMemObjInfo(const VkGpuMemory mem, const VkMemoryAllocInfo* pAllocInfo)
+static void addMemObjInfo(const VkDeviceMemory mem, const VkMemoryAllocInfo* pAllocInfo)
{
MT_MEM_OBJ_INFO* pInfo = new MT_MEM_OBJ_INFO;
pInfo->refCount = 0;
// Find CB Info and add mem binding to list container
// Find Mem Obj Info and add CB binding to list container
-static bool32_t updateCBBinding(const VkCmdBuffer cb, const VkGpuMemory mem)
+static bool32_t updateCBBinding(const VkCmdBuffer cb, const VkDeviceMemory mem)
{
bool32_t result = VK_TRUE;
// First update CB binding in MemObj mini CB list
} else {
// Search for memory object in cmd buffer's binding list
bool32_t found = VK_FALSE;
- for (list<VkGpuMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
if ((*it) == mem) {
found = VK_TRUE;
break;
// Clear the CB Binding for mem
// Calls to this function should be wrapped in mutex
-static void clearCBBinding(const VkCmdBuffer cb, const VkGpuMemory mem)
+static void clearCBBinding(const VkCmdBuffer cb, const VkDeviceMemory mem)
{
MT_MEM_OBJ_INFO* pInfo = getMemObjInfo(mem);
// TODO : Having this check is not ideal, really if memInfo was deleted,
deleteFenceInfo(pCBInfo->fenceId);
}
- for (list<VkGpuMemory>::iterator it=pCBInfo->pMemObjList.begin(); it!=pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkDeviceMemory>::iterator it=pCBInfo->pMemObjList.begin(); it!=pCBInfo->pMemObjList.end(); ++it) {
clearCBBinding(cb, (*it));
}
pCBInfo->pMemObjList.clear();
}
}
-static void deleteMemObjInfo(VkGpuMemory mem)
+static void deleteMemObjInfo(VkDeviceMemory mem)
{
if (memObjMap.find(mem) != memObjMap.end()) {
MT_MEM_OBJ_INFO* pDelInfo = memObjMap[mem];
return result;
}
-static bool32_t freeMemObjInfo(VkGpuMemory mem, bool internal)
+static bool32_t freeMemObjInfo(VkDeviceMemory mem, bool internal)
{
bool32_t result = VK_TRUE;
// Parse global list to find info w/ mem
// Add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
-static bool32_t updateObjectBinding(VkObject object, VkGpuMemory mem)
+static bool32_t updateObjectBinding(VkObject object, VkDeviceMemory mem)
{
bool32_t result = VK_FALSE;
// Handle NULL case separately, just clear previous binding & decrement reference
}
// For given Object, get 'mem' obj that it's bound to or NULL if no binding
-static VkGpuMemory getMemBindingFromObject(const VkObject object)
+static VkDeviceMemory getMemBindingFromObject(const VkObject object)
{
- VkGpuMemory mem = NULL;
+ VkDeviceMemory mem = NULL;
MT_OBJ_INFO* pObjInfo = getObjectInfo(object);
if (pObjInfo) {
if (pObjInfo->pMemObjInfo) {
sprintf(str, "MEM INFO : Details of Memory Object list of size %lu elements", memObjMap.size());
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (map<VkGpuMemory, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
+ for (map<VkDeviceMemory, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
pInfo = (*ii).second;
sprintf(str, " ===MemObjInfo at %p===", (void*)pInfo);
(void*)getFenceFromId(pCBInfo->fenceId));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
- for (list<VkGpuMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
+ for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
sprintf(str, " Mem obj %p", (*it));
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, MEMTRACK_NONE, "MEM", str);
}
fpNextGPA = pCurObj->pGPA;
assert(fpNextGPA);
- layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);
+ layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalDevice) pCurObj->nextObject);
if (!globalLockInitialized)
{
}
}
-VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
pCurObj = (VkBaseLayerObject *) gpu;
loader_platform_thread_once(&g_initOnce, initMemTracker);
}
// Report any memory leaks
MT_MEM_OBJ_INFO* pInfo = NULL;
- for (map<VkGpuMemory, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
+ for (map<VkDeviceMemory, MT_MEM_OBJ_INFO*>::iterator ii=memObjMap.begin(); ii!=memObjMap.end(); ++ii) {
pInfo = (*ii).second;
if (pInfo->allocInfo.allocationSize != 0) {
return VK_SUCCESS;
}
-VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount,
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount,
size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
if (gpu != NULL)
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueAddMemReferences(VkQueue queue, uint32_t count, const VkGpuMemory* pMems)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueAddMemReferences(VkQueue queue, uint32_t count, const VkDeviceMemory* pMems)
{
VkResult result = nextTable.QueueAddMemReferences(queue, count, pMems);
if (result == VK_SUCCESS) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueRemoveMemReferences(VkQueue queue, uint32_t count, const VkGpuMemory* pMems)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueRemoveMemReferences(VkQueue queue, uint32_t count, const VkDeviceMemory* pMems)
{
// TODO : Decrement ref count for this memory reference on this queue. Remove if ref count is zero.
VkResult result = nextTable.QueueRemoveMemReferences(queue, count, pMems);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, queue, 0, MEMTRACK_INVALID_QUEUE, "MEM", str);
} else {
for (int i = 0; i < count; i++) {
- for (list<VkGpuMemory>::iterator it = pQueueInfo->pMemRefList.begin(); it != pQueueInfo->pMemRefList.end(); ++it) {
+ for (list<VkDeviceMemory>::iterator it = pQueueInfo->pMemRefList.begin(); it != pQueueInfo->pMemRefList.end(); ++it) {
if ((*it) == pMems[i]) {
it = pQueueInfo->pMemRefList.erase(it);
}
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkAllocMemory(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocMemory(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem)
{
VkResult result = nextTable.AllocMemory(device, pAllocInfo, pMem);
// TODO : Track allocations and overall size here
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkFreeMemory(VkGpuMemory mem)
+VK_LAYER_EXPORT VkResult VKAPI vkFreeMemory(VkDeviceMemory mem)
{
/* From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed. Before
* freeing a memory object, an application must ensure the memory object is unbound from
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkSetMemoryPriority(VkGpuMemory mem, VkMemoryPriority priority)
+VK_LAYER_EXPORT VkResult VKAPI vkSetMemoryPriority(VkDeviceMemory mem, VkMemoryPriority priority)
{
// TODO : Update tracking for this alloc
// Make sure memory is not pinned, which can't have priority set
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkMapMemory(VkGpuMemory mem, VkFlags flags, void** ppData)
+VK_LAYER_EXPORT VkResult VKAPI vkMapMemory(VkDeviceMemory mem, VkFlags flags, void** ppData)
{
// TODO : Track when memory is mapped
loader_platform_thread_lock_mutex(&globalLock);
MT_MEM_OBJ_INFO *pMemObj = getMemObjInfo(mem);
- if ((pMemObj->allocInfo.memProps & VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT) == 0) {
+ if ((pMemObj->allocInfo.memProps & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
char str[1024];
- sprintf(str, "Mapping Memory (%p) without VK_MEMORY_PROPERTY_CPU_VISIBLE_BIT set", (void*)mem);
+ sprintf(str, "Mapping Memory (%p) without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set", (void*)mem);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, mem, 0, MEMTRACK_INVALID_STATE, "MEM", str);
}
loader_platform_thread_unlock_mutex(&globalLock);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkUnmapMemory(VkGpuMemory mem)
+VK_LAYER_EXPORT VkResult VKAPI vkUnmapMemory(VkDeviceMemory mem)
{
// TODO : Track as memory gets unmapped, do we want to check what changed following map?
// Make sure that memory was ever mapped to begin with
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkPinSystemMemory(VkDevice device, const void* pSysMem, size_t memSize, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkPinSystemMemory(VkDevice device, const void* pSysMem, size_t memSize, VkDeviceMemory* pMem)
{
// TODO : Track this
// Verify that memory is actually pinnable
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedMemory(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedMemory(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkDeviceMemory* pMem)
{
// TODO : Track this
VkResult result = nextTable.OpenSharedMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerMemory(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerMemory(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkDeviceMemory* pMem)
{
// TODO : Track this
VkResult result = nextTable.OpenPeerMemory(device, pOpenInfo, pMem);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerImage(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerImage(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkDeviceMemory* pMem)
{
// TODO : Track this
VkResult result = nextTable.OpenPeerImage(device, pOpenInfo, pImage, pMem);
if (pDelInfo->pMemObjInfo) {
// Wsi allocated Memory is tied to image object so clear the binding and free that memory automatically
if (0 == pDelInfo->pMemObjInfo->allocInfo.allocationSize) { // Wsi allocated memory has NULL allocInfo w/ 0 size
- VkGpuMemory memToFree = pDelInfo->pMemObjInfo->mem;
+ VkDeviceMemory memToFree = pDelInfo->pMemObjInfo->mem;
clearObjectBinding(object);
freeMemObjInfo(memToFree, true);
}
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkDeviceMemory mem, VkDeviceSize offset)
{
VkResult result = nextTable.QueueBindObjectMemory(queue, object, allocationIdx, mem, offset);
loader_platform_thread_lock_mutex(&globalLock);
uint32_t startBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
- const VkGpuSize* pOffsets)
+ const VkDeviceSize* pOffsets)
{
nextTable.CmdBindVertexBuffers(cmdBuffer, startBinding, bindingCount, pBuffers, pOffsets);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType)
+VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType)
{
nextTable.CmdBindIndexBuffer(cmdBuffer, buffer, offset, indexType);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(buffer);
+ VkDeviceMemory mem = getMemBindingFromObject(buffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdDrawIndirect() call unable to update binding of buffer %p to cmdBuffer %p", buffer, cmdBuffer);
nextTable.CmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(buffer);
+ VkDeviceMemory mem = getMemBindingFromObject(buffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdDrawIndexedIndirect() call unable to update binding of buffer %p to cmdBuffer %p", buffer, cmdBuffer);
nextTable.CmdDrawIndexedIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(buffer);
+ VkDeviceMemory mem = getMemBindingFromObject(buffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdDispatchIndirect() call unable to update binding of buffer %p to cmdBuffer %p", buffer, cmdBuffer);
uint32_t regionCount, const VkBufferCopy* pRegions)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(srcBuffer);
+ VkDeviceMemory mem = getMemBindingFromObject(srcBuffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCopyBuffer() call unable to update binding of srcBuffer %p to cmdBuffer %p", srcBuffer, cmdBuffer);
{
// TODO : Track this
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(destImage);
+ VkDeviceMemory mem = getMemBindingFromObject(destImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCopyMemoryToImage() call unable to update binding of destImage buffer %p to cmdBuffer %p", destImage, cmdBuffer);
{
// TODO : Track this
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(srcImage);
+ VkDeviceMemory mem = getMemBindingFromObject(srcImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCopyImageToMemory() call unable to update binding of srcImage buffer %p to cmdBuffer %p", srcImage, cmdBuffer);
{
// TODO : Each image will have mem mapping so track them
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(srcImage);
+ VkDeviceMemory mem = getMemBindingFromObject(srcImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdCloneImageData() call unable to update binding of srcImage buffer %p to cmdBuffer %p", srcImage, cmdBuffer);
nextTable.CmdCloneImageData(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout);
}
-VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const uint32_t* pData)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(destBuffer);
+ VkDeviceMemory mem = getMemBindingFromObject(destBuffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdUpdateMemory() call unable to update binding of destBuffer %p to cmdBuffer %p", destBuffer, cmdBuffer);
nextTable.CmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data)
+VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, uint32_t data)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(destBuffer);
+ VkDeviceMemory mem = getMemBindingFromObject(destBuffer);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdFillMemory() call unable to update binding of destBuffer %p to cmdBuffer %p", destBuffer, cmdBuffer);
{
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(image);
+ VkDeviceMemory mem = getMemBindingFromObject(image);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdClearColorImage() call unable to update binding of image buffer %p to cmdBuffer %p", image, cmdBuffer);
{
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(image);
+ VkDeviceMemory mem = getMemBindingFromObject(image);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdClearDepthStencil() call unable to update binding of image buffer %p to cmdBuffer %p", image, cmdBuffer);
uint32_t regionCount, const VkImageResolve* pRegions)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(srcImage);
+ VkDeviceMemory mem = getMemBindingFromObject(srcImage);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdResolveImage() call unable to update binding of srcImage buffer %p to cmdBuffer %p", srcImage, cmdBuffer);
VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(queryPool);
+ VkDeviceMemory mem = getMemBindingFromObject(queryPool);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdBeginQuery() call unable to update binding of queryPool buffer %p to cmdBuffer %p", queryPool, cmdBuffer);
VK_LAYER_EXPORT void VKAPI vkCmdEndQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(queryPool);
+ VkDeviceMemory mem = getMemBindingFromObject(queryPool);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdEndQuery() call unable to update binding of queryPool buffer %p to cmdBuffer %p", queryPool, cmdBuffer);
VK_LAYER_EXPORT void VKAPI vkCmdResetQueryPool(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount)
{
loader_platform_thread_lock_mutex(&globalLock);
- VkGpuMemory mem = getMemBindingFromObject(queryPool);
+ VkDeviceMemory mem = getMemBindingFromObject(queryPool);
if (VK_FALSE == updateCBBinding(cmdBuffer, mem)) {
char str[1024];
sprintf(str, "In vkCmdResetQueryPool() call unable to update binding of queryPool buffer %p to cmdBuffer %p", queryPool, cmdBuffer);
// This layer intercepts callbacks
VK_LAYER_DBG_FUNCTION_NODE *pNewDbgFuncNode = (VK_LAYER_DBG_FUNCTION_NODE*)malloc(sizeof(VK_LAYER_DBG_FUNCTION_NODE));
if (!pNewDbgFuncNode)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
pNewDbgFuncNode->pfnMsgCallback = pfnMsgCallback;
pNewDbgFuncNode->pUserData = pUserData;
pNewDbgFuncNode->pNext = g_pDbgFunctionHead;
#if !defined(WIN32)
VK_LAYER_EXPORT VkResult VKAPI vkWsiX11CreatePresentableImage(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo,
- VkImage* pImage, VkGpuMemory* pMem)
+ VkImage* pImage, VkDeviceMemory* pMem)
{
VkResult result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
loader_platform_thread_lock_mutex(&globalLock);
if (VK_SUCCESS == result) {
// Add image object, then insert the new Mem Object and then bind it to created image
- addObjectInfo(*pImage, VkStructureType_MAX_ENUM, pCreateInfo, sizeof(VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO), "wsi_x11_image");
+ addObjectInfo(*pImage, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, sizeof(VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO), "wsi_x11_image");
addMemObjInfo(*pMem, NULL);
if (VK_FALSE == updateObjectBinding(*pImage, *pMem)) {
char str[1024];
}
#endif // WIN32
-VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)
+VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char* funcName)
{
VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);
+ return gpuw->pGPA((VkPhysicalDevice)gpuw->nextObject, funcName);
}
}
// Data struct for tracking memory object
struct MT_MEM_OBJ_INFO {
uint32_t refCount; // Count of references (obj bindings or CB use)
- VkGpuMemory mem;
+ VkDeviceMemory mem;
VkMemoryAllocInfo allocInfo;
list<VkObject> pObjBindings; // list container of objects bound to this memory
list<VkCmdBuffer> pCmdBufferBindings; // list container of cmd buffers that reference this mem object
VkCmdBuffer cmdBuffer;
uint64_t fenceId;
// Order dependent, stl containers must be at end of struct
- list<VkGpuMemory> pMemObjList; // List container of Mem objs referenced by this CB
+ list<VkDeviceMemory> pMemObjList; // List container of Mem objs referenced by this CB
};
// Associate fenceId with a fence object
uint64_t lastRetiredId;
uint64_t lastSubmittedId;
list<VkCmdBuffer> pQueueCmdBuffers;
- list<VkGpuMemory> pMemRefList;
+ list<VkDeviceMemory> pMemRefList;
};
#ifdef __cplusplus
#endif
-VK_LAYER_EXPORT VkResult VKAPI multi1CreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo,
+VK_LAYER_EXPORT VkResult VKAPI multi1CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo,
VkDevice* pDevice)
{
VkLayerDispatchTable* pTable = tableMap1[gpu];
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI multi1EnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize,
+VK_LAYER_EXPORT VkResult VKAPI multi1EnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize,
size_t* pOutLayerCount, char* const* pOutLayers,
void* pReserved)
{
return result;
}
-VK_LAYER_EXPORT void * VKAPI multi1GetProcAddr(VkPhysicalGpu gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI multi1GetProcAddr(VkPhysicalDevice gpu, const char* pName)
{
VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
+ return gpuw->pGPA((VkPhysicalDevice) gpuw->nextObject, pName);
}
}
}
}
-VK_LAYER_EXPORT VkResult VKAPI multi2CreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo,
+VK_LAYER_EXPORT VkResult VKAPI multi2CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo,
VkDevice* pDevice)
{
VkLayerDispatchTable* pTable = tableMap2[gpu];
}
-VK_LAYER_EXPORT VkResult VKAPI multi2EnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize,
+VK_LAYER_EXPORT VkResult VKAPI multi2EnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize,
size_t* pOutLayerCount, char* const* pOutLayers,
void* pReserved)
{
return result;
}
-VK_LAYER_EXPORT void * VKAPI multi2GetProcAddr(VkPhysicalGpu gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI multi2GetProcAddr(VkPhysicalDevice gpu, const char* pName)
{
VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VkPhysicalGpu) gpuw->nextObject, pName);
+ return gpuw->pGPA((VkPhysicalDevice) gpuw->nextObject, pName);
}
}
/********************************* Common functions ********************************/
-VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize,
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize,
size_t* pOutLayerCount, char* const* pOutLayers,
void* pReserved)
{
return VK_SUCCESS;
}
-VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* pName)
+VK_LAYER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char* pName)
{
// to find each layers GPA routine Loader will search via "<layerName>GetProcAddr"
if (!strncmp("multi1GetProcAddr", pName, sizeof("multi1GetProcAddr")))
if (layerNum == 1 && layer2_first_activated == false)
layer1_first_activated = true;
- layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalGpu) gpuw->nextObject);
+ layer_initialize_dispatch_table(pTable, gpuw->pGPA, (VkPhysicalDevice) gpuw->nextObject);
}
VkObjectTypeQueryPool,
VkObjectTypeEvent,
VkObjectTypeQueue,
- VkObjectTypePhysicalGpu,
+ VkObjectTypePhysicalDevice,
VkObjectTypeRenderPass,
VkObjectTypeFramebuffer,
VkObjectTypeImage,
VkObjectTypeInstance,
VkObjectTypeDynamicVpState,
VkObjectTypeColorAttachmentView,
- VkObjectTypeGpuMemory,
+ VkObjectTypeDeviceMemory,
VkObjectTypeDynamicRsState,
VkObjectTypeFence,
VkObjectTypeCmdBuffer,
return "DESCRIPTOR_SET_LAYOUT";
case VkObjectTypeDescriptorSetLayoutChain:
return "DESCRIPTOR_SET_LAYOUT_CHAIN";
- case VkObjectTypeGpuMemory:
- return "GPU_MEMORY";
+ case VkObjectTypeDeviceMemory:
+ return "DEVICE_MEMORY";
case VkObjectTypeQueue:
return "QUEUE";
case VkObjectTypeImage:
return "BUFFER_VIEW";
case VkObjectTypeDescriptorSet:
return "DESCRIPTOR_SET";
- case VkObjectTypePhysicalGpu:
- return "PHYSICAL_GPU";
+ case VkObjectTypePhysicalDevice:
+ return "PHYSICAL_DEVICE";
case VkObjectTypeImageView:
return "IMAGE_VIEW";
case VkObjectTypeBuffer:
fpNextGPA = pCurObj->pGPA;
assert(fpNextGPA);
- layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);
+ layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalDevice) pCurObj->nextObject);
}
void PreCreateInstance(const VkApplicationInfo* pAppInfo, const VkAllocCallbacks* pAllocCb)
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkGetGpuInfo(VkPhysicalGpu gpu, VkPhysicalGpuInfoType infoType, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetPhysicalDeviceInfo(VkPhysicalDevice gpu, VkPhysicalDeviceInfoType infoType, size_t* pDataSize, void* pData)
{
pCurObj = (VkBaseLayerObject *) gpu;
loader_platform_thread_once(&tabOnce, initParamChecker);
char str[1024];
- if (!validate_VkPhysicalGpuInfoType(infoType)) {
- sprintf(str, "Parameter infoType to function GetGpuInfo has invalid value of %i.", (int)infoType);
+ if (!validate_VkPhysicalDeviceInfoType(infoType)) {
+ sprintf(str, "Parameter infoType to function GetPhysicalDeviceInfo has invalid value of %i.", (int)infoType);
layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
}
- VkResult result = nextTable.GetGpuInfo(gpu, infoType, pDataSize, pData);
+ VkResult result = nextTable.GetPhysicalDeviceInfo(gpu, infoType, pDataSize, pData);
return result;
}
-void PreCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo)
+void PreCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo)
{
if(gpu == nullptr)
{
- char const str[] = "vkCreateDevice parameter, VkPhysicalGpu gpu, is nullptr "\
+ char const str[] = "vkCreateDevice parameter, VkPhysicalDevice gpu, is nullptr "\
"(precondition).";
layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
return;
}
}
-VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalGpu gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
+VK_LAYER_EXPORT VkResult VKAPI vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, VkDevice* pDevice)
{
pCurObj = (VkBaseLayerObject *) gpu;
loader_platform_thread_once(&tabOnce, initParamChecker);
return VK_SUCCESS;
}
-VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+VK_LAYER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
char str[1024];
if (gpu != NULL) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueAddMemReferences(VkQueue queue, uint32_t count, const VkGpuMemory* pMems)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueAddMemReferences(VkQueue queue, uint32_t count, const VkDeviceMemory* pMems)
{
VkResult result = nextTable.QueueAddMemReferences(queue, count, pMems);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueRemoveMemReferences(VkQueue queue, uint32_t count, const VkGpuMemory* pMems)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueRemoveMemReferences(VkQueue queue, uint32_t count, const VkDeviceMemory* pMems)
{
VkResult result = nextTable.QueueRemoveMemReferences(queue, count, pMems);
return result;
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkAllocMemory(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkAllocMemory(VkDevice device, const VkMemoryAllocInfo* pAllocInfo, VkDeviceMemory* pMem)
{
char str[1024];
if (!pAllocInfo) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkFreeMemory(VkGpuMemory mem)
+VK_LAYER_EXPORT VkResult VKAPI vkFreeMemory(VkDeviceMemory mem)
{
VkResult result = nextTable.FreeMemory(mem);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkSetMemoryPriority(VkGpuMemory mem, VkMemoryPriority priority)
+VK_LAYER_EXPORT VkResult VKAPI vkSetMemoryPriority(VkDeviceMemory mem, VkMemoryPriority priority)
{
char str[1024];
if (!validate_VkMemoryPriority(priority)) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkMapMemory(VkGpuMemory mem, VkFlags flags, void** ppData)
+VK_LAYER_EXPORT VkResult VKAPI vkMapMemory(VkDeviceMemory mem, VkFlags flags, void** ppData)
{
VkResult result = nextTable.MapMemory(mem, flags, ppData);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkUnmapMemory(VkGpuMemory mem)
+VK_LAYER_EXPORT VkResult VKAPI vkUnmapMemory(VkDeviceMemory mem)
{
VkResult result = nextTable.UnmapMemory(mem);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkPinSystemMemory(VkDevice device, const void* pSysMem, size_t memSize, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkPinSystemMemory(VkDevice device, const void* pSysMem, size_t memSize, VkDeviceMemory* pMem)
{
VkResult result = nextTable.PinSystemMemory(device, pSysMem, memSize, pMem);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkGetMultiGpuCompatibility(VkPhysicalGpu gpu0, VkPhysicalGpu gpu1, VkGpuCompatibilityInfo* pInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkGetMultiDeviceCompatibility(VkPhysicalDevice gpu0, VkPhysicalDevice gpu1, VkPhysicalDeviceCompatibilityInfo* pInfo)
{
pCurObj = (VkBaseLayerObject *) gpu0;
loader_platform_thread_once(&tabOnce, initParamChecker);
- VkResult result = nextTable.GetMultiGpuCompatibility(gpu0, gpu1, pInfo);
+ VkResult result = nextTable.GetMultiDeviceCompatibility(gpu0, gpu1, pInfo);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedMemory(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenSharedMemory(VkDevice device, const VkMemoryOpenInfo* pOpenInfo, VkDeviceMemory* pMem)
{
char str[1024];
if (!pOpenInfo) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerMemory(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerMemory(VkDevice device, const VkPeerMemoryOpenInfo* pOpenInfo, VkDeviceMemory* pMem)
{
char str[1024];
if (!pOpenInfo) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerImage(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkOpenPeerImage(VkDevice device, const VkPeerImageOpenInfo* pOpenInfo, VkImage* pImage, VkDeviceMemory* pMem)
{
char str[1024];
if (!pOpenInfo) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuMemory mem, VkGpuSize offset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemory(VkQueue queue, VkObject object, uint32_t allocationIdx, VkDeviceMemory mem, VkDeviceSize offset)
{
VkResult result = nextTable.QueueBindObjectMemory(queue, object, allocationIdx, mem, offset);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemoryRange(VkQueue queue, VkObject object, uint32_t allocationIdx, VkGpuSize rangeOffset, VkGpuSize rangeSize, VkGpuMemory mem, VkGpuSize memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindObjectMemoryRange(VkQueue queue, VkObject object, uint32_t allocationIdx, VkDeviceSize rangeOffset, VkDeviceSize rangeSize, VkDeviceMemory mem, VkDeviceSize memOffset)
{
VkResult result = nextTable.QueueBindObjectMemoryRange(queue, object, allocationIdx, rangeOffset, rangeSize, mem, memOffset);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkQueueBindImageMemoryRange(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* pBindInfo, VkGpuMemory mem, VkGpuSize memOffset)
+VK_LAYER_EXPORT VkResult VKAPI vkQueueBindImageMemoryRange(VkQueue queue, VkImage image, uint32_t allocationIdx, const VkImageMemoryBindInfo* pBindInfo, VkDeviceMemory mem, VkDeviceSize memOffset)
{
char str[1024];
if (!pBindInfo) {
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkGetQueryPoolResults(VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData)
+VK_LAYER_EXPORT VkResult VKAPI vkGetQueryPoolResults(VkQueryPool queryPool, uint32_t startQuery, uint32_t queryCount, size_t* pDataSize, void* pData, VkQueryResultFlags flags)
{
- VkResult result = nextTable.GetQueryPoolResults(queryPool, startQuery, queryCount, pDataSize, pData);
+ VkResult result = nextTable.GetQueryPoolResults(queryPool, startQuery, queryCount, pDataSize, pData, flags);
return result;
}
VkFormatProperties properties;
size_t size = sizeof(properties);
VkResult result = nextTable.GetFormatInfo(device, pCreateInfo->format,
- VK_INFO_TYPE_FORMAT_PROPERTIES, &size, &properties);
+ VK_FORMAT_INFO_TYPE_PROPERTIES, &size, &properties);
if(result != VK_SUCCESS)
{
char const str[] = "vkCreateImage parameter, VkFormat pCreateInfo->format, cannot be "\
uint32_t startBinding,
uint32_t bindingCount,
const VkBuffer* pBuffers,
- const VkGpuSize* pOffsets)
+ const VkDeviceSize* pOffsets)
{
nextTable.CmdBindVertexBuffers(cmdBuffer, startBinding, bindingCount, pBuffers, pOffsets);
}
-VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, VkIndexType indexType)
+VK_LAYER_EXPORT void VKAPI vkCmdBindIndexBuffer(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType)
{
char str[1024];
if (!validate_VkIndexType(indexType)) {
nextTable.CmdDrawIndexed(cmdBuffer, firstIndex, indexCount, vertexOffset, firstInstance, instanceCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
{
nextTable.CmdDrawIndirect(cmdBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset, uint32_t count, uint32_t stride)
+VK_LAYER_EXPORT void VKAPI vkCmdDrawIndexedIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
{
nextTable.CmdDrawIndexedIndirect(cmdBuffer, buffer, offset, count, stride);
nextTable.CmdDispatch(cmdBuffer, x, y, z);
}
-VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkGpuSize offset)
+VK_LAYER_EXPORT void VKAPI vkCmdDispatchIndirect(VkCmdBuffer cmdBuffer, VkBuffer buffer, VkDeviceSize offset)
{
nextTable.CmdDispatchIndirect(cmdBuffer, buffer, offset);
nextTable.CmdCloneImageData(cmdBuffer, srcImage, srcImageLayout, destImage, destImageLayout);
}
-VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize dataSize, const uint32_t* pData)
+VK_LAYER_EXPORT void VKAPI vkCmdUpdateBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize dataSize, const uint32_t* pData)
{
nextTable.CmdUpdateBuffer(cmdBuffer, destBuffer, destOffset, dataSize, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkGpuSize destOffset, VkGpuSize fillSize, uint32_t data)
+VK_LAYER_EXPORT void VKAPI vkCmdFillBuffer(VkCmdBuffer cmdBuffer, VkBuffer destBuffer, VkDeviceSize destOffset, VkDeviceSize fillSize, uint32_t data)
{
nextTable.CmdFillBuffer(cmdBuffer, destBuffer, destOffset, fillSize, data);
nextTable.CmdResetEvent(cmdBuffer, event, pipeEvent);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VkCmdBuffer cmdBuffer, const VkEventWaitInfo* pWaitInfo)
+VK_LAYER_EXPORT void VKAPI vkCmdWaitEvents(VkCmdBuffer cmdBuffer, VkWaitEvent waitEvent, uint32_t eventCount, const VkEvent* pEvents, uint32_t memBarrierCount, const void** ppMemBarriers)
{
- char str[1024];
- if (!pWaitInfo) {
- sprintf(str, "Struct ptr parameter pWaitInfo to function CmdWaitEvents is NULL.");
- layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
- }
- else if (!vk_validate_vkeventwaitinfo(pWaitInfo)) {
- sprintf(str, "Parameter pWaitInfo to function CmdWaitEvents contains an invalid value.");
- layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
- }
- nextTable.CmdWaitEvents(cmdBuffer, pWaitInfo);
+ nextTable.CmdWaitEvents(cmdBuffer, waitEvent, eventCount, pEvents, memBarrierCount, ppMemBarriers);
}
-VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VkCmdBuffer cmdBuffer, const VkPipelineBarrier* pBarrier)
+VK_LAYER_EXPORT void VKAPI vkCmdPipelineBarrier(VkCmdBuffer cmdBuffer, VkWaitEvent waitEvent, uint32_t pipeEventCount, const VkPipeEvent* pPipeEvents, uint32_t memBarrierCount, const void** ppMemBarriers)
{
- char str[1024];
- if (!pBarrier) {
- sprintf(str, "Struct ptr parameter pBarrier to function CmdPipelineBarrier is NULL.");
- layerCbMsg(VK_DBG_MSG_UNKNOWN, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
- }
- else if (!vk_validate_vkpipelinebarrier(pBarrier)) {
- sprintf(str, "Parameter pBarrier to function CmdPipelineBarrier contains an invalid value.");
- layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, NULL, 0, 1, "PARAMCHECK", str);
- }
- nextTable.CmdPipelineBarrier(cmdBuffer, pBarrier);
+ nextTable.CmdPipelineBarrier(cmdBuffer, waitEvent, pipeEventCount, pPipeEvents, memBarrierCount, ppMemBarriers);
}
VK_LAYER_EXPORT void VKAPI vkCmdBeginQuery(VkCmdBuffer cmdBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
nextTable.CmdResetQueryPool(cmdBuffer, queryPool, startQuery, queryCount);
}
-VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkGpuSize destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdWriteTimestamp(VkCmdBuffer cmdBuffer, VkTimestampType timestampType, VkBuffer destBuffer, VkDeviceSize destOffset)
{
char str[1024];
if (!validate_VkTimestampType(timestampType)) {
nextTable.CmdInitAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, pData);
}
-VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkGpuSize srcOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdLoadAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer srcBuffer, VkDeviceSize srcOffset)
{
char str[1024];
if (!validate_VkPipelineBindPoint(pipelineBindPoint)) {
nextTable.CmdLoadAtomicCounters(cmdBuffer, pipelineBindPoint, startCounter, counterCount, srcBuffer, srcOffset);
}
-VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkGpuSize destOffset)
+VK_LAYER_EXPORT void VKAPI vkCmdSaveAtomicCounters(VkCmdBuffer cmdBuffer, VkPipelineBindPoint pipelineBindPoint, uint32_t startCounter, uint32_t counterCount, VkBuffer destBuffer, VkDeviceSize destOffset)
{
char str[1024];
if (!validate_VkPipelineBindPoint(pipelineBindPoint)) {
VkFormatProperties properties;
size_t size = sizeof(properties);
VkResult result = nextTable.GetFormatInfo(device, pCreateInfo->pColorFormats[i],
- VK_INFO_TYPE_FORMAT_PROPERTIES, &size, &properties);
+ VK_FORMAT_INFO_TYPE_PROPERTIES, &size, &properties);
if(result != VK_SUCCESS)
{
std::stringstream ss;
VkFormatProperties properties;
size_t size = sizeof(properties);
VkResult result = nextTable.GetFormatInfo(device, pCreateInfo->depthStencilFormat,
- VK_INFO_TYPE_FORMAT_PROPERTIES, &size, &properties);
+ VK_FORMAT_INFO_TYPE_PROPERTIES, &size, &properties);
if(result != VK_SUCCESS)
{
char const str[] = "vkCreateRenderPass parameter, VkFormat pCreateInfo->"\
// This layer intercepts callbacks
VK_LAYER_DBG_FUNCTION_NODE *pNewDbgFuncNode = (VK_LAYER_DBG_FUNCTION_NODE*)malloc(sizeof(VK_LAYER_DBG_FUNCTION_NODE));
if (!pNewDbgFuncNode)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
pNewDbgFuncNode->pfnMsgCallback = pfnMsgCallback;
pNewDbgFuncNode->pUserData = pUserData;
pNewDbgFuncNode->pNext = g_pDbgFunctionHead;
#if defined(__linux__) || defined(XCB_NVIDIA)
-VK_LAYER_EXPORT VkResult VKAPI vkWsiX11AssociateConnection(VkPhysicalGpu gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11AssociateConnection(VkPhysicalDevice gpu, const VK_WSI_X11_CONNECTION_INFO* pConnectionInfo)
{
pCurObj = (VkBaseLayerObject *) gpu;
loader_platform_thread_once(&tabOnce, initParamChecker);
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI vkWsiX11CreatePresentableImage(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VkImage* pImage, VkGpuMemory* pMem)
+VK_LAYER_EXPORT VkResult VKAPI vkWsiX11CreatePresentableImage(VkDevice device, const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO* pCreateInfo, VkImage* pImage, VkDeviceMemory* pMem)
{
VkResult result = nextTable.WsiX11CreatePresentableImage(device, pCreateInfo, pImage, pMem);
#endif
#include "vk_generic_intercept_proc_helper.h"
-VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)
+VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char* funcName)
{
VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;
void* addr;
else {
if (gpuw->pGPA == NULL)
return NULL;
- return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);
+ return gpuw->pGPA((VkPhysicalDevice)gpuw->nextObject, funcName);
}
}
loader.layer_scanned = true;
}
-static void loader_init_dispatch_table(VkLayerDispatchTable *tab, PFN_vkGetProcAddr fpGPA, VkPhysicalGpu gpu)
+static void loader_init_dispatch_table(VkLayerDispatchTable *tab, PFN_vkGetProcAddr fpGPA, VkPhysicalDevice gpu)
{
loader_initialize_dispatch_table(tab, fpGPA, gpu);
tab->EnumerateLayers = vkEnumerateLayers;
}
-static void *loader_gpa_internal(VkPhysicalGpu gpu, const char * pName)
+static void *loader_gpa_internal(VkPhysicalDevice gpu, const char * pName)
{
if (gpu == NULL) {
return NULL;
ptr_instance = (struct loader_instance*) malloc(sizeof(struct loader_instance));
if (ptr_instance == NULL) {
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
memset(ptr_instance, 0, sizeof(struct loader_instance));
ptr_instance->extension_count = pCreateInfo->extensionCount;
ptr_instance->extension_names = (ptr_instance->extension_count > 0) ?
malloc(sizeof (char *) * ptr_instance->extension_count) : NULL;
if (ptr_instance->extension_names == NULL && (ptr_instance->extension_count > 0))
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
for (i = 0; i < ptr_instance->extension_count; i++) {
if (!loader_is_extension_scanned(pCreateInfo->ppEnabledExtensionNames[i]))
return VK_ERROR_INVALID_EXTENSION;
ptr_instance->extension_names[i] = malloc(strlen(pCreateInfo->ppEnabledExtensionNames[i]) + 1);
if (ptr_instance->extension_names[i] == NULL)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
strcpy(ptr_instance->extension_names[i], pCreateInfo->ppEnabledExtensionNames[i]);
}
ptr_instance->next = loader.instances;
VkInstance instance,
uint32_t* pPhysicalDeviceCount,
- VkPhysicalGpu* pPhysicalDevices)
+ VkPhysicalDevice* pPhysicalDevices)
{
struct loader_instance *ptr_instance = (struct loader_instance *) instance;
struct loader_icd *icd;
} else
{
- VkPhysicalGpu* gpus;
+ VkPhysicalDevice* gpus;
if (*pPhysicalDeviceCount < ptr_instance->total_gpu_count)
return VK_ERROR_INVALID_VALUE;
- gpus = malloc( sizeof(VkPhysicalGpu) * *pPhysicalDeviceCount);
+ gpus = malloc( sizeof(VkPhysicalDevice) * *pPhysicalDeviceCount);
if (!gpus)
- return VK_ERROR_OUT_OF_MEMORY;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
while (icd) {
VkBaseLayerObject * wrapped_gpus;
PFN_vkGetProcAddr get_proc_addr = icd->scanned_icds->GetProcAddr;
return (count > 0) ? VK_SUCCESS : res;
}
-LOADER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char * pName)
+LOADER_EXPORT void * VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char * pName)
{
if (gpu == NULL) {
return VK_SUCCESS;
}
-LOADER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalGpu gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
+LOADER_EXPORT VkResult VKAPI vkEnumerateLayers(VkPhysicalDevice gpu, size_t maxLayerCount, size_t maxStringSize, size_t* pOutLayerCount, char* const* pOutLayers, void* pReserved)
{
uint32_t gpu_index;
size_t count = 0;
% self.prefix)
func.append("%s PFN_vkGetProcAddr gpa,"
% (" " * len(self.prefix)))
- func.append("%s VkPhysicalGpu gpu)"
+ func.append("%s VkPhysicalDevice gpu)"
% (" " * len(self.prefix)))
func.append("{")
func.append(" %s" % "\n ".join(stmts))
library_exports = {
"all": [],
"icd": [
- "EnumerateGpus",
+ "EnumeratePhysicalDevices",
"CreateInstance",
"DestroyInstance",
"GetProcAddr",
special_lookups = []
# these functions require special trampoline code beyond just the normal create object trampoline code
- special_names = ["AllocDescriptorSets", "GetMultiGpuCompatibility"]
+ special_names = ["AllocDescriptorSets", "GetMultiDeviceCompatibility"]
for proto in self.protos:
if self._is_loader_special_case(proto) or self._does_function_create_object(proto) or proto.name in special_names:
special_lookups.append("if (!strcmp(name, \"%s\"))" % proto.name)
r_body.append(' // This layer intercepts callbacks')
r_body.append(' VK_LAYER_DBG_FUNCTION_NODE *pNewDbgFuncNode = (VK_LAYER_DBG_FUNCTION_NODE*)malloc(sizeof(VK_LAYER_DBG_FUNCTION_NODE));')
r_body.append(' if (!pNewDbgFuncNode)')
- r_body.append(' return VK_ERROR_OUT_OF_MEMORY;')
+ r_body.append(' return VK_ERROR_OUT_OF_HOST_MEMORY;')
r_body.append(' pNewDbgFuncNode->pfnMsgCallback = pfnMsgCallback;')
r_body.append(' pNewDbgFuncNode->pUserData = pUserData;')
r_body.append(' pNewDbgFuncNode->pNext = g_pDbgFunctionHead;')
ggei_body.append('}')
return "\n".join(ggei_body)
+ def _gen_layer_get_extension_support(self, layer="Generic"):
+ ges_body = []
+ ges_body.append('VK_LAYER_EXPORT VkResult VKAPI xglGetExtensionSupport(VkPhysicalDevice gpu, const char* pExtName)')
+ ges_body.append('{')
+ ges_body.append(' VkResult result;')
+ ges_body.append(' VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;')
+ ges_body.append('')
+ ges_body.append(' /* This entrypoint is NOT going to init its own dispatch table since loader calls here early */')
+ ges_body.append(' if (!strncmp(pExtName, "%s", strlen("%s")))' % (layer, layer))
+ ges_body.append(' {')
+ ges_body.append(' result = VK_SUCCESS;')
+ ges_body.append(' } else if (nextTable.GetExtensionSupport != NULL)')
+ ges_body.append(' {')
+ ges_body.append(' result = nextTable.GetExtensionSupport((VkPhysicalDevice)gpuw->nextObject, pExtName);')
+ ges_body.append(' } else')
+ ges_body.append(' {')
+ ges_body.append(' result = VK_ERROR_INVALID_EXTENSION;')
+ ges_body.append(' }')
+ ges_body.append(' return result;')
+ ges_body.append('}')
+ return "\n".join(ges_body)
+
def _generate_dispatch_entrypoints(self, qual=""):
if qual:
qual += " "
def _generate_layer_gpa_function(self, extensions=[]):
func_body = []
- func_body.append("VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalGpu gpu, const char* funcName)\n"
+ func_body.append("VK_LAYER_EXPORT void* VKAPI vkGetProcAddr(VkPhysicalDevice gpu, const char* funcName)\n"
"{\n"
" VkBaseLayerObject* gpuw = (VkBaseLayerObject *) gpu;\n"
" void* addr;\n"
func_body.append(" else {\n"
" if (gpuw->pGPA == NULL)\n"
" return NULL;\n"
- " return gpuw->pGPA((VkPhysicalGpu)gpuw->nextObject, funcName);\n"
+ " return gpuw->pGPA((VkPhysicalDevice)gpuw->nextObject, funcName);\n"
" }\n"
"}\n")
return "\n".join(func_body)
' fpNextGPA = pCurObj->pGPA;\n'
' assert(fpNextGPA);\n')
- func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);")
+ func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalDevice) pCurObj->nextObject);")
if lockname is not None:
func_body.append(" if (!%sLockInitialized)" % lockname)
func_body.append(" {")
' fpNextGPA = pCurObj->pGPA;\n'
' assert(fpNextGPA);\n' % self.layer_name)
- func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);\n")
+ func_body.append(" layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalDevice) pCurObj->nextObject);\n")
func_body.append(" if (!printLockInitialized)")
func_body.append(" {")
func_body.append(" // TODO/TBD: Need to delete this mutex sometime. How???")
func_body.append(' PFN_vkGetProcAddr fpNextGPA;')
func_body.append(' fpNextGPA = pCurObj->pGPA;')
func_body.append(' assert(fpNextGPA);')
- func_body.append(' layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalGpu) pCurObj->nextObject);')
+ func_body.append(' layer_initialize_dispatch_table(&nextTable, fpNextGPA, (VkPhysicalDevice) pCurObj->nextObject);')
func_body.append('')
func_body.append(' if (!printLockInitialized)')
func_body.append(' {')
if 'WsiX11AssociateConnection' == proto.name:
funcs.append("#if defined(__linux__) || defined(XCB_NVIDIA)")
if proto.name == "EnumerateLayers":
- c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalGpu)gpuw->nextObject", 1)
+ c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalDevice)gpuw->nextObject", 1)
funcs.append('%s%s\n'
'{\n'
' using namespace StreamControl;\n'
' return VK_SUCCESS;\n'
' }\n'
'}' % (qual, decl, self.layer_name, ret_val, proto.c_call(),f_open, log_func, f_close, stmt, self.layer_name))
+ elif 'GetExtensionSupport' == proto.name:
+ funcs.append('%s%s\n'
+ '{\n'
+ ' VkResult result;\n'
+ ' /* This entrypoint is NOT going to init its own dispatch table since loader calls here early */\n'
+ ' if (!strncmp(pExtName, "%s", strlen("%s")))\n'
+ ' {\n'
+ ' result = VK_SUCCESS;\n'
+ ' } else if (nextTable.GetExtensionSupport != NULL)\n'
+ ' {\n'
+ ' result = nextTable.%s;\n'
+ ' %s %s %s\n'
+ ' } else\n'
+ ' {\n'
+ ' result = VK_ERROR_INVALID_EXTENSION;\n'
+ ' }\n'
+ '%s'
+ '}' % (qual, decl, self.layer_name, self.layer_name, proto.c_call(), f_open, log_func, f_close, stmt))
+# elif 'vkphysicalgpu' == proto.params[0].ty.lower():
+# c_call = proto.c_call().replace("(" + proto.params[0].name, "((VkPhysicalDevice)gpuw->nextObject", 1)
+# funcs.append('%s%s\n'
+# '{\n'
+# ' using namespace StreamControl;\n'
+# ' VkBaseLayerObject* gpuw = (VkBaseLayerObject *) %s;\n'
+# ' pCurObj = gpuw;\n'
+# ' loader_platform_thread_once(&tabOnce, init%s);\n'
+# ' %snextTable.%s;\n'
+# ' %s%s%s\n'
+# '%s'
+# '}' % (qual, decl, proto.params[0].name, self.layer_name, ret_val, c_call, f_open, log_func, f_close, stmt))
else:
funcs.append('%s%s\n'
'{\n'
header_txt.append('')
header_txt.append('// For each Queue\'s doubly linked-list of mem refs')
header_txt.append('typedef struct _OT_MEM_INFO {')
- header_txt.append(' VkGpuMemory mem;')
+ header_txt.append(' VkDeviceMemory mem;')
header_txt.append(' struct _OT_MEM_INFO *pNextMI;')
header_txt.append(' struct _OT_MEM_INFO *pPrevMI;')
header_txt.append('')
header_txt.append(' }')
header_txt.append(' else {')
header_txt.append(' char str[1024];')
- header_txt.append(' sprintf(str, "ERROR: VK_ERROR_OUT_OF_MEMORY -- could not allocate memory for Queue Information");')
+ header_txt.append(' sprintf(str, "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");')
header_txt.append(' layerCbMsg(VK_DBG_MSG_ERROR, VK_VALIDATION_LEVEL_0, queue, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK", str);')
header_txt.append(' }')
header_txt.append('}')
header_txt.append('')
header_txt.append(' while (pTrav) {')
header_txt.append(' if (pTrav->obj.pObj == pObj) {')
- header_txt.append(' if (stateBindPoint == VK_STATE_BIND_VIEWPORT) {')
+ header_txt.append(' if (stateBindPoint == VK_STATE_BIND_POINT_VIEWPORT) {')
header_txt.append(' pTrav->obj.status |= OBJSTATUS_VIEWPORT_BOUND;')
- header_txt.append(' } else if (stateBindPoint == VK_STATE_BIND_RASTER) {')
+ header_txt.append(' } else if (stateBindPoint == VK_STATE_BIND_POINT_RASTER) {')
header_txt.append(' pTrav->obj.status |= OBJSTATUS_RASTER_BOUND;')
- header_txt.append(' } else if (stateBindPoint == VK_STATE_BIND_COLOR_BLEND) {')
+ header_txt.append(' } else if (stateBindPoint == VK_STATE_BIND_POINT_COLOR_BLEND) {')
header_txt.append(' pTrav->obj.status |= OBJSTATUS_COLOR_BLEND_BOUND;')
- header_txt.append(' } else if (stateBindPoint == VK_STATE_BIND_DEPTH_STENCIL) {')
+ header_txt.append(' } else if (stateBindPoint == VK_STATE_BIND_POINT_DEPTH_STENCIL) {')
header_txt.append(' pTrav->obj.status |= OBJSTATUS_DEPTH_STENCIL_BOUND;')
header_txt.append(' }')
header_txt.append(' return;')
header_txt.append('}')
header_txt.append('')
header_txt.append('static void setGpuQueueInfoState(void *pData) {')
- header_txt.append(' maxMemReferences = ((VkPhysicalGpuQueueProperties *)pData)->maxMemReferences;')
+ header_txt.append(' maxMemReferences = ((VkPhysicalDeviceQueueProperties *)pData)->maxMemReferences;')
header_txt.append('}')
return "\n".join(header_txt)
elif 'CmdDraw' in proto.name:
using_line += ' validate_draw_state_flags((void *)cmdBuffer);\n'
elif 'MapMemory' in proto.name:
- using_line += ' set_status((void*)mem, VkObjectTypeGpuMemory, OBJSTATUS_GPU_MEM_MAPPED);\n'
+ using_line += ' set_status((void*)mem, VkObjectTypeDeviceMemory, OBJSTATUS_GPU_MEM_MAPPED);\n'
elif 'UnmapMemory' in proto.name:
- using_line += ' reset_status((void*)mem, VkObjectTypeGpuMemory, OBJSTATUS_GPU_MEM_MAPPED);\n'
+ using_line += ' reset_status((void*)mem, VkObjectTypeDeviceMemory, OBJSTATUS_GPU_MEM_MAPPED);\n'
if 'AllocDescriptor' in proto.name: # Allocates array of DSs
create_line = ' for (uint32_t i = 0; i < *pCount; i++) {\n'
create_line += ' loader_platform_thread_lock_mutex(&objLock);\n'
' return VK_SUCCESS;\n'
' }\n'
'}' % (qual, decl, using_line, self.layer_name, ret_val, proto.c_call(), create_line, destroy_line, stmt, self.layer_name))
- elif 'GetGpuInfo' in proto.name:
- gpu_state = ' if (infoType == VK_INFO_TYPE_PHYSICAL_GPU_QUEUE_PROPERTIES) {\n'
+ elif 'GetExtensionSupport' == proto.name:
+ funcs.append('%s%s\n'
+ '{\n'
+ ' VkResult result;\n'
+ ' /* This entrypoint is NOT going to init its own dispatch table since loader calls this early */\n'
+ ' if (!strncmp(pExtName, "%s", strlen("%s")) ||\n'
+ ' !strncmp(pExtName, "objTrackGetObjectCount", strlen("objTrackGetObjectCount")) ||\n'
+ ' !strncmp(pExtName, "objTrackGetObjects", strlen("objTrackGetObjects")))\n'
+ ' {\n'
+ ' result = VK_SUCCESS;\n'
+ ' } else if (nextTable.GetExtensionSupport != NULL)\n'
+ ' {\n'
+ ' %s'
+ ' result = nextTable.%s;\n'
+ ' } else\n'
+ ' {\n'
+ ' result = VK_ERROR_INVALID_EXTENSION;\n'
+ ' }\n'
+ '%s'
+ '}' % (qual, decl, self.layer_name, self.layer_name, using_line, proto.c_call(), stmt))
+ elif 'GetPhysicalDeviceInfo' in proto.name:
+ gpu_state = ' if (infoType == VK_PHYSICAL_DEVICE_INFO_TYPE_QUEUE_PROPERTIES) {\n'
gpu_state += ' if (pData != NULL) {\n'
gpu_state += ' setGpuQueueInfoState(pData);\n'
gpu_state += ' }\n'
# Only watch core objects passed as first parameter
elif proto.params[0].ty not in vulkan.core.objects:
return None
- elif proto.params[0].ty != "VkPhysicalGpu":
+ elif proto.params[0].ty != "VkPhysicalDevice":
funcs.append('%s%s\n'
'{\n'
' useObject((VkObject) %s, "%s");\n'
objects=[
"VkInstance",
- "VkPhysicalGpu",
+ "VkPhysicalDevice",
"VkBaseObject",
"VkDevice",
"VkQueue",
- "VkGpuMemory",
+ "VkDeviceMemory",
"VkObject",
"VkBuffer",
"VkBufferView",
Proto("VkResult", "EnumeratePhysicalDevices",
[Param("VkInstance", "instance"),
Param("uint32_t*", "pPhysicalDeviceCount"),
- Param("VkPhysicalGpu*", "pPhysicalDevices")]),
+ Param("VkPhysicalDevice*", "pPhysicalDevices")]),
- Proto("VkResult", "GetGpuInfo",
- [Param("VkPhysicalGpu", "gpu"),
- Param("VkPhysicalGpuInfoType", "infoType"),
+ Proto("VkResult", "GetPhysicalDeviceInfo",
+ [Param("VkPhysicalDevice", "gpu"),
+ Param("VkPhysicalDeviceInfoType", "infoType"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
Proto("void*", "GetProcAddr",
- [Param("VkPhysicalGpu", "gpu"),
+ [Param("VkPhysicalDevice", "gpu"),
Param("const char*", "pName")]),
Proto("VkResult", "CreateDevice",
- [Param("VkPhysicalGpu", "gpu"),
+ [Param("VkPhysicalDevice", "gpu"),
Param("const VkDeviceCreateInfo*", "pCreateInfo"),
Param("VkDevice*", "pDevice")]),
Param("void*", "pData")]),
Proto("VkResult", "GetPhysicalDeviceExtensionInfo",
- [Param("VkPhysicalGpu", "gpu"),
+ [Param("VkPhysicalDevice", "gpu"),
Param("VkExtensionInfoType", "infoType"),
Param("uint32_t", "extensionIndex"),
Param("size_t*", "pDataSize"),
Param("void*", "pData")]),
Proto("VkResult", "EnumerateLayers",
- [Param("VkPhysicalGpu", "gpu"),
+ [Param("VkPhysicalDevice", "gpu"),
Param("size_t", "maxLayerCount"),
Param("size_t", "maxStringSize"),
Param("size_t*", "pOutLayerCount"),
Proto("VkResult", "QueueAddMemReferences",
[Param("VkQueue", "queue"),
Param("uint32_t", "count"),
- Param("const VkGpuMemory*", "pMems")]),
+ Param("const VkDeviceMemory*", "pMems")]),
Proto("VkResult", "QueueRemoveMemReferences",
[Param("VkQueue", "queue"),
Param("uint32_t", "count"),
- Param("const VkGpuMemory*", "pMems")]),
+ Param("const VkDeviceMemory*", "pMems")]),
Proto("VkResult", "QueueWaitIdle",
[Param("VkQueue", "queue")]),
Proto("VkResult", "AllocMemory",
[Param("VkDevice", "device"),
Param("const VkMemoryAllocInfo*", "pAllocInfo"),
- Param("VkGpuMemory*", "pMem")]),
+ Param("VkDeviceMemory*", "pMem")]),
Proto("VkResult", "FreeMemory",
- [Param("VkGpuMemory", "mem")]),
+ [Param("VkDeviceMemory", "mem")]),
Proto("VkResult", "SetMemoryPriority",
- [Param("VkGpuMemory", "mem"),
+ [Param("VkDeviceMemory", "mem"),
Param("VkMemoryPriority", "priority")]),
Proto("VkResult", "MapMemory",
- [Param("VkGpuMemory", "mem"),
+ [Param("VkDeviceMemory", "mem"),
Param("VkFlags", "flags"),
Param("void**", "ppData")]),
Proto("VkResult", "UnmapMemory",
- [Param("VkGpuMemory", "mem")]),
+ [Param("VkDeviceMemory", "mem")]),
Proto("VkResult", "PinSystemMemory",
[Param("VkDevice", "device"),
Param("const void*", "pSysMem"),
Param("size_t", "memSize"),
- Param("VkGpuMemory*", "pMem")]),
+ Param("VkDeviceMemory*", "pMem")]),
- Proto("VkResult", "GetMultiGpuCompatibility",
- [Param("VkPhysicalGpu", "gpu0"),
- Param("VkPhysicalGpu", "gpu1"),
- Param("VkGpuCompatibilityInfo*", "pInfo")]),
+ Proto("VkResult", "GetMultiDeviceCompatibility",
+ [Param("VkPhysicalDevice", "gpu0"),
+ Param("VkPhysicalDevice", "gpu1"),
+ Param("VkPhysicalDeviceCompatibilityInfo*", "pInfo")]),
Proto("VkResult", "OpenSharedMemory",
[Param("VkDevice", "device"),
Param("const VkMemoryOpenInfo*", "pOpenInfo"),
- Param("VkGpuMemory*", "pMem")]),
+ Param("VkDeviceMemory*", "pMem")]),
Proto("VkResult", "OpenSharedSemaphore",
[Param("VkDevice", "device"),
Proto("VkResult", "OpenPeerMemory",
[Param("VkDevice", "device"),
Param("const VkPeerMemoryOpenInfo*", "pOpenInfo"),
- Param("VkGpuMemory*", "pMem")]),
+ Param("VkDeviceMemory*", "pMem")]),
Proto("VkResult", "OpenPeerImage",
[Param("VkDevice", "device"),
Param("const VkPeerImageOpenInfo*", "pOpenInfo"),
Param("VkImage*", "pImage"),
- Param("VkGpuMemory*", "pMem")]),
+ Param("VkDeviceMemory*", "pMem")]),
Proto("VkResult", "DestroyObject",
[Param("VkObject", "object")]),
[Param("VkQueue", "queue"),
Param("VkObject", "object"),
Param("uint32_t", "allocationIdx"),
- Param("VkGpuMemory", "mem"),
- Param("VkGpuSize", "offset")]),
+ Param("VkDeviceMemory", "mem"),
+ Param("VkDeviceSize", "offset")]),
Proto("VkResult", "QueueBindObjectMemoryRange",
[Param("VkQueue", "queue"),
Param("VkObject", "object"),
Param("uint32_t", "allocationIdx"),
- Param("VkGpuSize", "rangeOffset"),
- Param("VkGpuSize", "rangeSize"),
- Param("VkGpuMemory", "mem"),
- Param("VkGpuSize", "memOffset")]),
+ Param("VkDeviceSize", "rangeOffset"),
+ Param("VkDeviceSize", "rangeSize"),
+ Param("VkDeviceMemory", "mem"),
+ Param("VkDeviceSize", "memOffset")]),
Proto("VkResult", "QueueBindImageMemoryRange",
[Param("VkQueue", "queue"),
Param("VkImage", "image"),
Param("uint32_t", "allocationIdx"),
Param("const VkImageMemoryBindInfo*", "pBindInfo"),
- Param("VkGpuMemory", "mem"),
- Param("VkGpuSize", "memOffset")]),
+ Param("VkDeviceMemory", "mem"),
+ Param("VkDeviceSize", "memOffset")]),
Proto("VkResult", "CreateFence",
[Param("VkDevice", "device"),
Param("uint32_t", "startQuery"),
Param("uint32_t", "queryCount"),
Param("size_t*", "pDataSize"),
- Param("void*", "pData")]),
+ Param("void*", "pData"),
+ Param("VkQueryResultFlags", "flags")]),
Proto("VkResult", "GetFormatInfo",
[Param("VkDevice", "device"),
Param("uint32_t", "startBinding"),
Param("uint32_t", "bindingCount"),
Param("const VkBuffer*", "pBuffers"),
- Param("const VkGpuSize*", "pOffsets")]),
+ Param("const VkDeviceSize*", "pOffsets")]),
+
Proto("void", "CmdBindIndexBuffer",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("VkBuffer", "buffer"),
- Param("VkGpuSize", "offset"),
+ Param("VkDeviceSize", "offset"),
Param("VkIndexType", "indexType")]),
Proto("void", "CmdDraw",
Proto("void", "CmdDrawIndirect",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("VkBuffer", "buffer"),
- Param("VkGpuSize", "offset"),
+ Param("VkDeviceSize", "offset"),
Param("uint32_t", "count"),
Param("uint32_t", "stride")]),
Proto("void", "CmdDrawIndexedIndirect",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("VkBuffer", "buffer"),
- Param("VkGpuSize", "offset"),
+ Param("VkDeviceSize", "offset"),
Param("uint32_t", "count"),
Param("uint32_t", "stride")]),
Proto("void", "CmdDispatchIndirect",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("VkBuffer", "buffer"),
- Param("VkGpuSize", "offset")]),
+ Param("VkDeviceSize", "offset")]),
Proto("void", "CmdCopyBuffer",
[Param("VkCmdBuffer", "cmdBuffer"),
Proto("void", "CmdUpdateBuffer",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("VkBuffer", "destBuffer"),
- Param("VkGpuSize", "destOffset"),
- Param("VkGpuSize", "dataSize"),
+ Param("VkDeviceSize", "destOffset"),
+ Param("VkDeviceSize", "dataSize"),
Param("const uint32_t*", "pData")]),
Proto("void", "CmdFillBuffer",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("VkBuffer", "destBuffer"),
- Param("VkGpuSize", "destOffset"),
- Param("VkGpuSize", "fillSize"),
+ Param("VkDeviceSize", "destOffset"),
+ Param("VkDeviceSize", "fillSize"),
Param("uint32_t", "data")]),
Proto("void", "CmdClearColorImage",
Proto("void", "CmdWaitEvents",
[Param("VkCmdBuffer", "cmdBuffer"),
- Param("const VkEventWaitInfo*", "pWaitInfo")]),
+ Param("VkWaitEvent", "waitEvent"),
+ Param("uint32_t", "eventCount"),
+ Param("const VkEvent*", "pEvents"),
+ Param("uint32_t", "memBarrierCount"),
+ Param("const void**", "ppMemBarriers")]),
Proto("void", "CmdPipelineBarrier",
[Param("VkCmdBuffer", "cmdBuffer"),
- Param("const VkPipelineBarrier*", "pBarrier")]),
+ Param("VkWaitEvent", "waitEvent"),
+ Param("uint32_t", "pipeEventCount"),
+ Param("const VkPipeEvent*", "pPipeEvents"),
+ Param("uint32_t", "memBarrierCount"),
+ Param("const void**", "ppMemBarriers")]),
Proto("void", "CmdBeginQuery",
[Param("VkCmdBuffer", "cmdBuffer"),
[Param("VkCmdBuffer", "cmdBuffer"),
Param("VkTimestampType", "timestampType"),
Param("VkBuffer", "destBuffer"),
- Param("VkGpuSize", "destOffset")]),
+ Param("VkDeviceSize", "destOffset")]),
Proto("void", "CmdCopyQueryPoolResults",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("uint32_t", "startQuery"),
Param("uint32_t", "queryCount"),
Param("VkBuffer", "destBuffer"),
- Param("VkGpuSize", "destOffset"),
- Param("VkGpuSize", "destStride"),
+ Param("VkDeviceSize", "destOffset"),
+ Param("VkDeviceSize", "destStride"),
Param("VkFlags", "flags")]),
Proto("void", "CmdInitAtomicCounters",
Param("uint32_t", "startCounter"),
Param("uint32_t", "counterCount"),
Param("VkBuffer", "srcBuffer"),
- Param("VkGpuSize", "srcOffset")]),
+ Param("VkDeviceSize", "srcOffset")]),
Proto("void", "CmdSaveAtomicCounters",
[Param("VkCmdBuffer", "cmdBuffer"),
Param("uint32_t", "startCounter"),
Param("uint32_t", "counterCount"),
Param("VkBuffer", "destBuffer"),
- Param("VkGpuSize", "destOffset")]),
+ Param("VkDeviceSize", "destOffset")]),
Proto("VkResult", "CreateFramebuffer",
[Param("VkDevice", "device"),
objects=[],
protos=[
Proto("VkResult", "WsiX11AssociateConnection",
- [Param("VkPhysicalGpu", "gpu"),
+ [Param("VkPhysicalDevice", "gpu"),
Param("const VK_WSI_X11_CONNECTION_INFO*", "pConnectionInfo")]),
Proto("VkResult", "WsiX11GetMSC",
[Param("VkDevice", "device"),
Param("const VK_WSI_X11_PRESENTABLE_IMAGE_CREATE_INFO*", "pCreateInfo"),
Param("VkImage*", "pImage"),
- Param("VkGpuMemory*", "pMem")]),
+ Param("VkDeviceMemory*", "pMem")]),
Proto("VkResult", "WsiX11QueuePresent",
[Param("VkQueue", "queue"),
object_root_list = [
"VkInstance",
- "VkPhysicalGpu",
+ "VkPhysicalDevice",
"VkBaseObject"
]
object_base_list = [
"VkDevice",
"VkQueue",
- "VkGpuMemory",
+ "VkDeviceMemory",
"VkObject"
]