1 /* Copyright (c) 2015-2017 The Khronos Group Inc.
2 * Copyright (c) 2015-2017 Valve Corporation
3 * Copyright (c) 2015-2017 LunarG, Inc.
4 * Copyright (C) 2015-2017 Google Inc.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * Author: Cody Northrop <cnorthrop@google.com>
19 * Author: Michael Lentine <mlentine@google.com>
20 * Author: Tobin Ehlis <tobine@google.com>
21 * Author: Chia-I Wu <olv@google.com>
22 * Author: Chris Forbes <chrisf@ijw.co.nz>
23 * Author: Mark Lobodzinski <mark@lunarg.com>
24 * Author: Ian Elliott <ianelliott@google.com>
25 * Author: Dave Houlton <daveh@lunarg.com>
26 * Author: Dustin Graves <dustin@lunarg.com>
27 * Author: Jeremy Hayes <jeremy@lunarg.com>
28 * Author: Jon Ashburn <jon@lunarg.com>
29 * Author: Karl Schultz <karl@lunarg.com>
30 * Author: Mark Young <marky@lunarg.com>
31 * Author: Mike Schuchardt <mikes@lunarg.com>
32 * Author: Mike Weiblen <mikew@lunarg.com>
33 * Author: Tony Barbour <tony@LunarG.com>
36 // Allow use of STL min and max functions in Windows
55 #include "vk_loader_platform.h"
56 #include "vk_dispatch_table_helper.h"
57 #include "vk_enum_string_helper.h"
59 #pragma GCC diagnostic ignored "-Wwrite-strings"
62 #pragma GCC diagnostic warning "-Wwrite-strings"
64 #include "core_validation.h"
65 #include "buffer_validation.h"
66 #include "shader_validation.h"
67 #include "vk_layer_table.h"
68 #include "vk_layer_data.h"
69 #include "vk_layer_extension_utils.h"
70 #include "vk_layer_utils.h"
71 #include "vk_typemap_helper.h"
73 #if defined __ANDROID__
74 #include <android/log.h>
75 #define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "CORE_VALIDATION", __VA_ARGS__))
77 #define LOGCONSOLE(...) \
79 printf(__VA_ARGS__); \
84 // This intentionally includes a cpp file
85 #include "vk_safe_struct.cpp"
87 using mutex_t = std::mutex;
88 using lock_guard_t = std::lock_guard<mutex_t>;
89 using unique_lock_t = std::unique_lock<mutex_t>;
91 // These functions are defined *outside* the core_validation namespace as their type
92 // is also defined outside that namespace
93 size_t PipelineLayoutCompatDef::hash() const {
94 hash_util::HashCombiner hc;
95 // The set number is integral to the CompatDef's distinctiveness
96 hc << set << push_constant_ranges.get();
97 const auto &descriptor_set_layouts = *set_layouts_id.get();
98 for (uint32_t i = 0; i <= set; i++) {
99 hc << descriptor_set_layouts[i].get();
104 bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
105 if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
109 if (set_layouts_id == other.set_layouts_id) {
110 // if it's the same set_layouts_id, then *any* subset will match
114 // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
115 const auto &descriptor_set_layouts = *set_layouts_id.get();
116 assert(set < descriptor_set_layouts.size());
117 const auto &other_ds_layouts = *other.set_layouts_id.get();
118 assert(set < other_ds_layouts.size());
119 for (uint32_t i = 0; i <= set; i++) {
120 if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
127 namespace core_validation {
131 using std::stringstream;
132 using std::unique_ptr;
133 using std::unordered_map;
134 using std::unordered_set;
// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
// (all-ones handle — cannot collide with a real VkDeviceMemory from the driver)
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
// (all-ones minus one, distinct from the swapchain sentinel above)
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
143 struct instance_layer_data {
144 VkInstance instance = VK_NULL_HANDLE;
145 debug_report_data *report_data = nullptr;
146 vector<VkDebugReportCallbackEXT> logging_callback;
147 vector<VkDebugUtilsMessengerEXT> logging_messenger;
148 VkLayerInstanceDispatchTable dispatch_table;
150 CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
151 uint32_t physical_devices_count = 0;
152 CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
153 uint32_t physical_device_groups_count = 0;
154 CHECK_DISABLED disabled = {};
156 unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
157 unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
159 InstanceExtensions extensions;
160 uint32_t api_version;
164 debug_report_data *report_data = nullptr;
165 VkLayerDispatchTable dispatch_table;
167 DeviceExtensions extensions = {};
168 unordered_set<VkQueue> queues; // All queues under given device
169 // Layer specific data
170 unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
171 unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
172 unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
173 unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
174 unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
175 unordered_map<VkPipeline, unique_ptr<PIPELINE_STATE>> pipelineMap;
176 unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
177 unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
178 unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
179 unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
180 unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
181 unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
182 unordered_map<VkFence, FENCE_NODE> fenceMap;
183 unordered_map<VkQueue, QUEUE_STATE> queueMap;
184 unordered_map<VkEvent, EVENT_STATE> eventMap;
185 unordered_map<QueryObject, bool> queryToStateMap;
186 unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
187 unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
188 unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
189 unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
190 unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
191 unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
192 unordered_map<VkRenderPass, std::shared_ptr<RENDER_PASS_STATE>> renderPassMap;
193 unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
194 unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
195 unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
197 VkDevice device = VK_NULL_HANDLE;
198 VkPhysicalDevice physical_device = VK_NULL_HANDLE;
200 instance_layer_data *instance_data = nullptr; // from device to enclosing instance
202 VkPhysicalDeviceFeatures enabled_features = {};
203 // Device specific data
204 PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
205 VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
206 VkPhysicalDeviceProperties phys_dev_props = {};
207 // Device extension properties -- storing properties gathered from VkPhysicalDeviceProperties2KHR::pNext chain
208 struct DeviceExtensionProperties {
209 uint32_t max_push_descriptors; // from VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors
210 VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_props;
211 VkPhysicalDeviceDescriptorIndexingFeaturesEXT descriptor_indexing_features;
213 DeviceExtensionProperties phys_dev_ext_props = {};
214 bool external_sync_warning = false;
215 uint32_t api_version = 0;
// TODO : Do we need to guard access to layer_data_map w/ lock?
// Global registries mapping the loader's dispatch key (void *) to this layer's
// per-device / per-instance state.
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
// Loader/layer interface version negotiated with the Vulkan loader.
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
224 static const VkLayerProperties global_layer = {
225 "VK_LAYER_LUNARG_core_validation",
226 VK_LAYER_API_VERSION,
228 "LunarG Validation Layer",
231 static const VkExtensionProperties device_extensions[] = {
232 {VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
235 template <class TCreateInfo>
236 void ValidateLayerOrdering(const TCreateInfo &createInfo) {
237 bool foundLayer = false;
238 for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
239 if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
242 // This has to be logged to console as we don't have a callback at this point.
243 if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
244 LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
// TODO : This can be much smarter, using separate locks for separate global data
// Single coarse-grained mutex serializing access to all of this layer's state maps.
static mutex_t global_lock;
252 // Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
253 IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
254 auto iv_it = dev_data->imageViewMap.find(image_view);
255 if (iv_it == dev_data->imageViewMap.end()) {
258 return iv_it->second.get();
260 // Return sampler node ptr for specified sampler or else NULL
261 SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
262 auto sampler_it = dev_data->samplerMap.find(sampler);
263 if (sampler_it == dev_data->samplerMap.end()) {
266 return sampler_it->second.get();
268 // Return image state ptr for specified image or else NULL
269 IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
270 auto img_it = dev_data->imageMap.find(image);
271 if (img_it == dev_data->imageMap.end()) {
274 return img_it->second.get();
276 // Return buffer state ptr for specified buffer or else NULL
277 BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
278 auto buff_it = dev_data->bufferMap.find(buffer);
279 if (buff_it == dev_data->bufferMap.end()) {
282 return buff_it->second.get();
284 // Return swapchain node for specified swapchain or else NULL
285 SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
286 auto swp_it = dev_data->swapchainMap.find(swapchain);
287 if (swp_it == dev_data->swapchainMap.end()) {
290 return swp_it->second.get();
292 // Return buffer node ptr for specified buffer or else NULL
293 BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
294 auto bv_it = dev_data->bufferViewMap.find(buffer_view);
295 if (bv_it == dev_data->bufferViewMap.end()) {
298 return bv_it->second.get();
301 FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
302 auto it = dev_data->fenceMap.find(fence);
303 if (it == dev_data->fenceMap.end()) {
309 EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
310 auto it = dev_data->eventMap.find(event);
311 if (it == dev_data->eventMap.end()) {
317 QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
318 auto it = dev_data->queryPoolMap.find(query_pool);
319 if (it == dev_data->queryPoolMap.end()) {
325 QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
326 auto it = dev_data->queueMap.find(queue);
327 if (it == dev_data->queueMap.end()) {
333 SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
334 auto it = dev_data->semaphoreMap.find(semaphore);
335 if (it == dev_data->semaphoreMap.end()) {
341 COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
342 auto it = dev_data->commandPoolMap.find(pool);
343 if (it == dev_data->commandPoolMap.end()) {
349 PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
350 auto it = instance_data->physical_device_map.find(phys);
351 if (it == instance_data->physical_device_map.end()) {
357 SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
358 auto it = instance_data->surface_map.find(surface);
359 if (it == instance_data->surface_map.end()) {
365 // Return ptr to memory binding for given handle of specified type
366 static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
368 case kVulkanObjectTypeImage:
369 return GetImageState(dev_data, VkImage(handle));
370 case kVulkanObjectTypeBuffer:
371 return GetBufferState(dev_data, VkBuffer(handle));
378 GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
380 // Return ptr to info in map container containing mem, or NULL if not found
381 // Calls to this function should be wrapped in mutex
382 DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
383 auto mem_it = dev_data->memObjMap.find(mem);
384 if (mem_it == dev_data->memObjMap.end()) {
387 return mem_it->second.get();
390 static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
391 const VkMemoryAllocateInfo *pAllocateInfo) {
392 assert(object != NULL);
394 auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
395 dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(mem_info);
397 auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
399 mem_info->is_dedicated = true;
400 mem_info->dedicated_buffer = dedicated->buffer;
401 mem_info->dedicated_image = dedicated->image;
405 // Create binding link between given sampler and command buffer node
406 void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
407 sampler_state->cb_bindings.insert(cb_node);
408 cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
411 // Create binding link between given image node and command buffer node
412 void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
413 // Skip validation if this image was created through WSI
414 if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
415 // First update CB binding in MemObj mini CB list
416 for (auto mem_binding : image_state->GetBoundMemory()) {
417 DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
419 pMemInfo->cb_bindings.insert(cb_node);
420 // Now update CBInfo's Mem reference list
421 cb_node->memObjs.insert(mem_binding);
424 // Now update cb binding for image
425 cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
426 image_state->cb_bindings.insert(cb_node);
430 // Create binding link between given image view node and its image with command buffer node
431 void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
432 // First add bindings for imageView
433 view_state->cb_bindings.insert(cb_node);
434 cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
435 auto image_state = GetImageState(dev_data, view_state->create_info.image);
436 // Add bindings for image within imageView
438 AddCommandBufferBindingImage(dev_data, cb_node, image_state);
442 // Create binding link between given buffer node and command buffer node
443 void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
444 // First update CB binding in MemObj mini CB list
445 for (auto mem_binding : buffer_state->GetBoundMemory()) {
446 DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
448 pMemInfo->cb_bindings.insert(cb_node);
449 // Now update CBInfo's Mem reference list
450 cb_node->memObjs.insert(mem_binding);
453 // Now update cb binding for buffer
454 cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
455 buffer_state->cb_bindings.insert(cb_node);
458 // Create binding link between given buffer view node and its buffer with command buffer node
459 void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
460 // First add bindings for bufferView
461 view_state->cb_bindings.insert(cb_node);
462 cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
463 auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
464 // Add bindings for buffer within bufferView
466 AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
470 // For every mem obj bound to particular CB, free bindings related to that CB
471 static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
473 if (cb_node->memObjs.size() > 0) {
474 for (auto mem : cb_node->memObjs) {
475 DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
477 pInfo->cb_bindings.erase(cb_node);
480 cb_node->memObjs.clear();
485 // Clear a single object binding from given memory object, or report error if binding is missing
486 static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
487 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
488 // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
490 mem_info->obj_bindings.erase({handle, type});
495 // ClearMemoryObjectBindings clears the binding of objects to memory
496 // For the given object it pulls the memory bindings and makes sure that the bindings
497 // no longer refer to the object being cleared. This occurs when objects are destroyed.
498 bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
500 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
502 if (!mem_binding->sparse) {
503 skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
504 } else { // Sparse, clear all bindings
505 for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
506 skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
513 // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
514 bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
515 const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
517 if (VK_NULL_HANDLE == mem) {
519 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
520 "%s: Vk%s object 0x%" PRIx64 " used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
521 api_name, type_name, handle, type_name);
522 } else if (MEMORY_UNBOUND == mem) {
524 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
525 "%s: Vk%s object 0x%" PRIx64
526 " used with no memory bound and previously bound memory was freed. Memory must not be freed prior to this "
528 api_name, type_name, handle);
533 // Check to see if memory was ever bound to this image
534 bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
535 UNIQUE_VALIDATION_ERROR_CODE error_code) {
537 if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
538 result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
544 // Check to see if memory was bound to this buffer
545 bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
546 UNIQUE_VALIDATION_ERROR_CODE error_code) {
548 if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
549 result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
550 "Buffer", error_code);
555 // SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
556 // Corresponding valid usage checks are in ValidateSetMemBinding().
557 static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
558 uint64_t handle, VulkanObjectType type, const char *apiName) {
560 mem_binding->binding.mem = mem;
561 mem_binding->UpdateBoundMemorySet(); // force recreation of cached set
562 mem_binding->binding.offset = memory_offset;
563 mem_binding->binding.size = mem_binding->requirements.size;
565 if (mem != VK_NULL_HANDLE) {
566 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
568 mem_info->obj_bindings.insert({handle, type});
569 // For image objects, make sure default memory state is correctly set
570 // TODO : What's the best/correct way to handle this?
571 if (kVulkanObjectTypeImage == type) {
572 auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
574 VkImageCreateInfo ici = image_state->createInfo;
575 if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
576 // TODO:: More memory state transition stuff.
584 // Valid usage checks for a call to SetMemBinding().
585 // For NULL mem case, output warning
586 // Make sure given object is in global object map
587 // IF a previous binding existed, output validation error
588 // Otherwise, add reference from objectInfo to memoryInfo
589 // Add reference off of objInfo
590 // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
591 static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
592 const char *apiName) {
594 // It's an error to bind an object to NULL memory
595 if (mem != VK_NULL_HANDLE) {
596 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
598 if (mem_binding->sparse) {
599 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
600 const char *handle_type = "IMAGE";
601 if (type == kVulkanObjectTypeBuffer) {
602 error_code = VALIDATION_ERROR_1700080c;
603 handle_type = "BUFFER";
605 assert(type == kVulkanObjectTypeImage);
607 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
608 HandleToUint64(mem), error_code,
609 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
610 ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT).",
611 apiName, HandleToUint64(mem), handle, handle_type);
613 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
615 DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
617 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
618 if (type == kVulkanObjectTypeBuffer) {
619 error_code = VALIDATION_ERROR_1700080a;
621 assert(type == kVulkanObjectTypeImage);
623 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
624 HandleToUint64(mem), error_code,
625 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
626 ") which has already been bound to mem object 0x%" PRIx64 ".",
627 apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem));
628 } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
629 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
630 HandleToUint64(mem), MEMTRACK_REBIND_OBJECT,
631 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
632 ") which was previous bound to memory that has since been freed. Memory bindings are immutable in "
633 "Vulkan so this attempt to bind to new memory is not allowed.",
634 apiName, HandleToUint64(mem), handle);
641 // For NULL mem case, clear any previous binding Else...
642 // Make sure given object is in its object map
643 // IF a previous binding existed, update binding
644 // Add reference from objectInfo to memoryInfo
645 // Add reference off of object's binding info
646 // Return VK_TRUE if addition is successful, VK_FALSE otherwise
647 static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
648 bool skip = VK_FALSE;
649 // Handle NULL case separately, just clear previous binding & decrement reference
650 if (binding.mem == VK_NULL_HANDLE) {
651 // TODO : This should cause the range of the resource to be unbound according to spec
653 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
655 assert(mem_binding->sparse);
656 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
658 mem_info->obj_bindings.insert({handle, type});
659 // Need to set mem binding for this object
660 mem_binding->sparse_bindings.insert(binding);
661 mem_binding->UpdateBoundMemorySet();
667 // Check object status for selected flag state
668 static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
669 const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
670 if (!(pNode->status & status_mask)) {
671 return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
672 HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object 0x%" PRIx64 ": %s..",
673 HandleToUint64(pNode->commandBuffer), fail_msg);
678 // Retrieve pipeline node ptr for given pipeline object
679 static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
680 auto it = dev_data->pipelineMap.find(pipeline);
681 if (it == dev_data->pipelineMap.end()) {
684 return it->second.get();
687 RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
688 auto it = dev_data->renderPassMap.find(renderpass);
689 if (it == dev_data->renderPassMap.end()) {
692 return it->second.get();
695 std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
696 auto it = dev_data->renderPassMap.find(renderpass);
697 if (it == dev_data->renderPassMap.end()) {
703 FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
704 auto it = dev_data->frameBufferMap.find(framebuffer);
705 if (it == dev_data->frameBufferMap.end()) {
708 return it->second.get();
711 std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
712 VkDescriptorSetLayout dsLayout) {
713 auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
714 if (it == dev_data->descriptorSetLayoutMap.end()) {
720 static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
721 auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
722 if (it == dev_data->pipelineLayoutMap.end()) {
728 shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
729 auto it = dev_data->shaderModuleMap.find(module);
730 if (it == dev_data->shaderModuleMap.end()) {
733 return it->second.get();
736 // Return true if for a given PSO, the given state enum is dynamic, else return false
737 static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
738 if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
739 for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
740 if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
746 // Validate state stored as flags at time of draw call
747 static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
748 UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
750 if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
751 pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
752 result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
753 "Dynamic line width state not set for this command buffer", msg_code);
755 if (pPipe->graphicsPipelineCI.pRasterizationState &&
756 (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
757 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
758 "Dynamic depth bias state not set for this command buffer", msg_code);
760 if (pPipe->blendConstantsEnabled) {
761 result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
762 "Dynamic blend constants state not set for this command buffer", msg_code);
764 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
765 (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
766 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
767 "Dynamic depth bounds state not set for this command buffer", msg_code);
769 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
770 (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
771 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
772 "Dynamic stencil read mask state not set for this command buffer", msg_code);
773 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
774 "Dynamic stencil write mask state not set for this command buffer", msg_code);
775 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
776 "Dynamic stencil reference state not set for this command buffer", msg_code);
779 result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
780 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
786 static bool logInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
787 const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
788 uint32_t secondary_attach, const char *msg, const char *caller,
789 UNIQUE_VALIDATION_ERROR_CODE error_code) {
790 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
791 HandleToUint64(rp1_state->renderPass), error_code,
792 "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64 " and %s w/ renderPass 0x%" PRIx64
793 " Attachment %u is not compatible with %u: %s.",
794 caller, type1_string, HandleToUint64(rp1_state->renderPass), type2_string, HandleToUint64(rp2_state->renderPass),
795 primary_attach, secondary_attach, msg);
798 static bool validateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
799 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
800 const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
801 const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
803 const auto &primaryPassCI = rp1_state->createInfo;
804 const auto &secondaryPassCI = rp2_state->createInfo;
805 if (primaryPassCI.attachmentCount <= primary_attach) {
806 primary_attach = VK_ATTACHMENT_UNUSED;
808 if (secondaryPassCI.attachmentCount <= secondary_attach) {
809 secondary_attach = VK_ATTACHMENT_UNUSED;
811 if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
814 if (primary_attach == VK_ATTACHMENT_UNUSED) {
815 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
816 secondary_attach, "The first is unused while the second is not.", caller, error_code);
819 if (secondary_attach == VK_ATTACHMENT_UNUSED) {
820 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
821 secondary_attach, "The second is unused while the first is not.", caller, error_code);
824 if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
825 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
826 secondary_attach, "They have different formats.", caller, error_code);
828 if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
829 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
830 secondary_attach, "They have different samples.", caller, error_code);
832 if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
833 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
834 secondary_attach, "They have different flags.", caller, error_code);
840 static bool validateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
841 const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
842 const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
844 const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
845 const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
846 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
847 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
848 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
849 if (i < primary_desc.inputAttachmentCount) {
850 primary_input_attach = primary_desc.pInputAttachments[i].attachment;
852 if (i < secondary_desc.inputAttachmentCount) {
853 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
855 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
856 secondary_input_attach, caller, error_code);
858 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
859 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
860 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
861 if (i < primary_desc.colorAttachmentCount) {
862 primary_color_attach = primary_desc.pColorAttachments[i].attachment;
864 if (i < secondary_desc.colorAttachmentCount) {
865 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
867 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
868 secondary_color_attach, caller, error_code);
869 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
870 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
871 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
873 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
874 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
876 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
877 secondary_resolve_attach, caller, error_code);
879 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
880 if (primary_desc.pDepthStencilAttachment) {
881 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
883 if (secondary_desc.pDepthStencilAttachment) {
884 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
886 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
887 secondary_depthstencil_attach, caller, error_code);
891 // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
892 // This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
893 // will then feed into this function
894 static bool validateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
895 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
896 const RENDER_PASS_STATE *rp2_state, const char *caller,
897 UNIQUE_VALIDATION_ERROR_CODE error_code) {
900 if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
901 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
902 HandleToUint64(rp1_state->renderPass), error_code,
903 "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64
904 " with a subpassCount of %u and %s w/ renderPass 0x%" PRIx64 " with a subpassCount of %u.",
905 caller, type1_string, HandleToUint64(rp1_state->renderPass), rp1_state->createInfo.subpassCount,
906 type2_string, HandleToUint64(rp2_state->renderPass), rp2_state->createInfo.subpassCount);
908 for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
909 skip |= validateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
915 // Return Set node ptr for specified set or else NULL
916 cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
917 auto set_it = dev_data->setMap.find(set);
918 if (set_it == dev_data->setMap.end()) {
921 return set_it->second;
924 // For given pipeline, return number of MSAA samples, or one if MSAA disabled
925 static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
926 if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
927 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
928 return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
930 return VK_SAMPLE_COUNT_1_BIT;
933 static void list_bits(std::ostream &s, uint32_t bits) {
934 for (int i = 0; i < 32 && bits; i++) {
935 if (bits & (1 << i)) {
945 // Validate draw-time state related to the PSO
946 static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
947 CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
950 // Verify vertex binding
951 if (pPipeline->vertexBindingDescriptions.size() > 0) {
952 for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
953 auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
954 if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
955 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
957 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
958 HandleToUint64(pCB->commandBuffer), DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
959 "The Pipeline State Object (0x%" PRIx64
960 ") expects that this Command Buffer's vertex binding Index %u should be set via "
961 "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
962 "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
963 HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
967 if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
968 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
969 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
970 DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
971 "Vertex buffers are bound to command buffer (0x%" PRIx64
972 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIx64 ").",
973 HandleToUint64(pCB->commandBuffer), HandleToUint64(state.pipeline_state->pipeline));
976 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
977 // Skip check if rasterization is disabled or there is no viewport.
978 if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
979 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
980 pPipeline->graphicsPipelineCI.pViewportState) {
981 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
982 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
985 auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
986 auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
987 if (missingViewportMask) {
988 std::stringstream ss;
989 ss << "Dynamic viewport(s) ";
990 list_bits(ss, missingViewportMask);
991 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
992 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
993 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
998 auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
999 auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
1000 if (missingScissorMask) {
1001 std::stringstream ss;
1002 ss << "Dynamic scissor(s) ";
1003 list_bits(ss, missingScissorMask);
1004 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
1005 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1006 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
1011 // Verify that any MSAA request in PSO matches sample# in bound FB
1012 // Skip the check if rasterization is disabled.
1013 if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
1014 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
1015 VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
1016 if (pCB->activeRenderPass) {
1017 auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
1018 const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
1020 unsigned subpass_num_samples = 0;
1022 for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
1023 auto attachment = subpass_desc->pColorAttachments[i].attachment;
1024 if (attachment != VK_ATTACHMENT_UNUSED)
1025 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1028 if (subpass_desc->pDepthStencilAttachment &&
1029 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1030 auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
1031 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1034 if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
1035 ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
1036 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1037 HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1038 "Num samples mismatch! At draw-time in Pipeline (0x%" PRIx64
1039 ") with %u samples while current RenderPass (0x%" PRIx64 ") w/ %u samples!",
1040 HandleToUint64(pPipeline->pipeline), pso_num_samples,
1041 HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
1044 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1045 HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1046 "No active render pass found at draw-time in Pipeline (0x%" PRIx64 ")!",
1047 HandleToUint64(pPipeline->pipeline));
1050 // Verify that PSO creation renderPass is compatible with active renderPass
1051 if (pCB->activeRenderPass) {
1052 // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
1053 // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
1054 // Error codes for renderpass and subpass mismatches
1055 auto rp_error = VALIDATION_ERROR_1a200366, sp_error = VALIDATION_ERROR_1a200368;
1057 case CMD_DRAWINDEXED:
1058 rp_error = VALIDATION_ERROR_1a40038c;
1059 sp_error = VALIDATION_ERROR_1a40038e;
1061 case CMD_DRAWINDIRECT:
1062 rp_error = VALIDATION_ERROR_1aa003be;
1063 sp_error = VALIDATION_ERROR_1aa003c0;
1065 case CMD_DRAWINDIRECTCOUNTAMD:
1066 rp_error = VALIDATION_ERROR_1ac003f6;
1067 sp_error = VALIDATION_ERROR_1ac003f8;
1069 case CMD_DRAWINDEXEDINDIRECT:
1070 rp_error = VALIDATION_ERROR_1a600426;
1071 sp_error = VALIDATION_ERROR_1a600428;
1073 case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
1074 rp_error = VALIDATION_ERROR_1a800460;
1075 sp_error = VALIDATION_ERROR_1a800462;
1078 assert(CMD_DRAW == cmd_type);
1081 std::string err_string;
1082 if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
1083 // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
1084 skip |= validateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
1085 pPipeline->rp_state.get(), caller, rp_error);
1087 if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
1089 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1090 HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
1091 pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
1098 // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
1099 // pipelineLayout[layoutIndex]
1100 static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
1101 PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
1103 auto num_sets = pipeline_layout->set_layouts.size();
1104 if (layoutIndex >= num_sets) {
1105 stringstream errorStr;
1106 errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
1107 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
1109 errorMsg = errorStr.str();
1112 if (descriptor_set->IsPushDescriptor()) return true;
1113 auto layout_node = pipeline_layout->set_layouts[layoutIndex];
1114 return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
1117 // Validate overall state at the time of a draw call
1118 static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
1119 const VkPipelineBindPoint bind_point, const char *function,
1120 UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1121 bool result = false;
1122 auto const &state = cb_node->lastBound[bind_point];
1123 PIPELINE_STATE *pPipe = state.pipeline_state;
1124 if (nullptr == pPipe) {
1126 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1127 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_PIPELINE,
1128 "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1129 // Early return as any further checks below will be busted w/o a pipeline
1130 if (result) return true;
1132 // First check flag states
1133 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1134 result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1136 // Now complete other state checks
1138 auto const &pipeline_layout = pPipe->pipeline_layout;
1140 for (const auto &set_binding_pair : pPipe->active_slots) {
1141 uint32_t setIndex = set_binding_pair.first;
1142 // If valid set is not bound throw an error
1143 if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1144 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1145 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND,
1146 "VkPipeline 0x%" PRIx64 " uses set #%u but that set is not bound.", HandleToUint64(pPipe->pipeline),
1148 } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) {
1149 // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1150 VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1151 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1152 HandleToUint64(setHandle), DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE,
1153 "VkDescriptorSet (0x%" PRIx64
1154 ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIx64 " due to: %s",
1155 HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1156 } else { // Valid set is bound and layout compatible, validate that it's updated
1157 // Pull the set node
1158 cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1159 // Validate the draw-time state for this descriptor set
1160 std::string err_str;
1161 if (!descriptor_set->IsPushDescriptor()) {
1162 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
1163 // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
1164 // Here, the currently bound pipeline determines whether an image validation check is redundant...
1165 // for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
1166 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
1168 const auto &binding_req_map = reduced_map.Map();
1170 if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
1172 auto set = descriptor_set->GetSet();
1174 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1175 HandleToUint64(set), DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
1176 "Descriptor set 0x%" PRIx64 " bound as set #%u encountered the following validation error at %s time: %s",
1177 HandleToUint64(set), setIndex, function, err_str.c_str());
1183 // Check general pipeline state that needs to be validated at drawtime
1184 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1185 result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);
1190 static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1191 auto const &state = cb_state->lastBound[bind_point];
1192 PIPELINE_STATE *pPipe = state.pipeline_state;
1193 if (VK_NULL_HANDLE != state.pipeline_layout) {
1194 for (const auto &set_binding_pair : pPipe->active_slots) {
1195 uint32_t setIndex = set_binding_pair.first;
1196 // Pull the set node
1197 cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1198 if (!descriptor_set->IsPushDescriptor()) {
1199 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
1200 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
1201 const auto &binding_req_map = reduced_map.Map();
1203 // Bind this set and its active descriptor resources to the command buffer
1204 descriptor_set->BindCommandBuffer(cb_state, binding_req_map);
1205 // For given active slots record updated images & buffers
1206 descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
1210 if (pPipe->vertexBindingDescriptions.size() > 0) {
1211 cb_state->vertex_buffer_used = true;
1215 static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1216 int pipelineIndex) {
1219 PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1221 // If create derivative bit is set, check that we've specified a base
1222 // pipeline correctly, and that the base pipeline was created to allow
1224 if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1225 PIPELINE_STATE *pBasePipeline = nullptr;
1226 if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1227 (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1228 // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1229 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1230 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1231 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1232 } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1233 if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1234 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1235 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_208005a0,
1236 "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
1238 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
1240 } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1241 pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1244 if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1245 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1246 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1247 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1254 // UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
1255 static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1256 int pipelineIndex) {
1259 PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1261 // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1262 // produces nonsense errors that confuse users. Other layers should already
1263 // emit errors for renderpass being invalid.
1264 auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
1265 if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
1266 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1267 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ee,
1268 "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
1269 pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
1270 subpass_desc = nullptr;
1273 if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1274 const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1275 if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
1277 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1278 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005d4,
1279 "vkCreateGraphicsPipelines(): Render pass (0x%" PRIx64
1280 ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u.",
1281 HandleToUint64(pPipeline->rp_state->renderPass), pPipeline->graphicsPipelineCI.subpass,
1282 subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
1284 if (!dev_data->enabled_features.independentBlend) {
1285 if (pPipeline->attachments.size() > 1) {
1286 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1287 for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1288 // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1289 // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1290 // only attachment state, so memcmp is best suited for the comparison
1291 if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1292 sizeof(pAttachments[0]))) {
1294 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1295 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004ba,
1296 "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
1297 "pAttachments must be identical.");
1303 if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1305 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1306 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004bc,
1307 "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
1309 for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
1310 if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1311 (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1312 (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1313 (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1314 if (!dev_data->enabled_features.dualSrcBlend) {
1316 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1317 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c0,
1318 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1319 "].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1321 pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
1324 if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1325 (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1326 (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1327 (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1328 if (!dev_data->enabled_features.dualSrcBlend) {
1330 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1331 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c2,
1332 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1333 "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1335 pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
1338 if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1339 (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1340 (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1341 (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1342 if (!dev_data->enabled_features.dualSrcBlend) {
1344 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1345 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c4,
1346 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1347 "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1349 pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
1352 if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1353 (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1354 (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1355 (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1356 if (!dev_data->enabled_features.dualSrcBlend) {
1358 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1359 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c6,
1360 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1361 "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1363 pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
1369 if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1372 // Each shader's stage must be unique
1373 if (pPipeline->duplicate_shaders) {
1374 for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1375 if (pPipeline->duplicate_shaders & stage) {
1376 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1377 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1378 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1379 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1384 if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1385 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1386 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ae,
1387 "Invalid Pipeline CreateInfo State: Vertex Shader required.");
1389 // Either both or neither TC/TE shaders should be defined
1390 bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1391 bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1392 if (has_control && !has_eval) {
1393 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1394 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b2,
1395 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1397 if (!has_control && has_eval) {
1398 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1399 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b4,
1400 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1402 // Compute shaders should be specified independent of Gfx shaders
1403 if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1404 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1405 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b0,
1406 "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
1408 // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1409 // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1410 if (has_control && has_eval &&
1411 (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1412 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1413 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1414 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c0,
1415 "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
1416 "tessellation pipelines.");
1418 if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1419 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1420 if (!has_control || !has_eval) {
1421 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1422 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c2,
1423 "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
1424 "for tessellation pipelines.");
1428 // If a rasterization state is provided...
1429 if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1430 if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1431 (!dev_data->enabled_features.depthClamp)) {
1432 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1433 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_1020061c,
1434 "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
1435 "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
1438 if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1439 (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1440 (!dev_data->enabled_features.depthBiasClamp)) {
1441 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1442 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_FEATURE,
1443 "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
1444 "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1445 "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1448 // If rasterization is enabled...
1449 if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1450 if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
1451 (!dev_data->enabled_features.alphaToOne)) {
1452 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1453 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_10000622,
1454 "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1455 "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
1458 // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1459 if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1460 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1461 if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1462 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1463 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e0,
1464 "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
1465 "and subpass uses a depth/stencil attachment.");
1467 } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1468 (!dev_data->enabled_features.depthBounds)) {
1469 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1470 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f6004ac,
1471 "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
1472 "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
1473 "set to VK_FALSE.");
1477 // If subpass uses color attachments, pColorBlendState must be valid pointer
1479 uint32_t color_attachment_count = 0;
1480 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1481 if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1482 ++color_attachment_count;
1485 if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1486 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1487 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e2,
1488 "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
1489 "subpass uses color attachments.");
1495 auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
1497 for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
1498 VkFormat format = vi->pVertexAttributeDescriptions[j].format;
1499 // Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
1500 VkFormatProperties properties;
1501 dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
1503 if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
1505 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1506 VALIDATION_ERROR_14a004de,
1507 "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
1508 "(%s) is not a supported vertex buffer format.",
1509 pipelineIndex, j, string_VkFormat(format));
1514 if (dev_data->extensions.vk_amd_mixed_attachment_samples) {
1515 VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
1516 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1517 if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1519 std::max(max_sample_count,
1520 pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
1523 if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1525 std::max(max_sample_count,
1526 pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
1528 if (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count) {
1529 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1530 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_09600bc2,
1531 "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
1532 "attachment samples (%s) used in subpass %u.",
1534 string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
1535 string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
1542 // Block of code at start here specifically for managing/tracking DSs
1544 // Return Pool node ptr for specified pool or else NULL
1545 DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1546 auto pool_it = dev_data->descriptorPoolMap.find(pool);
1547 if (pool_it == dev_data->descriptorPoolMap.end()) {
1550 return pool_it->second;
1553 // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
1554 // func_str is the name of the calling function
1555 // Return false if no errors occur
1556 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1557 static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
1558 if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1560 auto set_node = dev_data->setMap.find(set);
1561 if (set_node == dev_data->setMap.end()) {
1562 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1563 HandleToUint64(set), DRAWSTATE_DOUBLE_DESTROY,
1564 "Cannot call %s() on descriptor set 0x%" PRIx64 " that has not been allocated.", func_str.c_str(),
1565 HandleToUint64(set));
1567 // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
1568 if (set_node->second->in_use.load()) {
1569 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1570 HandleToUint64(set), VALIDATION_ERROR_2860026a,
1571 "Cannot call %s() on descriptor set 0x%" PRIx64 " that is in use by a command buffer.",
1572 func_str.c_str(), HandleToUint64(set));
1578 // Validate that given pool does not store any descriptor sets used by an in-flight CmdBuffer
1579 // pool stores the descriptor sets to be validated
1580 // Return false if no errors occur
1581 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1582 static bool validateIdleDescriptorSetForPoolReset(const layer_data *dev_data, const VkDescriptorPool pool) {
1583 if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1585 DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1586 if (pPool != nullptr) {
1587 for (auto ds : pPool->sets) {
1588 if (ds && ds->in_use.load()) {
1589 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1590 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool), VALIDATION_ERROR_32a00272,
1591 "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer. %s",
1592 validation_error_map[VALIDATION_ERROR_32a00272]);
1600 // Remove set from setMap and delete the set
1601 static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1602 dev_data->setMap.erase(descriptor_set->GetSet());
1603 delete descriptor_set;
1605 // Free all DS Pools including their Sets & related sub-structs
1606 // NOTE : Calls to this function should be wrapped in mutex
1607 static void deletePools(layer_data *dev_data) {
1608 for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1609 // Remove this pools' sets from setMap and delete them
1610 for (auto ds : ii->second->sets) {
1611 freeDescriptorSet(dev_data, ds);
1613 ii->second->sets.clear();
1615 ii = dev_data->descriptorPoolMap.erase(ii);
1619 static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1620 VkDescriptorPoolResetFlags flags) {
1621 DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1622 // TODO: validate flags
1623 // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1624 for (auto ds : pPool->sets) {
1625 freeDescriptorSet(dev_data, ds);
1627 pPool->sets.clear();
1628 // Reset available count for each type and available sets for this pool
1629 for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1630 pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1632 pPool->availableSets = pPool->maxSets;
1635 // For given CB object, fetch associated CB Node from map
1636 GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1637 auto it = dev_data->commandBufferMap.find(cb);
1638 if (it == dev_data->commandBufferMap.end()) {
1644 // If a renderpass is active, verify that the given command type is appropriate for current subpass state
1645 bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1646 if (!pCB->activeRenderPass) return false;
1648 if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1649 (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1650 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1651 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1652 "Commands cannot be called in a subpass using secondary command buffers.");
1653 } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1654 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1655 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1656 "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
1661 bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
1662 VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
1663 auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1665 VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1666 if (!(required_flags & queue_flags)) {
1667 string required_flags_string;
1668 for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1669 if (flag & required_flags) {
1670 if (required_flags_string.size()) {
1671 required_flags_string += " or ";
1673 required_flags_string += string_VkQueueFlagBits(flag);
1676 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1677 HandleToUint64(cb_node->commandBuffer), error_code,
1678 "Cannot call %s on a command buffer allocated from a pool without %s capabilities..", caller_name,
1679 required_flags_string.c_str());
1685 static char const *GetCauseStr(VK_OBJECT obj) {
1686 if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
1687 if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
1691 static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
1693 for (auto obj : cb_state->broken_bindings) {
1694 const char *type_str = object_string[obj.type];
1695 const char *cause_str = GetCauseStr(obj);
1696 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1697 HandleToUint64(cb_state->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1698 "You are adding %s to command buffer 0x%" PRIx64 " that is invalid because bound %s 0x%" PRIx64 " was %s.",
1699 call_source, HandleToUint64(cb_state->commandBuffer), type_str, obj.handle, cause_str);
1704 // 'commandBuffer must be in the recording state' valid usage error code for each command
1705 // Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
1706 // Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
1707 using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
1708 static const std::unordered_map<CmdTypeHashType, UNIQUE_VALIDATION_ERROR_CODE> must_be_recording_map = {
1709 {CMD_NONE, VALIDATION_ERROR_UNDEFINED}, // UNMATCHED
1710 {CMD_BEGINQUERY, VALIDATION_ERROR_17802413},
1711 {CMD_BEGINRENDERPASS, VALIDATION_ERROR_17a02413},
1712 {CMD_BINDDESCRIPTORSETS, VALIDATION_ERROR_17c02413},
1713 {CMD_BINDINDEXBUFFER, VALIDATION_ERROR_17e02413},
1714 {CMD_BINDPIPELINE, VALIDATION_ERROR_18002413},
1715 {CMD_BINDVERTEXBUFFERS, VALIDATION_ERROR_18202413},
1716 {CMD_BLITIMAGE, VALIDATION_ERROR_18402413},
1717 {CMD_CLEARATTACHMENTS, VALIDATION_ERROR_18602413},
1718 {CMD_CLEARCOLORIMAGE, VALIDATION_ERROR_18802413},
1719 {CMD_CLEARDEPTHSTENCILIMAGE, VALIDATION_ERROR_18a02413},
1720 {CMD_COPYBUFFER, VALIDATION_ERROR_18c02413},
1721 {CMD_COPYBUFFERTOIMAGE, VALIDATION_ERROR_18e02413},
1722 {CMD_COPYIMAGE, VALIDATION_ERROR_19002413},
1723 {CMD_COPYIMAGETOBUFFER, VALIDATION_ERROR_19202413},
1724 {CMD_COPYQUERYPOOLRESULTS, VALIDATION_ERROR_19402413},
1725 {CMD_DEBUGMARKERBEGINEXT, VALIDATION_ERROR_19602413},
1726 {CMD_DEBUGMARKERENDEXT, VALIDATION_ERROR_19802413},
1727 {CMD_DEBUGMARKERINSERTEXT, VALIDATION_ERROR_19a02413},
1728 {CMD_DISPATCH, VALIDATION_ERROR_19c02413},
1729 // Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, VALIDATION_ERROR_19e02413 },
1730 {CMD_DISPATCHINDIRECT, VALIDATION_ERROR_1a002413},
1731 {CMD_DRAW, VALIDATION_ERROR_1a202413},
1732 {CMD_DRAWINDEXED, VALIDATION_ERROR_1a402413},
1733 {CMD_DRAWINDEXEDINDIRECT, VALIDATION_ERROR_1a602413},
1734 // Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD, VALIDATION_ERROR_1a802413 },
1735 {CMD_DRAWINDIRECT, VALIDATION_ERROR_1aa02413},
1736 // Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD, VALIDATION_ERROR_1ac02413 },
1737 {CMD_ENDCOMMANDBUFFER, VALIDATION_ERROR_27400076},
1738 {CMD_ENDQUERY, VALIDATION_ERROR_1ae02413},
1739 {CMD_ENDRENDERPASS, VALIDATION_ERROR_1b002413},
1740 {CMD_EXECUTECOMMANDS, VALIDATION_ERROR_1b202413},
1741 {CMD_FILLBUFFER, VALIDATION_ERROR_1b402413},
1742 {CMD_NEXTSUBPASS, VALIDATION_ERROR_1b602413},
1743 {CMD_PIPELINEBARRIER, VALIDATION_ERROR_1b802413},
1744 // Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, VALIDATION_ERROR_1ba02413 },
1745 {CMD_PUSHCONSTANTS, VALIDATION_ERROR_1bc02413},
1746 {CMD_PUSHDESCRIPTORSETKHR, VALIDATION_ERROR_1be02413},
1747 {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, VALIDATION_ERROR_1c002413},
1748 // Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX, VALIDATION_ERROR_1c202413 },
1749 {CMD_RESETEVENT, VALIDATION_ERROR_1c402413},
1750 {CMD_RESETQUERYPOOL, VALIDATION_ERROR_1c602413},
1751 {CMD_RESOLVEIMAGE, VALIDATION_ERROR_1c802413},
1752 {CMD_SETBLENDCONSTANTS, VALIDATION_ERROR_1ca02413},
1753 {CMD_SETDEPTHBIAS, VALIDATION_ERROR_1cc02413},
1754 {CMD_SETDEPTHBOUNDS, VALIDATION_ERROR_1ce02413},
1755 // Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, VALIDATION_ERROR_1d002413 },
1756 {CMD_SETDISCARDRECTANGLEEXT, VALIDATION_ERROR_1d202413},
1757 {CMD_SETEVENT, VALIDATION_ERROR_1d402413},
1758 {CMD_SETLINEWIDTH, VALIDATION_ERROR_1d602413},
1759 {CMD_SETSAMPLELOCATIONSEXT, VALIDATION_ERROR_3e202413},
1760 {CMD_SETSCISSOR, VALIDATION_ERROR_1d802413},
1761 {CMD_SETSTENCILCOMPAREMASK, VALIDATION_ERROR_1da02413},
1762 {CMD_SETSTENCILREFERENCE, VALIDATION_ERROR_1dc02413},
1763 {CMD_SETSTENCILWRITEMASK, VALIDATION_ERROR_1de02413},
1764 {CMD_SETVIEWPORT, VALIDATION_ERROR_1e002413},
1765 // Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV, VALIDATION_ERROR_1e202413 },
1766 {CMD_UPDATEBUFFER, VALIDATION_ERROR_1e402413},
1767 {CMD_WAITEVENTS, VALIDATION_ERROR_1e602413},
1768 {CMD_WRITETIMESTAMP, VALIDATION_ERROR_1e802413},
1771 // Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1772 // there's an issue with the Cmd ordering
1773 bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1774 switch (cb_state->state) {
1776 return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1778 case CB_INVALID_COMPLETE:
1779 case CB_INVALID_INCOMPLETE:
1780 return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1783 auto error_it = must_be_recording_map.find(cmd);
1784 // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
1785 assert(error_it != must_be_recording_map.cend());
1786 if (error_it == must_be_recording_map.cend()) {
1787 error_it = must_be_recording_map.find(CMD_NONE); // But we'll handle the asserting case, in case of a test gap
1789 const auto error = error_it->second;
1790 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1791 HandleToUint64(cb_state->commandBuffer), error,
1792 "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
1796 // For given object struct return a ptr of BASE_NODE type for its wrapping struct
1797 BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1798 BASE_NODE *base_ptr = nullptr;
1799 switch (object_struct.type) {
1800 case kVulkanObjectTypeDescriptorSet: {
1801 base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1804 case kVulkanObjectTypeSampler: {
1805 base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1808 case kVulkanObjectTypeQueryPool: {
1809 base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1812 case kVulkanObjectTypePipeline: {
1813 base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1816 case kVulkanObjectTypeBuffer: {
1817 base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1820 case kVulkanObjectTypeBufferView: {
1821 base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1824 case kVulkanObjectTypeImage: {
1825 base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1828 case kVulkanObjectTypeImageView: {
1829 base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1832 case kVulkanObjectTypeEvent: {
1833 base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1836 case kVulkanObjectTypeDescriptorPool: {
1837 base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1840 case kVulkanObjectTypeCommandPool: {
1841 base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1844 case kVulkanObjectTypeFramebuffer: {
1845 base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1848 case kVulkanObjectTypeRenderPass: {
1849 base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1852 case kVulkanObjectTypeDeviceMemory: {
1853 base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1857 // TODO : Any other objects to be handled here?
1864 // Tie the VK_OBJECT to the cmd buffer which includes:
1865 // Add object_binding to cmd buffer
1866 // Add cb_binding to object
1867 static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1868 cb_bindings->insert(cb_node);
1869 cb_node->object_bindings.insert(obj);
1871 // For a given object, if cb_node is in that objects cb_bindings, remove cb_node
1872 static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
1873 BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1874 if (base_obj) base_obj->cb_bindings.erase(cb_node);
1876 // Reset the command buffer state
1877 // Maintain the createInfo and set state to CB_NEW, but clear all other state
1878 static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
1879 GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1881 pCB->in_use.store(0);
1882 // Reset CB state (note that createInfo is not cleared)
1883 pCB->commandBuffer = cb;
1884 memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1885 memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1886 pCB->hasDrawCmd = false;
1887 pCB->state = CB_NEW;
1888 pCB->submitCount = 0;
1889 pCB->image_layout_change_count = 1; // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
1891 pCB->static_status = 0;
1892 pCB->viewportMask = 0;
1893 pCB->scissorMask = 0;
1895 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1896 pCB->lastBound[i].reset();
1899 memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1900 pCB->activeRenderPass = nullptr;
1901 pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1902 pCB->activeSubpass = 0;
1903 pCB->broken_bindings.clear();
1904 pCB->waitedEvents.clear();
1905 pCB->events.clear();
1906 pCB->writeEventsBeforeWait.clear();
1907 pCB->waitedEventsBeforeQueryReset.clear();
1908 pCB->queryToStateMap.clear();
1909 pCB->activeQueries.clear();
1910 pCB->startedQueries.clear();
1911 pCB->imageLayoutMap.clear();
1912 pCB->eventToStageMap.clear();
1913 pCB->drawData.clear();
1914 pCB->currentDrawData.buffers.clear();
1915 pCB->vertex_buffer_used = false;
1916 pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1917 // If secondary, invalidate any primary command buffer that may call us.
1918 if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1919 invalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1922 // Remove reverse command buffer links.
1923 for (auto pSubCB : pCB->linkedCommandBuffers) {
1924 pSubCB->linkedCommandBuffers.erase(pCB);
1926 pCB->linkedCommandBuffers.clear();
1927 pCB->updateImages.clear();
1928 pCB->updateBuffers.clear();
1929 clear_cmd_buf_and_mem_references(dev_data, pCB);
1930 pCB->queue_submit_functions.clear();
1931 pCB->cmd_execute_commands_functions.clear();
1932 pCB->eventUpdates.clear();
1933 pCB->queryUpdates.clear();
1935 // Remove object bindings
1936 for (auto obj : pCB->object_bindings) {
1937 removeCommandBufferBinding(dev_data, &obj, pCB);
1939 pCB->object_bindings.clear();
1940 // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1941 for (auto framebuffer : pCB->framebuffers) {
1942 auto fb_state = GetFramebufferState(dev_data, framebuffer);
1943 if (fb_state) fb_state->cb_bindings.erase(pCB);
1945 pCB->framebuffers.clear();
1946 pCB->activeFramebuffer = VK_NULL_HANDLE;
1947 memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));
1951 CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
1952 // initially assume everything is static state
1953 CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;
1956 for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
1957 switch (ds->pDynamicStates[i]) {
1958 case VK_DYNAMIC_STATE_LINE_WIDTH:
1959 flags &= ~CBSTATUS_LINE_WIDTH_SET;
1961 case VK_DYNAMIC_STATE_DEPTH_BIAS:
1962 flags &= ~CBSTATUS_DEPTH_BIAS_SET;
1964 case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1965 flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1967 case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1968 flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1970 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1971 flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1973 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1974 flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1976 case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1977 flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1979 case VK_DYNAMIC_STATE_SCISSOR:
1980 flags &= ~CBSTATUS_SCISSOR_SET;
1982 case VK_DYNAMIC_STATE_VIEWPORT:
1983 flags &= ~CBSTATUS_VIEWPORT_SET;
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
1996 bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
1997 UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1998 bool inside = false;
1999 if (pCB->activeRenderPass) {
2000 inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2001 HandleToUint64(pCB->commandBuffer), msgCode,
2002 "%s: It is invalid to issue this call inside an active render pass (0x%" PRIx64 ").", apiName,
2003 HandleToUint64(pCB->activeRenderPass->renderPass));
2008 // Flags validation error if the associated call is made outside a render pass. The apiName
2009 // routine should ONLY be called inside a render pass.
2010 bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
2011 bool outside = false;
2012 if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
2013 ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
2014 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
2015 outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2016 HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
2022 static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
2023 layer_debug_report_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
2024 layer_debug_messenger_actions(instance_data->report_data, instance_data->logging_messenger, pAllocator,
2025 "lunarg_core_validation");
2028 // For the given ValidationCheck enum, set all relevant instance disabled flags to true
2029 void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
2030 for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
2031 switch (val_flags_struct->pDisabledValidationChecks[i]) {
2032 case VK_VALIDATION_CHECK_SHADERS_EXT:
2033 instance_data->disabled.shader_validation = true;
2035 case VK_VALIDATION_CHECK_ALL_EXT:
2036 // Set all disabled flags to true
2037 instance_data->disabled.SetAll(true);
2045 VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
2046 VkInstance *pInstance) {
2047 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2049 assert(chain_info->u.pLayerInfo);
2050 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2051 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
2052 if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
2054 // Advance the link info for the next element on the chain
2055 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2057 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
2058 if (result != VK_SUCCESS) return result;
2060 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
2061 instance_data->instance = *pInstance;
2062 layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
2063 instance_data->report_data = debug_utils_create_instance(
2064 &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
2066 instance_data->api_version = instance_data->extensions.InitFromInstanceCreateInfo(
2067 (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0), pCreateInfo);
2068 init_core_validation(instance_data, pAllocator);
2070 ValidateLayerOrdering(*pCreateInfo);
2071 // Parse any pNext chains
2072 const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
2073 if (validation_flags_ext) {
2074 SetDisabledFlags(instance_data, validation_flags_ext);
2080 // Hook DestroyInstance to remove tableInstanceMap entry
2081 VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
2082 // TODOSC : Shouldn't need any customization here
2083 dispatch_key key = get_dispatch_key(instance);
2084 // TBD: Need any locking this early, in case this function is called at the
2085 // same time by more than one thread?
2086 instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
2087 instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
2089 lock_guard_t lock(global_lock);
2090 // Clean up logging callback, if any
2091 while (instance_data->logging_messenger.size() > 0) {
2092 VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
2093 layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
2094 instance_data->logging_messenger.pop_back();
2096 while (instance_data->logging_callback.size() > 0) {
2097 VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
2098 layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
2099 instance_data->logging_callback.pop_back();
2102 layer_debug_utils_destroy_instance(instance_data->report_data);
2103 FreeLayerDataPtr(key, instance_layer_data_map);
2106 static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2107 uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
2108 const char *queue_family_var_name) {
2111 const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
2112 ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
2115 std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
2116 ? "the pQueueFamilyPropertyCount was never obtained"
2117 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
2119 if (requested_queue_family >= pd_state->queue_family_count) {
2120 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2121 HandleToUint64(pd_state->phys_device), err_code,
2122 "%s: %s (= %" PRIu32
2123 ") is not less than any previously obtained pQueueFamilyPropertyCount from "
2124 "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
2125 cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
// Verify VkDeviceQueueCreateInfos
// For each entry: (1) the queueFamilyIndex must be within the family count the app previously
// queried, and (2) queueCount must not exceed that family's advertised queue count.
// Returns true if any error was logged (and the error is not suppressed by the callback).
static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
                                           uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
    bool skip = false;

    for (uint32_t i = 0; i < info_count; ++i) {
        const auto requested_queue_family = infos[i].queueFamilyIndex;

        // Verify that requested queue family is known to be valid at this point in time
        std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
        skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
                                                 "vkCreateDevice", queue_family_var_name.c_str());

        // Verify that requested queue count of queue family is known to be valid at this point in time
        if (requested_queue_family < pd_state->queue_family_count) {
            const auto requested_queue_count = infos[i].queueCount;
            // The family index may be in range of the queried count while the per-family
            // properties array was never populated for it -- handle both cases below.
            const auto queue_family_props_count = pd_state->queue_family_properties.size();
            const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
            // Only mention the 2/KHR query variant when that extension is enabled.
            const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
                                                  ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
                                                  : "";
            std::string count_note =
                !queue_family_has_props
                    ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
                    : "i.e. is not less than or equal to " +
                          std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);

            if (!queue_family_has_props ||
                requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
                skip |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(pd_state->phys_device), VALIDATION_ERROR_06c002fc,
                    "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
                    ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
                    "].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
                    i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
            }
        }
    }

    return skip;
}
2173 // Verify that features have been queried and that they are available
2174 static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2175 const VkPhysicalDeviceFeatures *requested_features) {
2178 const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
2179 const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
2180 // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
2181 // Need to provide the struct member name with the issue. To do that seems like we'll
2182 // have to loop through each struct member which should be done w/ codegen to keep in synch.
2183 uint32_t errors = 0;
2184 uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2185 for (uint32_t i = 0; i < total_bools; i++) {
2186 if (requested[i] > actual[i]) {
2187 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2188 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2189 "While calling vkCreateDevice(), requesting feature '%s' in VkPhysicalDeviceFeatures struct, which is "
2190 "not available on this device.",
2191 GetPhysDevFeatureString(i));
2195 if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
2196 // If user didn't request features, notify them that they should
2197 // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2198 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2199 0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2200 "You requested features that are unavailable on this device. You should first query feature availability "
2201 "by calling vkGetPhysicalDeviceFeatures().");
// Validate vkCreateDevice inputs (queue families, queue counts, requested features),
// forward the create down the layer chain, then initialize per-device layer state
// (dispatch table, cached physical-device properties/limits, enabled features).
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    bool skip = false;
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);

    unique_lock_t lock(global_lock);
    auto pd_state = GetPhysicalDeviceState(instance_data, gpu);

    // TODO: object_tracker should perhaps do this instead
    // and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!GetPhysicalDeviceState(instance_data, gpu)) {
        skip |=
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    DEVLIMITS_MUST_QUERY_COUNT, "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    }

    // Check that any requested features are available
    // The enabled features can come from either pEnabledFeatures, or from the pNext chain
    const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
    if (nullptr == enabled_features_found) {
        const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
        if (features2) {
            enabled_features_found = &(features2->features);
        }
    }

    if (enabled_features_found) {
        skip |= ValidateRequestedFeatures(instance_data, pd_state, enabled_features_found);
    }

    skip |=
        ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);

    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    // Drop the global lock around the down-chain call; it re-enters the loader/driver.
    lock.unlock();

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    lock.lock();
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);

    device_data->instance_data = instance_data;
    // Setup device dispatch table
    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
    device_data->device = *pDevice;
    // Save PhysicalDevice handle
    device_data->physical_device = gpu;

    device_data->report_data = layer_debug_utils_create_device(instance_data->report_data, *pDevice);

    // Get physical device limits for this device
    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));

    device_data->api_version = device_data->extensions.InitFromDeviceCreateInfo(
        &instance_data->extensions, device_data->phys_dev_properties.properties.apiVersion, pCreateInfo);

    // Cache the queue family properties (count query, then fill).
    uint32_t count;
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    device_data->phys_dev_properties.queue_family_properties.resize(count);
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (enabled_features_found) {
        device_data->enabled_features = *enabled_features_found;
    } else {
        memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device properties and physical device mem limits into device layer_data structs
    instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
    instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);

    if (device_data->extensions.vk_khr_push_descriptor) {
        // Get the needed push_descriptor limits
        auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
    }
    if (device_data->extensions.vk_ext_descriptor_indexing) {
        // Get the needed descriptor_indexing limits
        auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>();
        auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props);
        instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
        device_data->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
    }

    // Record the app's requested descriptor-indexing features, if present on the chain.
    const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
    if (descriptor_indexing_features) {
        device_data->phys_dev_ext_props.descriptor_indexing_features = *descriptor_indexing_features;
    }

    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}
2322 VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2323 // TODOSC : Shouldn't need any customization here
2324 dispatch_key key = get_dispatch_key(device);
2325 layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2326 // Free all the memory
2327 unique_lock_t lock(global_lock);
2328 dev_data->pipelineMap.clear();
2329 dev_data->renderPassMap.clear();
2330 for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2331 delete (*ii).second;
2333 dev_data->commandBufferMap.clear();
2334 // This will also delete all sets in the pool & remove them from setMap
2335 deletePools(dev_data);
2336 // All sets should be removed
2337 assert(dev_data->setMap.empty());
2338 dev_data->descriptorSetLayoutMap.clear();
2339 dev_data->imageViewMap.clear();
2340 dev_data->imageMap.clear();
2341 dev_data->imageSubresourceMap.clear();
2342 dev_data->imageLayoutMap.clear();
2343 dev_data->bufferViewMap.clear();
2344 dev_data->bufferMap.clear();
2345 // Queues persist until device is destroyed
2346 dev_data->queueMap.clear();
2347 // Report any memory leaks
2348 layer_debug_utils_destroy_device(device);
2351 #if DISPATCH_MAP_DEBUG
2352 fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
2355 dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2356 FreeLayerDataPtr(key, layer_data_map);
// Instance extensions implemented by this layer itself -- presumably reported by the
// layer's extension enumeration entry points (confirm at the usage site).
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2361 // For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
2362 // and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
2363 static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2364 UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2366 if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2368 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
2369 "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
2370 "geometryShader feature enabled.",
2373 if (!dev_data->enabled_features.tessellationShader &&
2374 (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2376 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
2377 "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
2378 "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
2379 "tessellationShader feature enabled.",
2385 // Loop through bound objects and increment their in_use counts.
2386 static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2387 for (auto obj : cb_node->object_bindings) {
2388 auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2390 base_obj->in_use.fetch_add(1);
2394 // Track which resources are in-flight by atomically incrementing their "in_use" count
2395 static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2396 cb_node->submitCount++;
2397 cb_node->in_use.fetch_add(1);
2399 // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2400 IncrementBoundObjects(dev_data, cb_node);
2401 // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2402 // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2403 // should then be flagged prior to calling this function
2404 for (auto drawDataElement : cb_node->drawData) {
2405 for (auto buffer : drawDataElement.buffers) {
2406 auto buffer_state = GetBufferState(dev_data, buffer);
2408 buffer_state->in_use.fetch_add(1);
2412 for (auto event : cb_node->writeEventsBeforeWait) {
2413 auto event_state = GetEventNode(dev_data, event);
2414 if (event_state) event_state->write_in_use++;
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
//
// Implementation: a worklist walk over the graph of queues reachable through
// cross-queue semaphore waits, tracking per-queue target/done sequence numbers
// so each submission range is validated at most once.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
    bool skip = false;

    // sequence number we want to validate up to, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
    // sequence number we've completed validation for, per queue
    std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
    std::vector<QUEUE_STATE *> worklist{initial_queue};

    while (worklist.size()) {
        auto queue = worklist.back();
        worklist.pop_back();

        // Resume from whichever is later: what we've already validated, or what the
        // queue itself has already retired (queue->seq).
        auto target_seq = target_seqs[queue];
        auto seq = std::max(done_seqs[queue], queue->seq);
        auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq

        for (; seq < target_seq; ++sub_it, ++seq) {
            for (auto &wait : sub_it->waitSemaphores) {
                auto other_queue = GetQueueState(dev_data, wait.queue);

                if (other_queue == queue) continue;  // semaphores /always/ point backwards, so no point here.

                auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
                auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);

                // if this wait is for another queue, and covers new sequence
                // numbers beyond what we've already validated, mark the new
                // target seq and (possibly-re)add the queue to the worklist.
                if (other_done_seq < other_target_seq) {
                    target_seqs[other_queue] = other_target_seq;
                    worklist.push_back(other_queue);
                }
            }

            // The actual check: any query reset guarded by events requires those
            // events to already be signaled.
            for (auto cb : sub_it->cbs) {
                auto cb_node = GetCBNode(dev_data, cb);
                if (cb_node) {
                    for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
                        for (auto event : queryEventsPair.second) {
                            if (dev_data->eventMap[event].needsSignaled) {
                                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
                                                "Cannot get query results on queryPool 0x%" PRIx64
                                                " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                                HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
                                                HandleToUint64(event));
                            }
                        }
                    }
                }
            }
        }

        // finally mark the point we've now validated this queue to.
        done_seqs[queue] = seq;
    }

    return skip;
}
2483 // When the given fence is retired, verify outstanding queue operations through the point of the fence
2484 static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2485 auto fence_state = GetFenceNode(dev_data, fence);
2486 if (fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
2487 return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2492 // Decrement in-use count for objects bound to command buffer
2493 static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2494 BASE_NODE *base_obj = nullptr;
2495 for (auto obj : cb_node->object_bindings) {
2496 base_obj = GetStateStructPtrFromObject(dev_data, obj);
2498 base_obj->in_use.fetch_sub(1);
// Retire (mark complete) all work on 'pQueue' up to sequence number 'seq':
// release semaphore/buffer/command-buffer in-use counts, propagate per-CB query and
// event state back to the device, mark fences retired, and recursively retire the
// other queues this work waited on.
static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
    // Highest sequence number we saw a cross-queue semaphore wait for, per queue.
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto &semaphore : submission.externalSemaphores) {
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto cb : submission.cbs) {
            auto cb_node = GetCBNode(dev_data, cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_state = GetBufferState(dev_data, buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            // Fold the command buffer's recorded query/event state into the device-wide maps
            // now that the work is known complete.
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            cb_node->in_use.fetch_sub(1);
        }

        auto pFence = GetFenceNode(dev_data, submission.fence);
        if (pFence && pFence->scope == kSyncScopeInternal) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
    }
}
2579 // Submit a fence to a queue, delimiting previous fences and previous untracked
2581 static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2582 pFence->state = FENCE_INFLIGHT;
2583 pFence->signaler.first = pQueue->queue;
2584 pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
2587 static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2589 if ((pCB->in_use.load() || current_submit_count > 1) &&
2590 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2591 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2592 VALIDATION_ERROR_31a0008e,
2593 "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
2594 HandleToUint64(pCB->commandBuffer));
// Validate that a command buffer is in a submittable state:
//  - a ONE_TIME_SUBMIT buffer is not being submitted more than once,
//  - the buffer is not invalid (a resource it recorded was since destroyed/changed),
//  - recording has both started and ended (vkEndCommandBuffer was called).
static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
                                       int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
    bool skip = false;
    // This whole class of checks can be disabled via VkValidationFlagsEXT / settings.
    if (dev_data->instance_data->disabled.command_buffer_state) return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
        (cb_state->submitCount + current_submit_count > 1)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION,
                        "Commandbuffer 0x%" PRIx64
                        " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
                        " times.",
                        HandleToUint64(cb_state->commandBuffer), cb_state->submitCount + current_submit_count);
    }

    // Validate that cmd buffers have been updated
    switch (cb_state->state) {
        case CB_INVALID_INCOMPLETE:
        case CB_INVALID_COMPLETE:
            skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
            break;

        case CB_NEW:
            // Never recorded at all.
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(cb_state->commandBuffer), vu_id,
                            "Command buffer 0x%" PRIx64 " used in the call to %s is unrecorded and contains no commands.",
                            HandleToUint64(cb_state->commandBuffer), call_source);
            break;

        case CB_RECORDING:
            // Begun but never ended.
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), DRAWSTATE_NO_END_COMMAND_BUFFER,
                            "You must call vkEndCommandBuffer() on command buffer 0x%" PRIx64 " before this call to %s!",
                            HandleToUint64(cb_state->commandBuffer), call_source);
            break;

        default: /* recorded */
            break;
    }
    return skip;
}
2641 static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2644 // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2645 // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2646 // should then be flagged prior to calling this function
2647 for (auto drawDataElement : cb_node->drawData) {
2648 for (auto buffer : drawDataElement.buffers) {
2649 auto buffer_state = GetBufferState(dev_data, buffer);
2650 if (buffer != VK_NULL_HANDLE && !buffer_state) {
2651 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2652 HandleToUint64(buffer), DRAWSTATE_INVALID_BUFFER,
2653 "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2660 // Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
2661 bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2662 const uint32_t *indices) {
2665 auto queue_state = GetQueueState(dev_data, queue);
2667 for (uint32_t i = 0; i < count; i++) {
2668 if (indices[i] == queue_state->queueFamilyIndex) {
2675 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
2676 object->handle, DRAWSTATE_INVALID_QUEUE_FAMILY,
2677 "vkQueueSubmit: Command buffer 0x%" PRIx64 " contains %s 0x%" PRIx64
2678 " which was not created allowing concurrent access to this queue family %d.",
2679 HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle,
2680 queue_state->queueFamilyIndex);
2686 // Validate that queueFamilyIndices of primary command buffers match this queue
2687 // Secondary command buffers were previously validated in vkCmdExecuteCommands().
2688 static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2690 auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2691 auto queue_state = GetQueueState(dev_data, queue);
2693 if (pPool && queue_state) {
2694 if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2695 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2696 HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_31a00094,
2697 "vkQueueSubmit: Primary command buffer 0x%" PRIx64
2698 " created in queue family %d is being submitted on queue 0x%" PRIx64 " from queue family %d.",
2699 HandleToUint64(pCB->commandBuffer), pPool->queueFamilyIndex, HandleToUint64(queue),
2700 queue_state->queueFamilyIndex);
2703 // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2704 for (auto object : pCB->object_bindings) {
2705 if (object.type == kVulkanObjectTypeImage) {
2706 auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2707 if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2708 skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2709 image_state->createInfo.pQueueFamilyIndices);
2711 } else if (object.type == kVulkanObjectTypeBuffer) {
2712 auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2713 if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2714 skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2715 buffer_state->createInfo.pQueueFamilyIndices);
2724 static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2725 // Track in-use for resources off of primary and any secondary CBs
2728 // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2730 skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2732 skip |= validateResources(dev_data, pCB);
2734 for (auto pSubCB : pCB->linkedCommandBuffers) {
2735 skip |= validateResources(dev_data, pSubCB);
2736 // TODO: replace with invalidateCommandBuffers() at recording.
2737 if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2738 !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2739 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2740 VALIDATION_ERROR_31a00092,
2741 "Commandbuffer 0x%" PRIx64 " was submitted with secondary buffer 0x%" PRIx64
2742 " but that buffer has subsequently been bound to primary cmd buffer 0x%" PRIx64
2743 " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
2744 HandleToUint64(pCB->commandBuffer), HandleToUint64(pSubCB->commandBuffer),
2745 HandleToUint64(pSubCB->primaryCommandBuffer));
2749 skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
2754 static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2757 if (pFence && pFence->scope == kSyncScopeInternal) {
2758 if (pFence->state == FENCE_INFLIGHT) {
2759 // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2760 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2761 HandleToUint64(pFence->fence), DRAWSTATE_INVALID_FENCE,
2762 "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2765 else if (pFence->state == FENCE_RETIRED) {
2766 // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2767 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2768 HandleToUint64(pFence->fence), MEMTRACK_INVALID_FENCE_STATE,
2769 "Fence 0x%" PRIx64 " submitted in SIGNALED state. Fences must be reset before being submitted",
2770 HandleToUint64(pFence->fence));
// Record-side handler for vkQueueSubmit, run after validation and the driver call.
// Marks the fence and semaphores in use, appends one submission record per
// VkSubmitInfo to the queue's pending list, and early-retires work that is
// signaled through external-scope sync objects (their waits happen outside
// this layer's view, so we would otherwise never retire it).
2777 static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2779 uint64_t early_retire_seq = 0;
2780 auto pQueue = GetQueueState(dev_data, queue);
2781 auto pFence = GetFenceNode(dev_data, fence);
2784 if (pFence->scope == kSyncScopeInternal) {
2785 // Mark fence in use
// Even a zero-submit call still occupies one slot on the queue, hence max(1u, submitCount).
2786 SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2788 // If no submissions, but just dropping a fence on the end of the queue,
2789 // record an empty submission with just the fence, so we can determine
2791 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
2792 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
// External-scope fence: the signal is consumed outside the layer, so retire
// everything up to this submission now and warn (once per device) that
// lifecycle validation is weakened.
2795 // Retire work up until this fence early, we will not see the wait that corresponds to this signal
2796 early_retire_seq = pQueue->seq + pQueue->submissions.size();
2797 if (!dev_data->external_sync_warning) {
2798 dev_data->external_sync_warning = true;
2799 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2800 HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2801 "vkQueueSubmit(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
2802 " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
2804 HandleToUint64(fence), HandleToUint64(queue));
2809 // Now process each individual submit
2810 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2811 std::vector<VkCommandBuffer> cbs;
2812 const VkSubmitInfo *submit = &pSubmits[submit_idx];
2813 vector<SEMAPHORE_WAIT> semaphore_waits;
2814 vector<VkSemaphore> semaphore_signals;
2815 vector<VkSemaphore> semaphore_externals;
// Wait semaphores: an internal semaphore's pending signal is consumed here
// (signaler cleared, signaled=false); external ones are tracked separately,
// and a temporary-external semaphore reverts to internal scope after the wait.
2816 for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2817 VkSemaphore semaphore = submit->pWaitSemaphores[i];
2818 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2820 if (pSemaphore->scope == kSyncScopeInternal) {
2821 if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2822 semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2823 pSemaphore->in_use.fetch_add(1);
2825 pSemaphore->signaler.first = VK_NULL_HANDLE;
2826 pSemaphore->signaled = false;
2828 semaphore_externals.push_back(semaphore);
2829 pSemaphore->in_use.fetch_add(1);
2830 if (pSemaphore->scope == kSyncScopeExternalTemporary) {
2831 pSemaphore->scope = kSyncScopeInternal;
// Signal semaphores: record this queue and the upcoming submission sequence
// number as the signaler; an external signal forces early retirement (its
// matching wait will never be observed) and triggers the one-time warning.
2836 for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2837 VkSemaphore semaphore = submit->pSignalSemaphores[i];
2838 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2840 if (pSemaphore->scope == kSyncScopeInternal) {
2841 pSemaphore->signaler.first = queue;
2842 pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2843 pSemaphore->signaled = true;
2844 pSemaphore->in_use.fetch_add(1);
2845 semaphore_signals.push_back(semaphore);
2847 // Retire work up until this submit early, we will not see the wait that corresponds to this signal
2848 early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
2849 if (!dev_data->external_sync_warning) {
2850 dev_data->external_sync_warning = true;
2851 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2852 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2853 "vkQueueSubmit(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
2854 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
2855 "associated objects.",
2856 HandleToUint64(semaphore), HandleToUint64(queue));
// Command buffers: flatten primaries plus their linked secondaries into the
// submission record, updating image layouts and in-use counts for each.
2861 for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2862 auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2864 cbs.push_back(submit->pCommandBuffers[i]);
2865 for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2866 cbs.push_back(secondaryCmdBuffer->commandBuffer);
2867 UpdateCmdBufImageLayouts(dev_data, secondaryCmdBuffer);
2868 incrementResources(dev_data, secondaryCmdBuffer);
2870 UpdateCmdBufImageLayouts(dev_data, cb_node);
2871 incrementResources(dev_data, cb_node);
// Only the last submission in the batch carries the fence.
2874 pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
2875 submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2878 if (early_retire_seq) {
2879 RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
// Validation-side handler for vkQueueSubmit. Checks the fence state, verifies
// forward progress for every wait/signal semaphore across the whole batch
// (tracking signaled/unsignaled state locally so later submits in the same
// call see the effects of earlier ones), and validates each command buffer's
// image layouts, primary-buffer state, and queue family. Returns true if any
// check fails (caller skips the driver call).
2883 static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2885 auto pFence = GetFenceNode(dev_data, fence);
2886 bool skip = ValidateFenceForSubmit(dev_data, pFence);
// Local semaphore-state sets model the effect of this batch without mutating
// the real per-semaphore state (that happens in the PostCallRecord phase).
2891 unordered_set<VkSemaphore> signaled_semaphores;
2892 unordered_set<VkSemaphore> unsignaled_semaphores;
2893 unordered_set<VkSemaphore> internal_semaphores;
2894 vector<VkCommandBuffer> current_cmds;
2895 unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
2896 // Now verify each individual submit
2897 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2898 const VkSubmitInfo *submit = &pSubmits[submit_idx];
// A wait on a semaphore with no pending or prior signal can never complete.
2899 for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2900 skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2901 VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2902 VkSemaphore semaphore = submit->pWaitSemaphores[i];
2903 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2904 if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
2905 if (unsignaled_semaphores.count(semaphore) ||
2906 (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2907 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2908 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2909 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
2910 HandleToUint64(queue), HandleToUint64(semaphore));
// After the wait, the semaphore is (locally) unsignaled.
2912 signaled_semaphores.erase(semaphore);
2913 unsignaled_semaphores.insert(semaphore);
// A temporary-external semaphore behaves as internal for the rest of the batch.
2916 if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
2917 internal_semaphores.insert(semaphore);
// Signaling a semaphore that is already signaled (and not re-waited) is an error.
2920 for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2921 VkSemaphore semaphore = submit->pSignalSemaphores[i];
2922 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2923 if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
2924 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2925 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2926 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2927 "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
2928 " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2929 HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2931 unsignaled_semaphores.erase(semaphore);
2932 signaled_semaphores.insert(semaphore);
// Per-command-buffer checks; current_cmds counts duplicate submissions of the
// same buffer within this batch for simultaneous-use validation.
2936 for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2937 auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2939 skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
2940 current_cmds.push_back(submit->pCommandBuffers[i]);
2941 skip |= validatePrimaryCommandBufferState(
2942 dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2943 skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2945 // Potential early exit here as bad object state may crash in delayed function calls
2950 // Call submit-time functions to validate/update state
2951 for (auto &function : cb_node->queue_submit_functions) {
2954 for (auto &function : cb_node->eventUpdates) {
2955 skip |= function(queue);
2957 for (auto &function : cb_node->queryUpdates) {
2958 skip |= function(queue);
// vkQueueSubmit entry point: validate under the global lock, bail out with
// VK_ERROR_VALIDATION_FAILED_EXT on failure, otherwise dispatch to the driver
// and record the resulting state changes.
2966 VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2967 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2968 unique_lock_t lock(global_lock);
2970 bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2973 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2975 VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2978 PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
// Validate vkAllocateMemory: flag when the number of live allocations has
// reached the device limit maxMemoryAllocationCount.
2983 static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2985 if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2986 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2987 HandleToUint64(dev_data->device), VALIDATION_ERROR_UNDEFINED,
2988 "Number of currently valid memory objects is not less than the maximum allowed (%u).",
2989 dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount);

// Record the new allocation in the layer's memory-object map.
2994 static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2995 add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);

// vkAllocateMemory entry point: validate, dispatch, and on VK_SUCCESS record
// the allocation for tracking.
2999 VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
3000 const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
3001 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3002 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3003 unique_lock_t lock(global_lock);
3004 bool skip = PreCallValidateAllocateMemory(dev_data);
3007 result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
3009 if (VK_SUCCESS == result) {
3010 PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
3016 // For given obj node, if it is in use, flag a validation error and return callback result, else return false.
// in_use is the command-buffer reference count maintained on BASE_NODE; a
// non-zero load means some command buffer still references this object.
3017 bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
3018 UNIQUE_VALIDATION_ERROR_CODE error_code) {
3019 if (dev_data->instance_data->disabled.object_in_use) return false;
3021 if (obj_node->in_use.load()) {
3023 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
3024 error_code, "Cannot call %s on %s 0x%" PRIx64 " that is currently in use by a command buffer.", caller_name,
3025 object_string[obj_struct.type], obj_struct.handle);
// Validate vkFreeMemory: look up the memory object's state (returned via the
// out-params for reuse by the record phase) and flag if it is still in use.
3030 static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
3031 *mem_info = GetMemObjInfo(dev_data, mem);
3032 *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
3033 if (dev_data->instance_data->disabled.free_memory) return false;
3036 skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, "vkFreeMemory", VALIDATION_ERROR_2880054a);

// Record phase for vkFreeMemory (runs BEFORE dispatch to avoid a free/alloc
// race): unbind every image/buffer still bound to this memory, invalidate
// command buffers that reference it, and drop it from the memory map.
3041 static void PreCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
3042 // Clear mem binding for any bound objects
3043 for (auto obj : mem_info->obj_bindings) {
// Informational message: freeing memory that objects are still bound to.
3044 log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
3045 MEMTRACK_FREED_MEM_REF, "VK Object 0x%" PRIx64 " still has a reference to mem obj 0x%" PRIx64,
3046 HandleToUint64(obj.handle), HandleToUint64(mem_info->mem));
3047 BINDABLE *bindable_state = nullptr;
3049 case kVulkanObjectTypeImage:
3050 bindable_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
3052 case kVulkanObjectTypeBuffer:
3053 bindable_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
3056 // Should only have buffer or image objects bound to memory
3060 assert(bindable_state);
3061 bindable_state->binding.mem = MEMORY_UNBOUND;
3062 bindable_state->UpdateBoundMemorySet();
3064 // Any bound cmd buffers are now invalid
3065 invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
3066 dev_data->memObjMap.erase(mem);

// vkFreeMemory entry point: validate, record state removal first, then dispatch.
3069 VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
3070 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3071 DEVICE_MEM_INFO *mem_info = nullptr;
3072 VK_OBJECT obj_struct;
3073 unique_lock_t lock(global_lock);
3074 bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
// Freeing VK_NULL_HANDLE is a no-op per spec; only record real handles.
3076 if (mem != VK_NULL_HANDLE) {
3077 // Avoid free/alloc race by recording state change before dispatching
3078 PreCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
3081 dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
3085 // Validate that given Map memory range is valid. This means that the memory should not already be mapped,
3086 // and that the size of the map range should be:
// 1. Not zero
3088 // 2. Within the size of the memory allocation
3089 static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
// Zero-sized map is always invalid.
3093 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3094 HandleToUint64(mem), MEMTRACK_INVALID_MAP, "VkMapMemory: Attempting to map memory range of size zero");
3097 auto mem_element = dev_data->memObjMap.find(mem);
3098 if (mem_element != dev_data->memObjMap.end()) {
3099 auto mem_info = mem_element->second.get();
3100 // It is an application error to call VkMapMemory on an object that is already mapped
// mem_range.size != 0 is how this layer marks "currently mapped".
3101 if (mem_info->mem_range.size != 0) {
3102 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3103 HandleToUint64(mem), MEMTRACK_INVALID_MAP,
3104 "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIx64, HandleToUint64(mem));
3107 // Validate that offset + size is within object's allocationSize
// VK_WHOLE_SIZE maps from offset to the end, so only the offset needs checking.
3108 if (size == VK_WHOLE_SIZE) {
3109 if (offset >= mem_info->alloc_info.allocationSize) {
3110 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3111 HandleToUint64(mem), MEMTRACK_INVALID_MAP,
3112 "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
3113 " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
3114 offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
3117 if ((offset + size) > mem_info->alloc_info.allocationSize) {
3118 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3119 HandleToUint64(mem), VALIDATION_ERROR_31200552,
3120 "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
3121 offset, size + offset, mem_info->alloc_info.allocationSize);
// Record the currently-mapped range on the memory object's state.
3128 static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
3129 auto mem_info = GetMemObjInfo(dev_data, mem);
3131 mem_info->mem_range.offset = offset;
3132 mem_info->mem_range.size = size;

// Clear the mapped-range tracking on unmap; flags an error if the memory was
// not actually mapped, and frees the noncoherent shadow buffer if one exists.
3136 static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
3138 auto mem_info = GetMemObjInfo(dev_data, mem);
3140 if (!mem_info->mem_range.size) {
3141 // Valid Usage: memory must currently be mapped
3142 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3143 HandleToUint64(mem), VALIDATION_ERROR_33600562,
3144 "Unmapping Memory without memory being mapped: mem obj 0x%" PRIx64 ".", HandleToUint64(mem));
// size == 0 marks the object unmapped again.
3146 mem_info->mem_range.size = 0;
3147 if (mem_info->shadow_copy) {
3148 free(mem_info->shadow_copy_base);
3149 mem_info->shadow_copy_base = 0;
3150 mem_info->shadow_copy = 0;
3156 // Guard value for pad data
// Fill byte written into the guardband around noncoherent shadow mappings so
// over-/under-writes can be detected later.
3157 static char NoncoherentMemoryFillValue = 0xb;

// For a just-mapped range, save the driver's pointer and — for noncoherent
// memory — substitute a padded, alignment-correct shadow buffer via *ppData so
// the layer can detect writes outside the mapped range. Host-coherent memory
// gets no shadow copy.
3159 static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
3161 auto mem_info = GetMemObjInfo(dev_data, mem);
3163 mem_info->p_driver_data = *ppData;
3164 uint32_t index = mem_info->alloc_info.memoryTypeIndex;
3165 if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
3166 mem_info->shadow_copy = 0;
// Noncoherent path: resolve VK_WHOLE_SIZE to a concrete byte count first.
3168 if (size == VK_WHOLE_SIZE) {
3169 size = mem_info->alloc_info.allocationSize - offset;
3171 mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3172 assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) ==
3174 // Ensure start of mapped region reflects hardware alignment constraints
3175 uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3177 // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
3178 uint64_t start_offset = offset % map_alignment;
3179 // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
// Over-allocate: two pads plus slack for aligning the base pointer up.
3180 mem_info->shadow_copy_base =
3181 malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
// Round the base up to map_alignment, then offset so the user pointer has the
// same misalignment (start_offset) as the real mapping.
3183 mem_info->shadow_copy =
3184 reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
3185 ~(map_alignment - 1)) +
3187 assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
3188 map_alignment) == 0);
// Pre-fill pads (and region) with the guard value, then hand the caller the
// pointer just past the leading pad.
3190 memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
3191 *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
3196 // Verify that state for fence being waited on is appropriate. That is,
3197 // a fence being waited on should not already be signaled and
3198 // it should have been submitted on a queue or during acquire next image
3199 static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
3202 auto pFence = GetFenceNode(dev_data, fence);
// Only internal-scope fences are checked; external fences may be signaled
// outside the layer's view.
3203 if (pFence && pFence->scope == kSyncScopeInternal) {
3204 if (pFence->state == FENCE_UNSIGNALED) {
3206 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3207 HandleToUint64(fence), MEMTRACK_INVALID_FENCE_STATE,
3208 "%s called for fence 0x%" PRIx64 " which has not been submitted on a Queue or during acquire next image.",
3209 apiCall, HandleToUint64(fence));

// Mark a fence retired once the host has observed it signaled, retiring the
// signaling queue's work up to the fence's sequence number when the signaler
// was a queue submission.
3215 static void RetireFence(layer_data *dev_data, VkFence fence) {
3216 auto pFence = GetFenceNode(dev_data, fence);
3217 if (pFence->scope == kSyncScopeInternal) {
3218 if (pFence->signaler.first != VK_NULL_HANDLE) {
3219 // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
3220 RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
3222 // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
3223 // the fence as retired.
3224 pFence->state = FENCE_RETIRED;
// Validate vkWaitForFences: each fence must be in a waitable state and its
// signaling queue's state must be consistent.
3229 static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
3230 if (dev_data->instance_data->disabled.wait_for_fences) return false;
3232 for (uint32_t i = 0; i < fence_count; i++) {
3233 skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
3234 skip |= VerifyQueueStateToFence(dev_data, fences[i]);

// After a successful wait, retire the fences we know have completed.
3239 static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
3240 // When we know that all fences are complete we can clean/remove their CBs
// With waitAll, or a single fence, success means every listed fence signaled.
3241 if ((VK_TRUE == wait_all) || (1 == fence_count)) {
3242 for (uint32_t i = 0; i < fence_count; i++) {
3243 RetireFence(dev_data, fences[i]);
3246 // NOTE : Alternate case not handled here is when some fences have completed. In
3247 // this case for app to guarantee which fences completed it will have to call
3248 // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.

// vkWaitForFences entry point: validate, dispatch, and on VK_SUCCESS retire
// the completed fences.
3251 VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
3253 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3254 // Verify fence status of submitted fences
3255 unique_lock_t lock(global_lock);
3256 bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
3258 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3260 VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
3262 if (result == VK_SUCCESS) {
3264 PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
// Validate vkGetFenceStatus: same waitable-state check as vkWaitForFences.
3270 static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
3271 if (dev_data->instance_data->disabled.get_fence_state) return false;
3272 return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");

// A VK_SUCCESS status query proves the fence signaled, so retire it.
3275 static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }

// vkGetFenceStatus entry point: validate, dispatch, and retire on VK_SUCCESS.
3277 VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
3278 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3279 unique_lock_t lock(global_lock);
3280 bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
3282 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3284 VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
3285 if (result == VK_SUCCESS) {
3287 PostCallRecordGetFenceStatus(dev_data, fence);
// Shared record helper for vkGetDeviceQueue/vkGetDeviceQueue2: initialize
// tracking state for a queue the first time it is retrieved.
3293 static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3294 // Add queue to tracking set only if it is new
// emplace().second is true only on first insertion, avoiding re-init of
// existing queue state.
3295 auto result = dev_data->queues.emplace(queue);
3296 if (result.second == true) {
3297 QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3298 queue_state->queue = queue;
3299 queue_state->queueFamilyIndex = q_family_index;
3300 queue_state->seq = 0;

// vkGetDeviceQueue entry point: dispatch first, then record the queue.
3304 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3305 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3306 dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3307 lock_guard_t lock(global_lock);
3309 PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);

// vkGetDeviceQueue2 (Vulkan 1.1) entry point: the driver may return
// VK_NULL_HANDLE here, so only record a real queue handle.
3312 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
3313 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3314 dev_data->dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
3315 lock_guard_t lock(global_lock);
3317 if (*pQueue != VK_NULL_HANDLE) {
3318 PostCallRecordGetDeviceQueue(dev_data, pQueueInfo->queueFamilyIndex, *pQueue);
// Validate vkQueueWaitIdle: check queue state consistency up through every
// pending submission; queue state is returned via out-param for the record phase.
3322 static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3323 *queue_state = GetQueueState(dev_data, queue);
3324 if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3325 return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());

// After the queue is idle, every pending submission has completed — retire them all.
3328 static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3329 RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());

// vkQueueWaitIdle entry point: validate, dispatch, retire on VK_SUCCESS.
3332 VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3333 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3334 QUEUE_STATE *queue_state = nullptr;
3335 unique_lock_t lock(global_lock);
3336 bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3338 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3339 VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3340 if (VK_SUCCESS == result) {
3342 PostCallRecordQueueWaitIdle(dev_data, queue_state);
// Validate vkDeviceWaitIdle: verify state for every queue on the device up
// through all pending submissions.
3348 static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3349 if (dev_data->instance_data->disabled.device_wait_idle) return false;
3351 for (auto &queue : dev_data->queueMap) {
3352 skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());

// Device idle implies every queue is idle — retire all pending work everywhere.
3357 static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3358 for (auto &queue : dev_data->queueMap) {
3359 RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());

// vkDeviceWaitIdle entry point: validate, dispatch, retire on VK_SUCCESS.
3363 VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3364 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3365 unique_lock_t lock(global_lock);
3366 bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3368 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3369 VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3370 if (VK_SUCCESS == result) {
3372 PostCallRecordDeviceWaitIdle(dev_data);
// Validate vkDestroyFence: an internal-scope fence still in flight must not
// be destroyed (VALIDATION_ERROR_24e008c0).
3378 static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3379 *fence_node = GetFenceNode(dev_data, fence);
3380 *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3381 if (dev_data->instance_data->disabled.destroy_fence) return false;
3384 if ((*fence_node)->scope == kSyncScopeInternal && (*fence_node)->state == FENCE_INFLIGHT) {
3386 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3387 HandleToUint64(fence), VALIDATION_ERROR_24e008c0, "Fence 0x%" PRIx64 " is in use.", HandleToUint64(fence));

// Drop the fence from tracking (runs before dispatch to avoid a Destroy/Create race).
3393 static void PreCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }

// vkDestroyFence entry point: validate, pre-record removal, then dispatch.
3395 VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3396 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3397 // Common data objects used pre & post call
3398 FENCE_NODE *fence_node = nullptr;
3399 VK_OBJECT obj_struct;
3400 unique_lock_t lock(global_lock);
3401 bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3404 // Pre-record to avoid Destroy/Create race
3405 PreCallRecordDestroyFence(dev_data, fence);
3407 dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
// Validate vkDestroySemaphore: the semaphore must not be in use by any
// command buffer (VALIDATION_ERROR_268008e2).
3411 static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3412 VK_OBJECT *obj_struct) {
3413 *sema_node = GetSemaphoreNode(dev_data, semaphore);
3414 *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3415 if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3418 skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, "vkDestroySemaphore", VALIDATION_ERROR_268008e2);

// Drop the semaphore from tracking (runs before dispatch to avoid a Destroy/Create race).
3423 static void PreCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }

// vkDestroySemaphore entry point: validate, pre-record removal, then dispatch.
3425 VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3426 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3427 SEMAPHORE_NODE *sema_node;
3428 VK_OBJECT obj_struct;
3429 unique_lock_t lock(global_lock);
3430 bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3432 // Pre-record to avoid Destroy/Create race
3433 PreCallRecordDestroySemaphore(dev_data, semaphore);
3435 dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
// Validate vkDestroyEvent: the event must not be in use by any command
// buffer (VALIDATION_ERROR_24c008f2).
3439 static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3440 *event_state = GetEventNode(dev_data, event);
3441 *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3442 if (dev_data->instance_data->disabled.destroy_event) return false;
3445 skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, "vkDestroyEvent", VALIDATION_ERROR_24c008f2);

// Invalidate command buffers referencing the event, then drop it from tracking.
3450 static void PreCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3451 invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3452 dev_data->eventMap.erase(event);

// vkDestroyEvent entry point: validate, pre-record removal (skipping
// VK_NULL_HANDLE), then dispatch.
3455 VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3456 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3457 EVENT_STATE *event_state = nullptr;
3458 VK_OBJECT obj_struct;
3459 unique_lock_t lock(global_lock);
3460 bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3462 if (event != VK_NULL_HANDLE) {
3463 // Pre-record to avoid Destroy/Create race
3464 PreCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
3467 dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
// Validate vkDestroyQueryPool: the pool must not be in use by any command
// buffer (VALIDATION_ERROR_26200632).
3471 static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3472 VK_OBJECT *obj_struct) {
3473 *qp_state = GetQueryPoolNode(dev_data, query_pool);
3474 *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3475 if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3478 skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, "vkDestroyQueryPool", VALIDATION_ERROR_26200632);

// Invalidate command buffers referencing the pool, then drop it from tracking.
3483 static void PreCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3484 VK_OBJECT obj_struct) {
3485 invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3486 dev_data->queryPoolMap.erase(query_pool);

// vkDestroyQueryPool entry point: validate, pre-record removal (skipping
// VK_NULL_HANDLE), then dispatch.
3489 VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3490 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3491 QUERY_POOL_NODE *qp_state = nullptr;
3492 VK_OBJECT obj_struct;
3493 unique_lock_t lock(global_lock);
3494 bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3496 if (queryPool != VK_NULL_HANDLE) {
3497 // Pre-record to avoid Destroy/Create race
3498 PreCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3501 dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
// Validate vkGetQueryPoolResults. Checks the TIMESTAMP/PARTIAL_BIT
// incompatibility, builds a map of queries still referenced by in-flight
// command buffers (also consumed by the PostCallRecord phase via the
// queries_in_flight out-param), and flags reads of queries that are in
// flight, unavailable, or never initialized.
3504 static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3505 uint32_t query_count, VkQueryResultFlags flags,
3506 unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3508 auto query_pool_state = dev_data->queryPoolMap.find(query_pool);
3509 if (query_pool_state != dev_data->queryPoolMap.end()) {
// VK_QUERY_RESULT_PARTIAL_BIT is not allowed with timestamp queries.
3510 if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
3512 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3513 VALIDATION_ERROR_2fa00664,
3514 "QueryPool 0x%" PRIx64
3515 " was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
3516 HandleToUint64(query_pool));
// Full scan of every command buffer to find queries still in flight.
3520 // TODO: clean this up, it's insanely wasteful.
3521 for (auto cmd_buffer : dev_data->commandBufferMap) {
3522 if (cmd_buffer.second->in_use.load()) {
3523 for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
3524 (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
3529 if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3530 for (uint32_t i = 0; i < query_count; ++i) {
3531 QueryObject query = {query_pool, first_query + i};
3532 auto qif_pair = queries_in_flight->find(query);
3533 auto query_state_pair = dev_data->queryToStateMap.find(query);
3534 if (query_state_pair != dev_data->queryToStateMap.end()) {
3535 // Available and in flight
3536 if (qif_pair != queries_in_flight->end()) {
3537 if (query_state_pair->second) {
3538 for (auto cmd_buffer : qif_pair->second) {
3539 auto cb = GetCBNode(dev_data, cmd_buffer);
// An in-flight read is only OK when the command buffer waited on events
// before resetting the query; otherwise flag it.
3540 auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3541 if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
3542 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3543 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
3544 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
3545 HandleToUint64(query_pool), first_query + i);
3549 } else if (!query_state_pair->second) { // Unavailable and Not in flight
3550 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3551 DRAWSTATE_INVALID_QUERY,
3552 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3553 HandleToUint64(query_pool), first_query + i);
3555 } else { // Uninitialized
3556 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3557 DRAWSTATE_INVALID_QUERY,
3558 "Cannot get query results on queryPool 0x%" PRIx64
3559 " with index %d as data has not been collected for this index.",
3560 HandleToUint64(query_pool), first_query + i);
// Record phase for vkGetQueryPoolResults: for each queried slot that is
// available yet still in flight, mark the events the command buffer waited on
// (before resetting the query) as needing re-signal.
3566 static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3567 uint32_t query_count,
3568 unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3569 for (uint32_t i = 0; i < query_count; ++i) {
3570 QueryObject query = {query_pool, first_query + i};
3571 auto qif_pair = queries_in_flight->find(query);
3572 auto query_state_pair = dev_data->queryToStateMap.find(query);
3573 if (query_state_pair != dev_data->queryToStateMap.end()) {
3574 // Available and in flight
3575 if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
3576 query_state_pair->second) {
3577 for (auto cmd_buffer : qif_pair->second) {
3578 auto cb = GetCBNode(dev_data, cmd_buffer);
3579 auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3580 if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3581 for (auto event : query_event_pair->second) {
3582 dev_data->eventMap[event].needsSignaled = true;

// vkGetQueryPoolResults entry point: validate (building the in-flight query
// map), dispatch, then record event re-signal requirements.
3591 VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3592 size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3593 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3594 unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3595 unique_lock_t lock(global_lock);
3596 bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3598 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3600 dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3602 PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3607 // Return true if given ranges intersect, else false
3608 // Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
3609 // in an error so not checking that here
3610 // pad_ranges bool indicates a linear and non-linear comparison which requires padding
3611 // In the case where padding is required, if an alias is encountered then a validation error is reported and skip
3612 // may be set by the callback function so caller should merge in skip value if padding case is possible.
3613 // This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
3614 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3617 auto r1_start = range1->start;
3618 auto r1_end = range1->end;
3619 auto r2_start = range2->start;
3620 auto r2_end = range2->end;
3621 VkDeviceSize pad_align = 1;
3622 if (range1->linear != range2->linear) {
3623 pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
3625 if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3626 if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
3628 if (!skip_checks && (range1->linear != range2->linear)) {
3629 // In linear vs. non-linear case, warn of aliasing
3630 const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3631 const char *r1_type_str = range1->image ? "image" : "buffer";
3632 const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3633 const char *r2_type_str = range2->image ? "image" : "buffer";
3634 auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3636 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, MEMTRACK_INVALID_ALIASING,
3637 "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3638 " which may indicate a bug. For further info refer to the Buffer-Image Granularity section of the Vulkan "
3640 "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
3641 r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3646 // Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
3647 bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3648 // Create a local MEMORY_RANGE struct to wrap offset/size
3649 MEMORY_RANGE range_wrap;
3650 // Synch linear with range1 to avoid padding and potential validation error case
3651 range_wrap.linear = range1->linear;
3652 range_wrap.start = offset;
3653 range_wrap.end = end;
3655 return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3658 static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3659 VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3660 bool is_linear, const char *api_name) {
3664 range.image = is_image;
3665 range.handle = handle;
3666 range.linear = is_linear;
3667 range.memory = mem_info->mem;
3668 range.start = memoryOffset;
3669 range.size = memRequirements.size;
3670 range.end = memoryOffset + memRequirements.size - 1;
3671 range.aliases.clear();
3673 // Check for aliasing problems.
3674 for (auto &obj_range_pair : mem_info->bound_ranges) {
3675 auto check_range = &obj_range_pair.second;
3676 bool intersection_error = false;
3677 if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3678 skip |= intersection_error;
3679 range.aliases.insert(check_range);
3683 if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3684 UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
3685 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3686 HandleToUint64(mem_info->mem), error_code,
3687 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64 "), memoryOffset=0x%" PRIxLEAST64
3688 " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
3689 api_name, HandleToUint64(mem_info->mem), HandleToUint64(handle), memoryOffset,
3690 mem_info->alloc_info.allocationSize);
3696 // Object with given handle is being bound to memory w/ given mem_info struct.
3697 // Track the newly bound memory range with given memoryOffset
3698 // Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
3699 // and non-linear range incorrectly overlap.
3700 // Return true if an error is flagged and the user callback returns "true", otherwise false
3701 // is_image indicates an image object, otherwise handle is for a buffer
3702 // is_linear indicates a buffer or linear image
3703 static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3704 VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3707 range.image = is_image;
3708 range.handle = handle;
3709 range.linear = is_linear;
3710 range.memory = mem_info->mem;
3711 range.start = memoryOffset;
3712 range.size = memRequirements.size;
3713 range.end = memoryOffset + memRequirements.size - 1;
3714 range.aliases.clear();
3715 // Update Memory aliasing
3716 // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
3717 // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
3718 std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
3719 for (auto &obj_range_pair : mem_info->bound_ranges) {
3720 auto check_range = &obj_range_pair.second;
3721 bool intersection_error = false;
3722 if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3723 range.aliases.insert(check_range);
3724 tmp_alias_ranges.insert(check_range);
3727 mem_info->bound_ranges[handle] = std::move(range);
3728 for (auto tmp_range : tmp_alias_ranges) {
3729 tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
3732 mem_info->bound_images.insert(handle);
3734 mem_info->bound_buffers.insert(handle);
3737 static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3738 VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3739 const char *api_name) {
3740 return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
3742 static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3743 VkMemoryRequirements mem_reqs, bool is_linear) {
3744 InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
3747 static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3748 VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3749 return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
3751 static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3752 VkMemoryRequirements mem_reqs) {
3753 InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3756 // Remove MEMORY_RANGE struct for give handle from bound_ranges of mem_info
3757 // is_image indicates if handle is for image or buffer
3758 // This function will also remove the handle-to-index mapping from the appropriate
3759 // map and clean up any aliases for range being removed.
3760 static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
3761 auto erase_range = &mem_info->bound_ranges[handle];
3762 for (auto alias_range : erase_range->aliases) {
3763 alias_range->aliases.erase(erase_range);
3765 erase_range->aliases.clear();
3766 mem_info->bound_ranges.erase(handle);
3768 mem_info->bound_images.erase(handle);
3770 mem_info->bound_buffers.erase(handle);
3774 void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3776 void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
3778 VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3779 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3780 BUFFER_STATE *buffer_state = nullptr;
3781 VK_OBJECT obj_struct;
3782 unique_lock_t lock(global_lock);
3783 bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3785 if (buffer != VK_NULL_HANDLE) {
3786 // Pre-record to avoid Destroy/Create race
3787 PreCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
3790 dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
3794 VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3795 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3796 // Common data objects used pre & post call
3797 BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3798 VK_OBJECT obj_struct;
3799 unique_lock_t lock(global_lock);
3800 // Validate state before calling down chain, update common data if we'll be calling down chain
3801 bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3803 if (bufferView != VK_NULL_HANDLE) {
3804 // Pre-record to avoid Destroy/Create race
3805 PreCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
3808 dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
3812 VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3813 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3814 IMAGE_STATE *image_state = nullptr;
3815 VK_OBJECT obj_struct;
3816 unique_lock_t lock(global_lock);
3817 bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3819 if (image != VK_NULL_HANDLE) {
3820 // Pre-record to avoid Destroy/Create race
3821 PreCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
3824 dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
3828 static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3829 const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3831 if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3832 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3833 HandleToUint64(mem_info->mem), msgCode,
3834 "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3835 "type (0x%X) of this memory object 0x%" PRIx64 ".",
3836 funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem));
3841 static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3842 VkDeviceSize memoryOffset, const char *api_name) {
3845 unique_lock_t lock(global_lock);
3846 // Track objects tied to memory
3847 uint64_t buffer_handle = HandleToUint64(buffer);
3848 skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
3849 if (!buffer_state->memory_requirements_checked) {
3850 // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
3851 // BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
3852 // vkGetBufferMemoryRequirements()
3853 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3854 buffer_handle, DRAWSTATE_INVALID_BUFFER,
3855 "%s: Binding memory to buffer 0x%" PRIx64
3856 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
3857 api_name, HandleToUint64(buffer_handle));
3858 // Make the call for them so we can verify the state
3860 dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3864 // Validate bound memory range information
3865 const auto mem_info = GetMemObjInfo(dev_data, mem);
3867 skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
3868 skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
3869 VALIDATION_ERROR_17000816);
3872 // Validate memory requirements alignment
3873 if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3874 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3875 buffer_handle, VALIDATION_ERROR_17000818,
3876 "%s: memoryOffset is 0x%" PRIxLEAST64
3877 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3878 ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3879 api_name, memoryOffset, buffer_state->requirements.alignment);
3883 // Validate memory requirements size
3884 if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
3885 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3886 buffer_handle, VALIDATION_ERROR_1700081a,
3887 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
3888 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
3889 ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3890 api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
3893 // Validate dedicated allocation
3894 if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
3895 // TODO: Add vkBindBufferMemory2KHR error message when added to spec.
3896 auto validation_error = VALIDATION_ERROR_UNDEFINED;
3897 if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
3898 validation_error = VALIDATION_ERROR_17000bc8;
3901 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3902 buffer_handle, validation_error,
3903 "%s: for dedicated memory allocation 0x%" PRIxLEAST64
3904 ", VkMemoryDedicatedAllocateInfoKHR::buffer 0x%" PRIXLEAST64 " must be equal to buffer 0x%" PRIxLEAST64
3905 " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
3906 api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_buffer), buffer_handle, memoryOffset);
3910 // Validate device limits alignments
3911 static const VkBufferUsageFlagBits usage_list[3] = {
3912 static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3913 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3914 static const char *memory_type[3] = {"texel", "uniform", "storage"};
3915 static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3916 "minStorageBufferOffsetAlignment"};
3918 // TODO: vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3920 static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3921 VALIDATION_ERROR_17000814 };
3924 // Keep this one fresh!
3925 const VkDeviceSize offset_requirement[3] = {
3926 dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3927 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3928 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
3929 VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
3931 for (int i = 0; i < 3; i++) {
3932 if (usage & usage_list[i]) {
3933 if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3934 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3935 buffer_handle, msgCode[i],
3936 "%s: %s memoryOffset is 0x%" PRIxLEAST64
3937 " but must be a multiple of device limit %s 0x%" PRIxLEAST64 ".",
3938 api_name, memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
3946 static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3947 VkDeviceSize memoryOffset, const char *api_name) {
3949 unique_lock_t lock(global_lock);
3950 // Track bound memory range information
3951 auto mem_info = GetMemObjInfo(dev_data, mem);
3953 InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3956 // Track objects tied to memory
3957 uint64_t buffer_handle = HandleToUint64(buffer);
3958 SetMemBinding(dev_data, mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer, api_name);
3962 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3963 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3964 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3965 BUFFER_STATE *buffer_state;
3967 unique_lock_t lock(global_lock);
3968 buffer_state = GetBufferState(dev_data, buffer);
3970 bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
3972 result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3973 if (result == VK_SUCCESS) {
3974 PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
3980 static bool PreCallValidateBindBufferMemory2(layer_data *dev_data, std::vector<BUFFER_STATE *> *buffer_state,
3981 uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
3983 unique_lock_t lock(global_lock);
3984 for (uint32_t i = 0; i < bindInfoCount; i++) {
3985 (*buffer_state)[i] = GetBufferState(dev_data, pBindInfos[i].buffer);
3990 for (uint32_t i = 0; i < bindInfoCount; i++) {
3991 sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i);
3992 skip |= PreCallValidateBindBufferMemory(dev_data, pBindInfos[i].buffer, (*buffer_state)[i], pBindInfos[i].memory,
3993 pBindInfos[i].memoryOffset, api_name);
3998 static void PostCallRecordBindBufferMemory2(layer_data *dev_data, const std::vector<BUFFER_STATE *> &buffer_state,
3999 uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
4000 for (uint32_t i = 0; i < bindInfoCount; i++) {
4001 PostCallRecordBindBufferMemory(dev_data, pBindInfos[i].buffer, buffer_state[i], pBindInfos[i].memory,
4002 pBindInfos[i].memoryOffset, "vkBindBufferMemory2()");
4006 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
4007 const VkBindBufferMemoryInfoKHR *pBindInfos) {
4008 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4009 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4010 std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4011 if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4012 result = dev_data->dispatch_table.BindBufferMemory2(device, bindInfoCount, pBindInfos);
4013 if (result == VK_SUCCESS) {
4014 PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
4020 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
4021 const VkBindBufferMemoryInfoKHR *pBindInfos) {
4022 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4023 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4024 std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4025 if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4026 result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
4027 if (result == VK_SUCCESS) {
4028 PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
4034 static void PostCallRecordGetBufferMemoryRequirements(layer_data *dev_data, VkBuffer buffer,
4035 VkMemoryRequirements *pMemoryRequirements) {
4036 BUFFER_STATE *buffer_state;
4038 unique_lock_t lock(global_lock);
4039 buffer_state = GetBufferState(dev_data, buffer);
4042 buffer_state->requirements = *pMemoryRequirements;
4043 buffer_state->memory_requirements_checked = true;
4047 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
4048 VkMemoryRequirements *pMemoryRequirements) {
4049 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4050 dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
4051 PostCallRecordGetBufferMemoryRequirements(dev_data, buffer, pMemoryRequirements);
4054 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4055 VkMemoryRequirements2KHR *pMemoryRequirements) {
4056 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4057 dev_data->dispatch_table.GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
4058 PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
4061 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4062 VkMemoryRequirements2KHR *pMemoryRequirements) {
4063 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4064 dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4065 PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
4068 static void PostCallRecordGetImageMemoryRequirements(layer_data *dev_data, VkImage image,
4069 VkMemoryRequirements *pMemoryRequirements) {
4070 IMAGE_STATE *image_state;
4072 unique_lock_t lock(global_lock);
4073 image_state = GetImageState(dev_data, image);
4076 image_state->requirements = *pMemoryRequirements;
4077 image_state->memory_requirements_checked = true;
4081 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
4082 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4083 dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
4084 PostCallRecordGetImageMemoryRequirements(dev_data, image, pMemoryRequirements);
4087 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4088 VkMemoryRequirements2KHR *pMemoryRequirements) {
4089 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4090 dev_data->dispatch_table.GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
4091 PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
4094 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4095 VkMemoryRequirements2KHR *pMemoryRequirements) {
4096 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4097 dev_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4098 PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
4101 static void PostCallRecordGetImageSparseMemoryRequirements(IMAGE_STATE *image_state, uint32_t req_count,
4102 VkSparseImageMemoryRequirements *reqs) {
4103 image_state->get_sparse_reqs_called = true;
4104 image_state->sparse_requirements.resize(req_count);
4106 std::copy(reqs, reqs + req_count, image_state->sparse_requirements.begin());
4108 for (const auto &req : image_state->sparse_requirements) {
4109 if (req.formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
4110 image_state->sparse_metadata_required = true;
4115 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
4116 VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
4117 // TODO : Implement tracking here, just passthrough initially
4118 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4119 dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
4120 pSparseMemoryRequirements);
4121 unique_lock_t lock(global_lock);
4122 auto image_state = GetImageState(dev_data, image);
4123 PostCallRecordGetImageSparseMemoryRequirements(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4126 static void PostCallRecordGetImageSparseMemoryRequirements2(IMAGE_STATE *image_state, uint32_t req_count,
4127 VkSparseImageMemoryRequirements2KHR *reqs) {
4128 // reqs is empty, so there is nothing to loop over and read.
4129 if (reqs == nullptr) {
4132 std::vector<VkSparseImageMemoryRequirements> sparse_reqs(req_count);
4133 // Migrate to old struct type for common handling with GetImageSparseMemoryRequirements()
4134 for (uint32_t i = 0; i < req_count; ++i) {
4135 assert(!reqs[i].pNext); // TODO: If an extension is ever added here we need to handle it
4136 sparse_reqs[i] = reqs[i].memoryRequirements;
4138 PostCallRecordGetImageSparseMemoryRequirements(image_state, req_count, sparse_reqs.data());
4141 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4142 uint32_t *pSparseMemoryRequirementCount,
4143 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4144 // TODO : Implement tracking here, just passthrough initially
4145 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4146 dev_data->dispatch_table.GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount,
4147 pSparseMemoryRequirements);
4148 unique_lock_t lock(global_lock);
4149 auto image_state = GetImageState(dev_data, pInfo->image);
4150 PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4153 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
4154 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4155 uint32_t *pSparseMemoryRequirementCount,
4156 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4157 // TODO : Implement tracking here, just passthrough initially
4158 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4159 dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
4160 pSparseMemoryRequirements);
4161 unique_lock_t lock(global_lock);
4162 auto image_state = GetImageState(dev_data, pInfo->image);
4163 PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4166 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
4167 VkImageType type, VkSampleCountFlagBits samples,
4168 VkImageUsageFlags usage, VkImageTiling tiling,
4169 uint32_t *pPropertyCount,
4170 VkSparseImageFormatProperties *pProperties) {
4171 // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4172 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4173 instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling,
4174 pPropertyCount, pProperties);
4177 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(
4178 VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4179 VkSparseImageFormatProperties2KHR *pProperties) {
4180 // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4181 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4182 instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount,
4186 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
4187 VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4188 VkSparseImageFormatProperties2KHR *pProperties) {
4189 // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4190 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4191 instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount,
4195 VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
4196 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4197 // Common data objects used pre & post call
4198 IMAGE_VIEW_STATE *image_view_state = nullptr;
4199 VK_OBJECT obj_struct;
4200 unique_lock_t lock(global_lock);
4201 bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
4203 if (imageView != VK_NULL_HANDLE) {
4204 // Pre-record to avoid Destroy/Create race
4205 PreCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
4208 dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
4212 VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
4213 const VkAllocationCallbacks *pAllocator) {
4214 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4216 unique_lock_t lock(global_lock);
4217 // Pre-record to avoid Destroy/Create race
4218 dev_data->shaderModuleMap.erase(shaderModule);
4221 dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
4224 static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
4225 VK_OBJECT *obj_struct) {
4226 *pipeline_state = getPipelineState(dev_data, pipeline);
4227 *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
4228 if (dev_data->instance_data->disabled.destroy_pipeline) return false;
4230 if (*pipeline_state) {
4231 skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, "vkDestroyPipeline", VALIDATION_ERROR_25c005fa);
4236 static void PreCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
4237 VK_OBJECT obj_struct) {
4238 // Any bound cmd buffers are now invalid
4239 invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
4240 dev_data->pipelineMap.erase(pipeline);
4243 VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
4244 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4245 PIPELINE_STATE *pipeline_state = nullptr;
4246 VK_OBJECT obj_struct;
4247 unique_lock_t lock(global_lock);
4248 bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
4250 if (pipeline != VK_NULL_HANDLE) {
4251 // Pre-record to avoid Destroy/Create race
4252 PreCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
4255 dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
4259 VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
4260 const VkAllocationCallbacks *pAllocator) {
4261 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4262 unique_lock_t lock(global_lock);
4263 // Pre-record to avoid Destroy/Create race
4264 dev_data->pipelineLayoutMap.erase(pipelineLayout);
4267 dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
4270 static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
4271 VK_OBJECT *obj_struct) {
4272 *sampler_state = GetSamplerState(dev_data, sampler);
4273 *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
4274 if (dev_data->instance_data->disabled.destroy_sampler) return false;
4276 if (*sampler_state) {
4277 skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, "vkDestroySampler", VALIDATION_ERROR_26600874);
4282 static void PreCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
4283 VK_OBJECT obj_struct) {
4284 // Any bound cmd buffers are now invalid
4285 if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
4286 dev_data->samplerMap.erase(sampler);
4289 VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
4290 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4291 SAMPLER_STATE *sampler_state = nullptr;
4292 VK_OBJECT obj_struct;
4293 unique_lock_t lock(global_lock);
4294 bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
4296 if (sampler != VK_NULL_HANDLE) {
4297 // Pre-record to avoid Destroy/Create race
4298 PreCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
4301 dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
4305 static void PreCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
4306 auto layout_it = dev_data->descriptorSetLayoutMap.find(ds_layout);
4307 if (layout_it != dev_data->descriptorSetLayoutMap.end()) {
4308 layout_it->second.get()->MarkDestroyed();
4309 dev_data->descriptorSetLayoutMap.erase(layout_it);
4313 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
4314 const VkAllocationCallbacks *pAllocator) {
4315 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4317 lock_guard_t lock(global_lock);
4318 // Pre-record to avoid Destroy/Create race
4319 PreCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
4321 dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
4324 static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
4325 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
4326 *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
4327 *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
4328 if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
4330 if (*desc_pool_state) {
4332 ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, "vkDestroyDescriptorPool", VALIDATION_ERROR_2440025e);
4337 static void PreCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
4338 DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
4339 if (desc_pool_state) {
4340 // Any bound cmd buffers are now invalid
4341 invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
4342 // Free sets that were in this pool
4343 for (auto ds : desc_pool_state->sets) {
4344 freeDescriptorSet(dev_data, ds);
4346 dev_data->descriptorPoolMap.erase(descriptorPool);
4347 delete desc_pool_state;
4351 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
4352 const VkAllocationCallbacks *pAllocator) {
4353 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4354 DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
4355 VK_OBJECT obj_struct;
4356 unique_lock_t lock(global_lock);
4357 bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
4359 // Pre-record to avoid Destroy/Create race
4360 PreCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
4362 dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
4366 // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
4367 // If this is a secondary command buffer, then make sure its primary is also in-flight
4368 // If primary is not in-flight, then remove secondary from global in-flight set
4369 // This function is only valid at a point when cmdBuffer is being reset or freed
4370 static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
4371 UNIQUE_VALIDATION_ERROR_CODE error_code) {
4373 if (cb_node->in_use.load()) {
4374 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4375 HandleToUint64(cb_node->commandBuffer), error_code,
4376 "Attempt to %s command buffer (0x%" PRIx64 ") which is in use.", action,
4377 HandleToUint64(cb_node->commandBuffer));
4382 // Iterate over all cmdBuffers in given commandPool and verify that each is not in use
4383 static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
4384 UNIQUE_VALIDATION_ERROR_CODE error_code) {
4386 for (auto cmd_buffer : pPool->commandBuffers) {
4387 skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
4392 // Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
4393 static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
4394 const VkCommandBuffer *command_buffers) {
4395 for (uint32_t i = 0; i < command_buffer_count; i++) {
4396 auto cb_state = GetCBNode(dev_data, command_buffers[i]);
4397 // Remove references to command buffer's state and delete
4399 // reset prior to delete, removing various references to it.
4400 // TODO: fix this, it's insane.
4401 ResetCommandBufferState(dev_data, cb_state->commandBuffer);
4402 // Remove the cb_state's references from layer_data and COMMAND_POOL_NODE
4403 dev_data->commandBufferMap.erase(cb_state->commandBuffer);
4404 pool_state->commandBuffers.erase(command_buffers[i]);
4410 VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
4411 const VkCommandBuffer *pCommandBuffers) {
4412 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4414 unique_lock_t lock(global_lock);
4416 for (uint32_t i = 0; i < commandBufferCount; i++) {
4417 auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
4418 // Delete CB information structure, and remove from commandBufferMap
4420 skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
4426 auto pPool = GetCommandPoolNode(dev_data, commandPool);
4427 FreeCommandBufferStates(dev_data, pPool, commandBufferCount, pCommandBuffers);
4430 dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
4433 VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
4434 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
4435 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4437 VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
4439 if (VK_SUCCESS == result) {
4440 lock_guard_t lock(global_lock);
4441 dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
4442 dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
4447 VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
4448 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
4449 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4451 if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
4452 if (!dev_data->enabled_features.pipelineStatisticsQuery) {
4453 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
4454 VALIDATION_ERROR_11c0062e,
4455 "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
4456 "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
4460 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4462 result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
4464 if (result == VK_SUCCESS) {
4465 lock_guard_t lock(global_lock);
4466 QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
4467 qp_node->createInfo = *pCreateInfo;
4472 static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4473 COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4474 if (dev_data->instance_data->disabled.destroy_command_pool) return false;
4477 // Verify that command buffers in pool are complete (not in-flight)
4478 skip |= checkCommandBuffersInFlight(dev_data, cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
4483 static void PreCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4484 COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4485 // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
4486 // "When a pool is destroyed, all command buffers allocated from the pool are freed."
4488 // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
4489 std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
4490 FreeCommandBufferStates(dev_data, cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
4491 dev_data->commandPoolMap.erase(pool);
4495 // Destroy commandPool along with all of the commandBuffers allocated from that pool
4496 VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4497 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4498 unique_lock_t lock(global_lock);
4499 bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool);
4501 // Pre-record to avoid Destroy/Create race
4502 PreCallRecordDestroyCommandPool(dev_data, commandPool);
4504 dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
4508 VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
4509 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4512 unique_lock_t lock(global_lock);
4513 auto pPool = GetCommandPoolNode(dev_data, commandPool);
4514 skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
4517 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4519 VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
4521 // Reset all of the CBs allocated from this pool
4522 if (VK_SUCCESS == result) {
4524 for (auto cmdBuffer : pPool->commandBuffers) {
4525 ResetCommandBufferState(dev_data, cmdBuffer);
4532 VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
4533 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4535 unique_lock_t lock(global_lock);
4536 for (uint32_t i = 0; i < fenceCount; ++i) {
4537 auto pFence = GetFenceNode(dev_data, pFences[i]);
4538 if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
4539 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4540 HandleToUint64(pFences[i]), VALIDATION_ERROR_32e008c6, "Fence 0x%" PRIx64 " is in use.",
4541 HandleToUint64(pFences[i]));
4546 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4548 VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
4550 if (result == VK_SUCCESS) {
4552 for (uint32_t i = 0; i < fenceCount; ++i) {
4553 auto pFence = GetFenceNode(dev_data, pFences[i]);
4555 if (pFence->scope == kSyncScopeInternal) {
4556 pFence->state = FENCE_UNSIGNALED;
4557 } else if (pFence->scope == kSyncScopeExternalTemporary) {
4558 pFence->scope = kSyncScopeInternal;
4568 // For given cb_nodes, invalidate them and track object causing invalidation
4569 void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
4570 for (auto cb_node : cb_nodes) {
4571 if (cb_node->state == CB_RECORDING) {
4572 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4573 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
4574 "Invalidating a command buffer that's currently being recorded: 0x%" PRIx64 ".",
4575 HandleToUint64(cb_node->commandBuffer));
4576 cb_node->state = CB_INVALID_INCOMPLETE;
4577 } else if (cb_node->state == CB_RECORDED) {
4578 cb_node->state = CB_INVALID_COMPLETE;
4580 cb_node->broken_bindings.push_back(obj);
4582 // if secondary, then propagate the invalidation to the primaries that will call us.
4583 if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
4584 invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
4589 static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
4590 FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
4591 *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
4592 *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
4593 if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
4595 if (*framebuffer_state) {
4597 ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, "vkDestroyFramebuffer", VALIDATION_ERROR_250006f8);
4602 static void PreCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4603 VK_OBJECT obj_struct) {
4604 invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4605 dev_data->frameBufferMap.erase(framebuffer);
4608 VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4609 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4610 FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4611 VK_OBJECT obj_struct;
4612 unique_lock_t lock(global_lock);
4613 bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4615 if (framebuffer != VK_NULL_HANDLE) {
4616 // Pre-record to avoid Destroy/Create race
4617 PreCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
4620 dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4624 static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
4625 VK_OBJECT *obj_struct) {
4626 *rp_state = GetRenderPassState(dev_data, render_pass);
4627 *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
4628 if (dev_data->instance_data->disabled.destroy_renderpass) return false;
4631 skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, "vkDestroyRenderPass", VALIDATION_ERROR_264006d2);
4636 static void PreCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
4637 VK_OBJECT obj_struct) {
4638 invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
4639 dev_data->renderPassMap.erase(render_pass);
4642 VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
4643 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4644 RENDER_PASS_STATE *rp_state = nullptr;
4645 VK_OBJECT obj_struct;
4646 unique_lock_t lock(global_lock);
4647 bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
4649 if (renderPass != VK_NULL_HANDLE) {
4650 // Pre-record to avoid Destroy/Create race
4651 PreCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
4654 dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
4658 VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
4659 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
4660 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4661 unique_lock_t lock(global_lock);
4662 bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
4665 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4666 VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
4668 if (VK_SUCCESS == result) {
4670 PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
4676 VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
4677 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
4678 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4679 unique_lock_t lock(global_lock);
4680 bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
4682 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4683 VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
4684 if (VK_SUCCESS == result) {
4686 PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
4692 // Access helper functions for external modules
4693 VkFormatProperties GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
4694 VkFormatProperties format_properties;
4695 instance_layer_data *instance_data =
4696 GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4697 instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &format_properties);
4698 return format_properties;
4701 VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
4702 VkImageFormatProperties *pImageFormatProperties) {
4703 instance_layer_data *instance_data =
4704 GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4705 return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(
4706 device_data->physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags,
4707 pImageFormatProperties);
4710 const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
4712 const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
4713 return &device_data->phys_dev_props;
4716 const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
4718 std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
4719 return &device_data->imageMap;
4722 std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
4723 return &device_data->imageSubresourceMap;
4726 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
4727 return &device_data->imageLayoutMap;
4730 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
4731 return &device_data->imageLayoutMap;
4734 std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
4735 return &device_data->bufferMap;
4738 std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
4739 return &device_data->bufferViewMap;
4742 std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
4743 return &device_data->imageViewMap;
4746 const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) { return &device_data->phys_dev_properties; }
4748 const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
4750 const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *GetEnabledDescriptorIndexingFeatures(const layer_data *device_data) {
4751 return &device_data->phys_dev_ext_props.descriptor_indexing_features;
4754 const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
4756 VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
4757 const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
4758 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4759 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4760 bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
4762 result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
4764 if (VK_SUCCESS == result) {
4765 lock_guard_t lock(global_lock);
4766 PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
4771 VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
4772 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
4773 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4774 unique_lock_t lock(global_lock);
4775 bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
4777 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4778 VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
4779 if (VK_SUCCESS == result) {
4781 PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
4788 VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
4789 const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
4790 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4791 VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
4792 if (VK_SUCCESS == result) {
4793 lock_guard_t lock(global_lock);
4794 auto &fence_node = dev_data->fenceMap[*pFence];
4795 fence_node.fence = *pFence;
4796 fence_node.createInfo = *pCreateInfo;
4797 fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
4802 // TODO handle pipeline caches
4803 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
4804 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
4805 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4806 VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
4810 VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
4811 const VkAllocationCallbacks *pAllocator) {
4812 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4813 // Pre-record to avoid Destroy/Create race (if/when implemented)
4814 dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
4817 VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
4819 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4820 VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
4824 VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
4825 const VkPipelineCache *pSrcCaches) {
4826 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4827 VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
4831 // Validation cache:
4832 // CV is the bottommost implementor of this extension. Don't pass calls down.
4833 VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
4834 const VkAllocationCallbacks *pAllocator,
4835 VkValidationCacheEXT *pValidationCache) {
4836 *pValidationCache = ValidationCache::Create(pCreateInfo);
4837 return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
4840 VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
4841 const VkAllocationCallbacks *pAllocator) {
4842 delete (ValidationCache *)validationCache;
4845 VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
4847 size_t inSize = *pDataSize;
4848 ((ValidationCache *)validationCache)->Write(pDataSize, pData);
4849 return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
4852 VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
4853 const VkValidationCacheEXT *pSrcCaches) {
4854 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4856 auto dst = (ValidationCache *)dstCache;
4857 auto src = (ValidationCache const *const *)pSrcCaches;
4858 VkResult result = VK_SUCCESS;
4859 for (uint32_t i = 0; i < srcCacheCount; i++) {
4860 if (src[i] == dst) {
4861 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
4862 0, VALIDATION_ERROR_3e600c00,
4863 "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
4864 HandleToUint64(dstCache));
4865 result = VK_ERROR_VALIDATION_FAILED_EXT;
4875 // utility function to set collective state for pipeline
4876 void set_pipeline_state(PIPELINE_STATE *pPipe) {
4877 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
4878 if (pPipe->graphicsPipelineCI.pColorBlendState) {
4879 for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
4880 if (VK_TRUE == pPipe->attachments[i].blendEnable) {
4881 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4882 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4883 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4884 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4885 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4886 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4887 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4888 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
4889 pPipe->blendConstantsEnabled = true;
4896 VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4897 const VkGraphicsPipelineCreateInfo *pCreateInfos,
4898 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4899 // The order of operations here is a little convoluted but gets the job done
4900 // 1. Pipeline create state is first shadowed into PIPELINE_STATE struct
4901 // 2. Create state is then validated (which uses flags setup during shadowing)
4902 // 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
4904 vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
4905 pipe_state.reserve(count);
4906 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4909 unique_lock_t lock(global_lock);
4911 for (i = 0; i < count; i++) {
4912 pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
4913 pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i], GetRenderPassStateSharedPtr(dev_data, pCreateInfos[i].renderPass));
4914 pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4917 for (i = 0; i < count; i++) {
4918 skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
4923 for (i = 0; i < count; i++) {
4924 skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
4928 for (i = 0; i < count; i++) {
4929 pPipelines[i] = VK_NULL_HANDLE;
4931 return VK_ERROR_VALIDATION_FAILED_EXT;
4935 dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4937 for (i = 0; i < count; i++) {
4938 if (pPipelines[i] != VK_NULL_HANDLE) {
4939 pipe_state[i]->pipeline = pPipelines[i];
4940 dev_data->pipelineMap[pPipelines[i]] = std::move(pipe_state[i]);
// vkCreateComputePipelines intercept.
// Builds one PIPELINE_STATE tracking object per create-info, validates each under the global
// lock, and only calls down the dispatch chain when no validation error was flagged. On driver
// success, ownership of each PIPELINE_STATE moves into dev_data->pipelineMap keyed by the
// returned handle; entries the driver left as VK_NULL_HANDLE are not tracked.
4947 VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4948                                                       const VkComputePipelineCreateInfo *pCreateInfos,
4949                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    // Per-pipeline tracking state; reserve up front so no reallocation happens mid-loop.
4952     vector<std::unique_ptr<PIPELINE_STATE>> pPipeState;
4953     pPipeState.reserve(count);
4954     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Global lock guards shared layer state (pipeline layout lookups, pipelineMap below).
4957     unique_lock_t lock(global_lock);
4958     for (i = 0; i < count; i++) {
4959         // Create and initialize internal tracking data structure
4960         pPipeState.push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
4961         pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
4962         pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4964         // TODO: Add Compute Pipeline Verification
4965         skip |= validate_compute_pipeline(dev_data, pPipeState[i].get());
    // Validation failed: null out every output handle and report failure without calling down.
4969     for (i = 0; i < count; i++) {
4970         pPipelines[i] = VK_NULL_HANDLE;
4972     return VK_ERROR_VALIDATION_FAILED_EXT;
    // No validation error: pass the call down the dispatch chain.
4977     dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
    // Record successfully created pipelines; pipelineMap takes ownership of the tracking state.
4979     for (i = 0; i < count; i++) {
4980         if (pPipelines[i] != VK_NULL_HANDLE) {
4981             pPipeState[i]->pipeline = pPipelines[i];
4982             dev_data->pipelineMap[pPipelines[i]] = std::move(pPipeState[i]);
// vkCreateSampler intercept: call straight down the dispatch chain, then on success record a
// SAMPLER_STATE in samplerMap (under the global lock) so later descriptor-update/binding
// validation can look this sampler up by handle. No pre-call validation is done here.
4989 VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
4990                                              const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
4991     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4992     VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
4993     if (VK_SUCCESS == result) {
4994         lock_guard_t lock(global_lock);
4995         dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
// Validates a VkDescriptorSetLayoutCreateInfo before the call is passed down the chain.
// Returns true when validation flags an error (callers use this as a "skip" signal).
// Short-circuits to false when this check has been disabled via layer settings.
5000 static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
5001     if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
    // Delegate to the descriptorset machinery; the extension flags and limits passed here gate
    // the push-descriptor and descriptor-indexing specific checks inside ValidateCreateInfo.
5002     return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
5003         dev_data->report_data, create_info, dev_data->extensions.vk_khr_push_descriptor,
5004         dev_data->phys_dev_ext_props.max_push_descriptors, dev_data->extensions.vk_ext_descriptor_indexing,
5005         &dev_data->phys_dev_ext_props.descriptor_indexing_features);
// Records layer state for a newly created descriptor set layout: caches a shared
// DescriptorSetLayout object keyed by the handle. The shared_ptr lets descriptor sets and
// pipeline layouts keep the layout state alive after the handle is destroyed.
// NOTE(review): descriptorSetLayoutMap is shared layer state — callers appear to invoke this
// with global_lock held (see CreateDescriptorSetLayout); confirm before adding new call sites.
5008 static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
5009                                                     VkDescriptorSetLayout set_layout) {
5010     dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
// vkCreateDescriptorSetLayout intercept: validate the create info, then call down the dispatch
// chain and record layer-side state for the new layout on success.
5013 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5014                                                          const VkAllocationCallbacks *pAllocator,
5015                                                          VkDescriptorSetLayout *pSetLayout) {
5016     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Default result returned to the app when validation decides to skip the downstream call.
5017     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5018     unique_lock_t lock(global_lock);
5019     bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
5022     result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5023     if (VK_SUCCESS == result) {
5025         PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
5031 // Used by CreatePipelineLayout and CmdPushConstants.
5032 // Note that the index argument is optional and only used by CreatePipelineLayout.
// Validates one push-constant (offset, size) pair against device limits and alignment rules:
//   1) offset < maxPushConstantsSize and size <= maxPushConstantsSize - offset
//      (tested in that order to avoid unsigned-add overflow),
//   2) size is non-zero and a multiple of 4,
//   3) offset is a multiple of 4.
// Messages and VUID codes are selected by strcmp on caller_name so the report matches the API
// entry point; an unrecognized caller_name is reported as DRAWSTATE_INTERNAL_ERROR.
// Returns true if any check failed; disabled entirely via the push_constant_range setting.
5033 static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5034                                       const char *caller_name, uint32_t index = 0) {
5035     if (dev_data->instance_data->disabled.push_constant_range) return false;
5036     uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5038     // Check that offset + size don't exceed the max.
5039     // Prevent arithetic overflow here by avoiding addition and testing in this order.
5040     if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5041         // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5042         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5043             if (offset >= maxPushConstantsSize) {
5045                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5046                     VALIDATION_ERROR_11a0024c,
5047                     "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
5048                     caller_name, index, offset, maxPushConstantsSize);
5050             if (size > maxPushConstantsSize - offset) {
5051                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5052                                 VALIDATION_ERROR_11a00254,
5053                                 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
5054                                 "maxPushConstantSize of %u.",
5055                                 caller_name, index, offset, size, maxPushConstantsSize);
5057         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5058             if (offset >= maxPushConstantsSize) {
5060                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5061                     VALIDATION_ERROR_1bc002e4,
5062                     "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
5063                     caller_name, index, offset, maxPushConstantsSize);
5065             if (size > maxPushConstantsSize - offset) {
5066                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5067                                 VALIDATION_ERROR_1bc002e6,
5068                                 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
5069                                 "maxPushConstantSize of %u.",
5070                                 caller_name, index, offset, size, maxPushConstantsSize);
    // Unknown caller name: internal layer error, not an application error.
5073             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5074                             DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
5077     // size needs to be non-zero and a multiple of 4.
5078     if ((size == 0) || ((size & 0x3) != 0)) {
5079         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5081                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5082                                 VALIDATION_ERROR_11a00250,
5083                                 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
5087                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5088                                 VALIDATION_ERROR_11a00252,
5089                                 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
5092         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5094                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5095                                 VALIDATION_ERROR_1bc2c21b,
5096                                 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
5100                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5101                                 VALIDATION_ERROR_1bc002e2,
5102                                 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
5106             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5107                             DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
5110     // offset needs to be a multiple of 4.
5111     if ((offset & 0x3) != 0) {
5112         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5113             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5114                             VALIDATION_ERROR_11a0024e,
5115                             "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
5117         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5118             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5119                             VALIDATION_ERROR_1bc002e0, "%s call has push constants with offset %u. Offset must be a multiple of 4.",
5120                             caller_name, offset);
5122             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5123                             DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
// Descriptor "groups" used when aggregating per-stage descriptor counts for pipeline-layout
// limit checks. Each group folds together the VkDescriptorType values that share a single
// device limit (e.g. SAMPLED_IMAGE and UNIFORM_TEXEL_BUFFER both count as DSL_TYPE_SAMPLED_IMAGES;
// see GetDescriptorCountMaxPerStage). DSL_NUM_DESCRIPTOR_GROUPS is the array-sizing sentinel.
5129 enum DSL_DESCRIPTOR_GROUPS {
5130     DSL_TYPE_SAMPLERS = 0,
5131     DSL_TYPE_UNIFORM_BUFFERS,
5132     DSL_TYPE_STORAGE_BUFFERS,
5133     DSL_TYPE_SAMPLED_IMAGES,
5134     DSL_TYPE_STORAGE_IMAGES,
5135     DSL_TYPE_INPUT_ATTACHMENTS,
5136     DSL_NUM_DESCRIPTOR_GROUPS
5139 // Used by PreCallValiateCreatePipelineLayout.
5140 // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
// For each descriptor group, sums descriptorCount across every binding (in every given set
// layout) whose stageFlags include the stage, then takes the max over all considered stages.
// skip_update_after_bind == true excludes layouts created with
// VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT (used for the core limits;
// the descriptor-indexing limits pass false to include them).
// NOTE(review): 'set_layouts' is taken by value — each call copies a vector of shared_ptrs
// (refcount traffic); a const reference would match the GetDescriptorSum signature below.
5141 std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
5142     const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts,
5143     bool skip_update_after_bind) {
5144     // Identify active pipeline stages
5145     std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
5146                                                    VK_SHADER_STAGE_COMPUTE_BIT};
    // Geometry/tessellation stages only contribute when the corresponding feature is enabled.
5147     if (dev_data->enabled_features.geometryShader) {
5148         stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
5150     if (dev_data->enabled_features.tessellationShader) {
5151         stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
5152         stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
5155     // Allow iteration over enum values
5156     std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS,
5157                                                      DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS};
5159     // Sum by layouts per stage, then pick max of stages per type
5160     std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // max descriptor sum among all pipeline stages
5161     for (auto stage : stage_flags) {
5162         std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // per-stage sums
5163         for (auto dsl : set_layouts) {
    // Optionally skip update-after-bind layouts (they count against separate EXT limits).
5164             if (skip_update_after_bind &&
5165                 (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
5169             for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5170                 const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5171                 if (0 != (stage & binding->stageFlags)) {
5172                     switch (binding->descriptorType) {
5173                         case VK_DESCRIPTOR_TYPE_SAMPLER:
5174                             stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5176                         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
5177                         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
5178                             stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
5180                         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
5181                         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
5182                             stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
5184                         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
5185                         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
5186                             stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5188                         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
5189                         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
5190                             stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
    // Combined image+sampler counts against BOTH the sampled-image and sampler limits.
5192                         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
5193                             stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5194                             stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5196                         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
5197                             stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
    // Fold this stage's totals into the running per-group maxima.
5205         for (auto type : dsl_groups) {
5206             max_sum[type] = std::max(stage_sum[type], max_sum[type]);
5212 // Used by PreCallValidateCreatePipelineLayout.
5213 // Returns an array of size VK_DESCRIPTOR_TYPE_RANGE_SIZE of the summed descriptors by type.
5214 // Note: descriptors only count against the limit once even if used by multiple stages.
// Unlike GetDescriptorCountMaxPerStage, this sums per raw VkDescriptorType (the array is
// indexed directly by binding->descriptorType) and ignores stageFlags entirely.
// skip_update_after_bind == true excludes layouts created with the UPDATE_AFTER_BIND_POOL flag.
5215 std::valarray<uint32_t> GetDescriptorSum(
5216     const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
5217     bool skip_update_after_bind) {
5218     std::valarray<uint32_t> sum_by_type(0U, VK_DESCRIPTOR_TYPE_RANGE_SIZE);
5219     for (auto dsl : set_layouts) {
5220         if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
5224         for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5225             const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5226             sum_by_type[binding->descriptorType] += binding->descriptorCount;
// Pre-call validation for vkCreatePipelineLayout. Checks, in order:
//   1) setLayoutCount against maxBoundDescriptorSets,
//   2) each push-constant range (via validatePushConstantRange) and non-zero stageFlags,
//   3) no stage flag repeated across push-constant ranges,
//   4) at most one push-descriptor set layout,
//   5) per-stage descriptor counts against the core maxPerStageDescriptor* limits
//      (update-after-bind layouts excluded),
//   6) total descriptor counts against the core maxDescriptorSet* limits,
//   7) when VK_EXT_descriptor_indexing is enabled, the same two families of checks again with
//      update-after-bind layouts included, against the ...UpdateAfterBind* limits.
// Returns true if any check failed. Early-outs after the push-constant checks so the
// descriptor-limit accounting is skipped once an error is already flagged.
// NOTE(review): "Valiate" in the function name is a typo ("Validate"); left as-is here since
// renaming would require touching the caller.
5232 static bool PreCallValiateCreatePipelineLayout(const layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo) {
5235     // Validate layout count against device physical limit
5236     if (pCreateInfo->setLayoutCount > dev_data->phys_dev_props.limits.maxBoundDescriptorSets) {
5237         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5238                         VALIDATION_ERROR_0fe0023c,
5239                         "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
5240                         pCreateInfo->setLayoutCount, dev_data->phys_dev_props.limits.maxBoundDescriptorSets);
5243     // Validate Push Constant ranges
5245     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5246         skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5247                                           pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5248         if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5249             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5250                             VALIDATION_ERROR_11a2dc03, "vkCreatePipelineLayout() call has no stageFlags set.");
5254     // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
    // O(n^2) pairwise scan; pushConstantRangeCount is expected to be tiny in practice.
5255     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5256         for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5257             if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
5258                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5259                                 VALIDATION_ERROR_0fe00248,
5260                                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
    // Early out: don't bother with descriptor accounting if an error is already queued.
5266     if (skip) return skip;
    // Snapshot the set-layout state objects (under the lock) for the limit accounting below.
5268     std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
5269     unsigned int push_descriptor_set_count = 0;
5271         unique_lock_t lock(global_lock);  // Lock while accessing global state
5272         for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5273             set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5274             if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
5278     if (push_descriptor_set_count > 1) {
5279         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5280                         VALIDATION_ERROR_0fe0024a, "vkCreatePipelineLayout() Multiple push descriptor sets found.");
5283     // Max descriptors by type, within a single pipeline stage
    // 'true' => exclude update-after-bind layouts; those are checked separately below.
5284     std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(dev_data, set_layouts, true);
5286     if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
5288             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5289                     VALIDATION_ERROR_0fe0023e,
5290                     "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
5291                     "maxPerStageDescriptorSamplers limit (%d).",
5292                     max_descriptors_per_stage[DSL_TYPE_SAMPLERS], dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers);
5296     if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
5297         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5298                         VALIDATION_ERROR_0fe00240,
5299                         "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
5300                         "maxPerStageDescriptorUniformBuffers limit (%d).",
5301                         max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
5302                         dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
5306     if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
5307         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5308                         VALIDATION_ERROR_0fe00242,
5309                         "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
5310                         "maxPerStageDescriptorStorageBuffers limit (%d).",
5311                         max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
5312                         dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
5316     if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
5317         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5318                         VALIDATION_ERROR_0fe00244,
5319                         "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
5320                         "maxPerStageDescriptorSampledImages limit (%d).",
5321                         max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
5322                         dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages);
5326     if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
5327         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5328                         VALIDATION_ERROR_0fe00246,
5329                         "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
5330                         "maxPerStageDescriptorStorageImages limit (%d).",
5331                         max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
5332                         dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages);
5335     // Input attachments
5336     if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
5337         dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
5338         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5339                         VALIDATION_ERROR_0fe00d18,
5340                         "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
5341                         "maxPerStageDescriptorInputAttachments limit (%d).",
5342                         max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
5343                         dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
5346     // Total descriptors by type
5348     std::valarray<uint32_t> sum_all_stages = GetDescriptorSum(dev_data, set_layouts, true);
    // Combined image+sampler descriptors count against both the sampler and sampled-image sums.
5350     uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
5351     if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
5352         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5353                         VALIDATION_ERROR_0fe00d1a,
5354                         "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
5355                         "maxDescriptorSetSamplers limit (%d).",
5356                         sum, dev_data->phys_dev_props.limits.maxDescriptorSetSamplers);
5360     if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
5361         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5362                         VALIDATION_ERROR_0fe00d1c,
5363                         "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
5364                         "maxDescriptorSetUniformBuffers limit (%d).",
5365                         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
5366                         dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers);
5369     // Dynamic uniform buffers
5370     if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
5371         dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
5372         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5373                         VALIDATION_ERROR_0fe00d1e,
5374                         "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
5375                         "maxDescriptorSetUniformBuffersDynamic limit (%d).",
5376                         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
5377                         dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
5381     if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
5382         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5383                         VALIDATION_ERROR_0fe00d20,
5384                         "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
5385                         "maxDescriptorSetStorageBuffers limit (%d).",
5386                         sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
5387                         dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers);
5390     // Dynamic storage buffers
5391     if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
5392         dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
5393         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5394                         VALIDATION_ERROR_0fe00d22,
5395                         "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
5396                         "maxDescriptorSetStorageBuffersDynamic limit (%d).",
5397                         sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
5398                         dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
    // Sampled images: SAMPLED_IMAGE + COMBINED_IMAGE_SAMPLER + UNIFORM_TEXEL_BUFFER share one limit.
5402     sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
5403           sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
5404     if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
5405         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5406                         VALIDATION_ERROR_0fe00d24,
5407                         "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
5408                         "maxDescriptorSetSampledImages limit (%d).",
5409                         sum, dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages);
    // Storage images: STORAGE_IMAGE + STORAGE_TEXEL_BUFFER share one limit.
5413     sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
5414     if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
5415         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5416                         VALIDATION_ERROR_0fe00d26,
5417                         "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
5418                         "maxDescriptorSetStorageImages limit (%d).",
5419                         sum, dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages);
5422     // Input attachments
5423     if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
5424         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5425                         VALIDATION_ERROR_0fe00d28,
5426                         "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
5427                         "maxDescriptorSetInputAttachments limit (%d).",
5428                         sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
5429                         dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments);
    // Descriptor-indexing extension: repeat both check families with update-after-bind layouts
    // included ('false' below), against the EXT ...UpdateAfterBind* limits.
5432     if (dev_data->extensions.vk_ext_descriptor_indexing) {
5433         // XXX TODO: replace with correct VU messages
5435         // Max descriptors by type, within a single pipeline stage
5436         std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
5437             GetDescriptorCountMaxPerStage(dev_data, set_layouts, false);
5439         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
5440             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
5441             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5442                             VALIDATION_ERROR_0fe0179c,
5443                             "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
5444                             "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
5445                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
5446                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
5450         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
5451             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
5453                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5454                         VALIDATION_ERROR_0fe0179e,
5455                         "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
5456                         "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
5457                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
5458                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
5462         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
5463             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
5465                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5466                         VALIDATION_ERROR_0fe017a0,
5467                         "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
5468                         "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
5469                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
5470                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
5474         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
5475             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
5477                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5478                         VALIDATION_ERROR_0fe017a2,
5479                         "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
5480                         "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
5481                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
5482                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
5486         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
5487             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
5489                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5490                         VALIDATION_ERROR_0fe017a4,
5491                         "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
5492                         "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
5493                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
5494                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
5497         // Input attachments
5498         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
5499             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
5501                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5502                 VALIDATION_ERROR_0fe017a6,
5503                 "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
5504                 "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
5505                 max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
5506                 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
5509         // Total descriptors by type, summed across all pipeline stages
5511         std::valarray<uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(dev_data, set_layouts, false);
5513         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
5514               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
5515         if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
5516             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5517                             VALIDATION_ERROR_0fe017b8,
5518                             "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
5519                             "maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
5520                             sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
5524         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
5525             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
5526             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5527                             VALIDATION_ERROR_0fe017ba,
5528                             "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
5529                             "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
5530                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
5531                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
5534         // Dynamic uniform buffers
5535         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
5536             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
5538                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5539                 VALIDATION_ERROR_0fe017bc,
5540                 "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
5541                 "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
5542                 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
5543                 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
5547         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
5548             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
5549             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5550                             VALIDATION_ERROR_0fe017be,
5551                             "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
5552                             "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
5553                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
5554                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
5557         // Dynamic storage buffers
5558         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
5559             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
5561                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5562                 VALIDATION_ERROR_0fe017c0,
5563                 "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
5564                 "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
5565                 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
5566                 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
5570         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
5571               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
5572               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
5573         if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
5575                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5576                         VALIDATION_ERROR_0fe017c2,
5577                         "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
5578                         "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
5579                         sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
5583         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
5584               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
5585         if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
5587                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5588                         VALIDATION_ERROR_0fe017c4,
5589                         "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
5590                         "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
5591                         sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
5594         // Input attachments
5595         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
5596             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
5597             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5598                             VALIDATION_ERROR_0fe017c6,
5599                             "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
5600                             "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
5601                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
5602                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
5608 // For repeatable sorting, not very useful for "memory in range" search
5609 struct PushConstantRangeCompare {
5610 bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
5611 if (lhs->offset == rhs->offset) {
5612 if (lhs->size == rhs->size) {
5613 // The comparison is arbitrary, but avoids false aliasing by comparing all fields.
5614 return lhs->stageFlags < rhs->stageFlags;
5616 // If the offsets are the same then sorting by the end of range is useful for validation
5617 return lhs->size < rhs->size;
5619 return lhs->offset < rhs->offset;
5623 static PushConstantRangesDict push_constant_ranges_dict;
5625 PushConstantRangesId get_canonical_id(const VkPipelineLayoutCreateInfo *info) {
5626 if (!info->pPushConstantRanges) {
5627 // Hand back the empty entry (creating as needed)...
5628 return push_constant_ranges_dict.look_up(PushConstantRanges());
5631 // Sort the input ranges to ensure equivalent ranges map to the same id
5632 std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
5633 for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
5634 sorted.insert(info->pPushConstantRanges + i);
5637 PushConstantRanges ranges(sorted.size());
5638 for (const auto range : sorted) {
5639 ranges.emplace_back(*range);
5641 return push_constant_ranges_dict.look_up(std::move(ranges));
// Dictionary of canonical form of the pipeline set layout of descriptor set layouts
static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;
// Dictionary of canonical form of the "compatible for set" records
static PipelineLayoutCompatDict pipeline_layout_compat_dict;
5650 static PipelineLayoutCompatId get_canonical_id(const uint32_t set_index, const PushConstantRangesId pcr_id,
5651 const PipelineLayoutSetLayoutsId set_layouts_id) {
5652 return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
5655 static void PostCallRecordCreatePipelineLayout(layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
5656 const VkPipelineLayout *pPipelineLayout) {
5657 unique_lock_t lock(global_lock); // Lock while accessing state
5659 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5660 plNode.layout = *pPipelineLayout;
5661 plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
5662 PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
5663 for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5664 plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5665 set_layouts[i] = plNode.set_layouts[i]->get_layout_id();
5668 // Get canonical form IDs for the "compatible for set" contents
5669 plNode.push_constant_ranges = get_canonical_id(pCreateInfo);
5670 auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
5671 plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount);
5673 // Create table of "compatible for set N" cannonical forms for trivial accept validation
5674 for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5675 plNode.compat_for_set.emplace_back(get_canonical_id(i, plNode.push_constant_ranges, set_layouts_id));
5681 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5682 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5683 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5685 bool skip = PreCallValiateCreatePipelineLayout(dev_data, pCreateInfo);
5686 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5688 VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5690 if (VK_SUCCESS == result) {
5691 PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
5696 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
5697 const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
5698 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5699 VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
5700 if (VK_SUCCESS == result) {
5701 DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
5702 if (NULL == pNewNode) {
5703 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5704 HandleToUint64(*pDescriptorPool), DRAWSTATE_OUT_OF_MEMORY,
5705 "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
5706 return VK_ERROR_VALIDATION_FAILED_EXT;
5708 lock_guard_t lock(global_lock);
5709 dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
5712 // Need to do anything if pool create fails?
// Layer intercept for vkResetDescriptorPool: reject the reset if any set from
// the pool is still in use, otherwise call down and mirror the reset in layer state.
VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    // Make sure sets being destroyed are not currently in-use
    bool skip = validateIdleDescriptorSetForPoolReset(dev_data, descriptorPool);
    // Validation failure short-circuits the driver call entirely
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        // Driver reset succeeded -- update the layer's pool state to match
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
5736 // Ensure the pool contains enough descriptors and descriptor sets to satisfy
5737 // an allocation request. Fills common_data with the total number of descriptors of each type required,
5738 // as well as DescriptorSetLayout ptrs used for later update.
5739 static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5740 cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5741 // Always update common data
5742 cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
5743 if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
5744 // All state checks for AllocateDescriptorSets is done in single function
5745 return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
5747 // Allocation state was good and call down chain was made so update state based on allocating descriptor sets
5748 static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5749 VkDescriptorSet *pDescriptorSets,
5750 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5751 // All the updates are contained in a single cvdescriptorset function
5752 cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
5753 &dev_data->setMap, dev_data);
// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
// Layer intercept for vkAllocateDescriptorSets.
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    // common_data is filled during validation and consumed by the record step below
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        // Record the newly allocated sets in the layer's pool/set maps
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
5776 // Verify state before freeing DescriptorSets
5777 static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
5778 const VkDescriptorSet *descriptor_sets) {
5779 if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
5781 // First make sure sets being destroyed are not currently in-use
5782 for (uint32_t i = 0; i < count; ++i) {
5783 if (descriptor_sets[i] != VK_NULL_HANDLE) {
5784 skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
5788 DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
5789 if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
5790 // Can't Free from a NON_FREE pool
5791 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5792 HandleToUint64(pool), VALIDATION_ERROR_28600270,
5793 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
5794 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
5798 // Sets are being returned to the pool so update the pool state
5799 static void PreCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
5800 const VkDescriptorSet *descriptor_sets) {
5801 DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
5802 // Update available descriptor sets in pool
5803 pool_state->availableSets += count;
5805 // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
5806 for (uint32_t i = 0; i < count; ++i) {
5807 if (descriptor_sets[i] != VK_NULL_HANDLE) {
5808 auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
5809 uint32_t type_index = 0, descriptor_count = 0;
5810 for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
5811 type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
5812 descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
5813 pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
5815 freeDescriptorSet(dev_data, descriptor_set);
5816 pool_state->sets.erase(descriptor_set);
// Layer intercept for vkFreeDescriptorSets: validate, update pool state, then call down.
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        // Validation failed: report the error result instead of calling down the chain
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    // A race here is invalid (descriptorPool should be externally sync'd), but code defensively against an invalid race
    PreCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
5839 // TODO : This is a Proof-of-concept for core validation architecture
5840 // Really we'll want to break out these functions to separate files but
5841 // keeping it all together here to prove out design
5842 // PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
5843 static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5844 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5845 const VkCopyDescriptorSet *pDescriptorCopies) {
5846 if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
5847 // First thing to do is perform map look-ups.
5848 // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
5849 // so we can't just do a single map look-up up-front, but do them individually in functions below
5851 // Now make call(s) that validate state, but don't perform state updates in this function
5852 // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
5853 // namespace which will parse params and make calls into specific class instances
5854 return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
5855 descriptorCopyCount, pDescriptorCopies);
5857 // PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
5858 static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5859 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5860 const VkCopyDescriptorSet *pDescriptorCopies) {
5861 cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
// Layer intercept for vkUpdateDescriptorSets: validate, record state, then call down.
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // Only map look-up at top level is for device-level layer_data
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
    // Since UpdateDescriptorSets() is void, nothing to check prior to updating state & we can update before call down chain
    PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
    dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
// Layer intercept for vkAllocateCommandBuffers: on success, create and register
// a GLOBAL_CB_NODE for each allocated command buffer and link it to its pool.
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
                                                      VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        unique_lock_t lock(global_lock);
        auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.insert(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                // Initialize tracking state, then stash the create info/device on the node
                ResetCommandBufferState(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
5908 // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
5909 static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
5910 addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
5912 for (auto attachment : fb_state->attachments) {
5913 auto view_state = attachment.view_state;
5915 AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
// Layer intercept for vkBeginCommandBuffer: validates re-begin/implicit-reset
// rules, secondary command buffer inheritance info, and records begin state.
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (cb_node->in_use.load()) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
                            "Calling vkBeginCommandBuffer() on active command buffer %" PRIx64
                            " before it has completed. You must check command buffer fence before this call.",
                            HandleToUint64(commandBuffer));
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
                    // Secondary command buffers must supply inheritance info
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00066,
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64 ") must have inheritance info.",
                            HandleToUint64(commandBuffer));
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    assert(pInfo->renderPass);
                    string errorString = "";
                    auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
                        if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
                            // renderPass that framebuffer was created with must be compatible with local renderPass
                                validateRenderPassCompatibility(dev_data, "framebuffer", framebuffer->rp_state.get(),
                                                                "command buffer", GetRenderPassState(dev_data, pInfo->renderPass),
                                                                "vkBeginCommandBuffer()", VALIDATION_ERROR_0280006e);
                        // Connect this framebuffer and its children to this cmdBuffer
                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
                // Precise occlusion queries require both the feature and occlusionQueryEnable
                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                    VALIDATION_ERROR_16e00068,
                                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
                                    ") must not have VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device "
                                    "does not support precise occlusion queries.",
                                    HandleToUint64(commandBuffer));
            // The inherited subpass index must be within the render pass's subpass count
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                        VALIDATION_ERROR_0280006c,
                                        "vkBeginCommandBuffer(): Secondary Command Buffers (0x%" PRIx64
                                        ") must have a subpass index (%d) that is less than the number of subpasses (%d).",
                                        HandleToUint64(commandBuffer), pInfo->subpass, renderPass->createInfo.subpassCount);
        // Begin on an already-recording buffer is an error; on a recorded buffer it is an implicit reset
        if (CB_RECORDING == cb_node->state) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
                            "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIx64
                            ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                            HandleToUint64(commandBuffer));
        } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
            // Implicit reset path: only legal if the pool allows individual resets
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = GetCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00064,
                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIx64
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIx64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
            ResetCommandBufferState(dev_data, commandBuffer);
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            // Deep-copy inheritance info so the stored pointer stays valid after return
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If we are a secondary command-buffer and inheriting. Update the items we should inherit.
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
6031 static void PostCallRecordEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
6032 // Cached validation is specific to a specific recording of a specific command buffer.
6033 for (auto descriptor_set : cb_state->validated_descriptor_sets) {
6034 descriptor_set->ClearCachedValidation(cb_state);
6036 cb_state->validated_descriptor_sets.clear();
// Layer intercept for vkEndCommandBuffer: validate end-state (render pass /
// active queries), call down, then mark the buffer recorded on success.
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
        skip |= ValidateCmd(dev_data, pCB, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
        // Every query begun in this command buffer must also have been ended in it
        for (auto query : pCB->activeQueries) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_2740007a,
                            "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d.",
                            HandleToUint64(query.pool), query.index);
        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        // Per-recording cached validation is cleared regardless of the driver result
        PostCallRecordEndCommandBuffer(dev_data, pCB);
        if (VK_SUCCESS == result) {
            pCB->state = CB_RECORDED;
    return VK_ERROR_VALIDATION_FAILED_EXT;
// Layer intercept for vkResetCommandBuffer: an explicit reset requires the pool's
// RESET bit and the buffer not being in flight; on success, reset tracking state.
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = GetCommandPoolNode(dev_data, cmdPool);
    // Individual resets are only legal when the pool was created with the RESET bit
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_3260005c,
                        "Attempt to reset command buffer (0x%" PRIx64 ") created from command pool (0x%" PRIx64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                        HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
    skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        // Driver reset succeeded -- reset the layer's tracking state to match
        ResetCommandBufferState(dev_data, commandBuffer);
// Layer intercept for vkCmdBindPipeline: validate queue caps/recording state and
// record the newly bound pipeline (including its static vs dynamic state mask).
VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                           VkPipeline pipeline) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_18002415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616 -- using ValidatePipelineBindPoint
        auto pipe_state = getPipelineState(dev_data, pipeline);
        if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
            // Clear the previous pipeline's static-state bits, then compute and set
            // the new pipeline's static (non-dynamic) state mask
            cb_state->status &= ~cb_state->static_status;
            cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
            cb_state->status |= cb_state->static_status;
        cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
        set_pipeline_state(pipe_state);
        addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
// Layer intercept for vkCmdSetViewport: rejected if the bound pipeline baked
// viewports in statically (no VK_DYNAMIC_STATE_VIEWPORT); otherwise records which
// viewports are now set.
VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
                                          const VkViewport *pViewports) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORT, "vkCmdSetViewport()");
        if (pCB->static_status & CBSTATUS_VIEWPORT_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1e00098a,
                            "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag..");
            // Mark viewports [firstViewport, firstViewport+viewportCount) as set
            pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
            pCB->status |= CBSTATUS_VIEWPORT_SET;
    if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
// Layer intercept for vkCmdSetScissor: rejected if the bound pipeline baked
// scissors in statically (no VK_DYNAMIC_STATE_SCISSOR); otherwise records which
// scissors are now set.
VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
                                         const VkRect2D *pScissors) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSOR, "vkCmdSetScissor()");
        if (pCB->static_status & CBSTATUS_SCISSOR_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1d80049c,
                            "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag..");
            // Mark scissors [firstScissor, firstScissor+scissorCount) as set
            pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
            pCB->status |= CBSTATUS_SCISSOR_SET;
    if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
// Layer intercept for vkCmdSetLineWidth: rejected if line width was baked into
// the bound pipeline statically; otherwise marks line width as set.
VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
        if (pCB->static_status & CBSTATUS_LINE_WIDTH_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1d600626,
                            "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
            pCB->status |= CBSTATUS_LINE_WIDTH_SET;
    if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
// Layer intercept for vkCmdSetDepthBias: rejected if depth bias is static on the
// bound pipeline, or if a non-zero clamp is used without the depthBiasClamp feature.
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
        if (pCB->static_status & CBSTATUS_DEPTH_BIAS_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062a,
                            "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
        // Non-zero clamp requires the depthBiasClamp device feature
        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062c,
                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
// Layer intercept for vkCmdSetBlendConstants: rejected if blend constants are
// static on the bound pipeline; otherwise marks blend constants as set.
VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
        if (pCB->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1ca004c8,
                            "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
            pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
// Layer intercept for vkCmdSetDepthBounds: checks queue/recording state and that the bound
// pipeline enabled VK_DYNAMIC_STATE_DEPTH_BOUNDS before forwarding the call.
// NOTE(review): excerpt appears to be missing lines ('bool skip = false;', closing braces).
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
    skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
    // Error if depth bounds were statically baked into the bound pipeline
    if (pCB->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_1ce004ae,
                        "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
    pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;  // record that dynamic depth bounds are set
    if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
// Layer intercept for vkCmdSetStencilCompareMask: checks queue/recording state and that the
// bound pipeline enabled VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK before forwarding the call.
// NOTE(review): the bare ValidateCmdQueueFlags(...) and log_msg(...) statements below look
// like continuations of elided 'skip |=' lines from the original file -- do not treat the
// missing assignment as intentional without checking the upstream source.
VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                    uint32_t compareMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
    skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
    // Error if the stencil compare mask was statically baked into the bound pipeline
    if (pCB->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(commandBuffer), VALIDATION_ERROR_1da004b4,
                "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
    pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;  // record that the dynamic compare mask is set
    if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
// Layer intercept for vkCmdSetStencilWriteMask: checks queue/recording state and that the
// bound pipeline enabled VK_DYNAMIC_STATE_STENCIL_WRITE_MASK before forwarding the call.
// NOTE(review): the bare ValidateCmdQueueFlags(...) line looks like the continuation of an
// elided 'skip |=' line; 'bool skip = false;' and closing braces also appear elided.
VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
    skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
    // Error if the stencil write mask was statically baked into the bound pipeline
    if (pCB->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_1de004b6,
                        "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
    pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;  // record that the dynamic write mask is set
    if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
// Layer intercept for vkCmdSetStencilReference: checks queue/recording state and that the
// bound pipeline enabled VK_DYNAMIC_STATE_STENCIL_REFERENCE before forwarding the call.
// NOTE(review): the bare ValidateCmdQueueFlags(...) line looks like the continuation of an
// elided 'skip |=' line; 'bool skip = false;' and closing braces also appear elided.
VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
    skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
    // Error if the stencil reference was statically baked into the bound pipeline
    if (pCB->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_1dc004b8,
                        "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
    pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;  // record that the dynamic reference is set
    if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
6332 // Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
// Rebinds descriptor sets [first_set, first_set + set_count) for the given bind point,
// invalidating previously bound sets that are "disturbed" per the compatibility rules and
// consuming dynamic offsets from p_dynamic_offsets in set order.
// NOTE(review): 'descriptor_sets' is taken by value -- a const reference would avoid a
// vector copy per call; confirm against upstream before changing.
// NOTE(review): excerpt appears to be missing lines (lambda return statements, closing
// braces, an 'else' arm for the dynamic-offset clear) -- comments cover visible code only.
static void UpdateLastBoundDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                          VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout,
                                          uint32_t first_set, uint32_t set_count,
                                          const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
                                          uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
    // Nothing to do for an empty bind; a null layout is a caller error (assert) but tolerated
    if (0 == set_count) return;
    assert(pipeline_layout);
    if (!pipeline_layout) return;
    uint32_t required_size = first_set + set_count;
    const uint32_t last_binding_index = required_size - 1;
    assert(last_binding_index < pipeline_layout->compat_for_set.size());
    // Some useful shorthand
    auto &last_bound = cb_state->lastBound[pipeline_bind_point];
    auto &bound_sets = last_bound.boundDescriptorSets;
    auto &dynamic_offsets = last_bound.dynamicOffsets;
    auto &bound_compat_ids = last_bound.compat_id_for_set;
    auto &pipe_compat_ids = pipeline_layout->compat_for_set;
    // These three arrays are kept the same size (asserted below)
    const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
    assert(current_size == dynamic_offsets.size());
    assert(current_size == bound_compat_ids.size());
    // We need this three times in this function, but nowhere else
    // Drops the tracked push descriptor set if 'ds' is it (there is at most one per bind point)
    auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
        if (ds && ds->IsPushDescriptor()) {
            assert(ds == last_bound.push_descriptor_set.get());
            last_bound.push_descriptor_set = nullptr;
    // Clean up the "disturbed" before and after the range to be set
    if (required_size < current_size) {
        if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
            // We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor
            for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
                if (push_descriptor_cleanup(bound_sets[set_idx])) break;
            // We're not disturbing past last, so leave the upper binding data alone.
            required_size = current_size;
    // We resize if we need more set entries or if those past "last" are disturbed
    if (required_size != current_size) {
        // TODO: put these size tied things in a struct (touches many lines)
        bound_sets.resize(required_size);
        dynamic_offsets.resize(required_size);
        bound_compat_ids.resize(required_size);
    // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
    for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
        if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) {
            push_descriptor_cleanup(bound_sets[set_idx]);
            bound_sets[set_idx] = nullptr;
            dynamic_offsets[set_idx].clear();
            bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];
    // Now update the bound sets with the input sets
    const uint32_t *input_dynamic_offsets = p_dynamic_offsets;  // "read" pointer for dynamic offset data
    for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
        auto set_idx = input_idx + first_set;  // set_idx is index within layout, input_idx is index within input descriptor sets
        cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
        // Record binding (or push)
        push_descriptor_cleanup(bound_sets[set_idx]);
        bound_sets[set_idx] = descriptor_set;
        bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];  // compat ids are canonical *per* set index
        if (descriptor_set) {
            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            // TODO: Add logic for tracking push_descriptor offsets (here or in caller)
            if (set_dynamic_descriptor_count && input_dynamic_offsets) {
                // Consume this set's share of the dynamic offsets and advance the read pointer
                const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
                dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
                input_dynamic_offsets = end_offset;
                assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
                dynamic_offsets[set_idx].clear();
            if (!descriptor_set->IsPushDescriptor()) {
                // Can't cache validation of push_descriptors
                cb_state->validated_descriptor_sets.insert(descriptor_set);
6431 // Update the bound state for the bind point, including the effects of incompatible pipeline layouts
// State-update half of vkCmdBindDescriptorSets: resolves the handles to tracked
// DescriptorSet nodes and forwards to UpdateLastBoundDescriptorSets if at least one set
// is known; also records the bound pipeline layout for the bind point.
// NOTE(review): excerpt appears to be missing closing braces.
static void PreCallRecordCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                               VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
                                               uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
                                               uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
    auto pipeline_layout = getPipelineLayout(device_data, layout);
    std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets;
    descriptor_sets.reserve(setCount);
    // Construct a list of the descriptors
    bool found_non_null = false;
    for (uint32_t i = 0; i < setCount; i++) {
        cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[i]);
        descriptor_sets.emplace_back(descriptor_set);  // unknown handles become nullptr entries
        found_non_null |= descriptor_set != nullptr;
    if (found_non_null) {  // which implies setCount > 0
        UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount,
                                      descriptor_sets, dynamicOffsetCount, pDynamicOffsets);
        cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
// Validation half of vkCmdBindDescriptorSets: checks queue support and recording state,
// per-set layout compatibility, never-updated sets, and that dynamicOffsetCount matches the
// total dynamic descriptors with each offset meeting the device's alignment limits.
// NOTE(review): excerpt appears to be missing lines ('bool skip = false;', some 'skip |='
// prefixes before multi-line log_msg calls, '== 0' comparison tails, 'else' lines, closing
// braces, and the final 'return skip;') -- comments describe the visible code only.
static bool PreCallValidateCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
                                                 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
                                                 uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
                                                 uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_17c02415);
    skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
    // Track total count of dynamic descriptor types to make sure we have an offset for each one
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";
    uint32_t last_set_index = firstSet + setCount - 1;
    // Grow the per-bind-point tracking arrays so the bound range is addressable
    if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
        cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
        cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
        cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1);
    auto pipeline_layout = getPipelineLayout(device_data, layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
        if (descriptor_set) {
            // Warn on binding a set that was allocated but never written/copied into
            if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
                "Descriptor Set 0x%" PRIx64 " bound but it was never updated. You may want to either update it or not bind it.",
                HandleToUint64(pDescriptorSets[set_idx]));
            // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
            if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        HandleToUint64(pDescriptorSets[set_idx]), VALIDATION_ERROR_17c002cc,
                        "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
                        "pipelineLayout 0x%" PRIx64 " due to: %s.",
                        set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str());
            auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
            if (set_dynamic_descriptor_count) {
                // First make sure we won't overstep bounds of pDynamicOffsets array
                if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
                    skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT,
                                    "descriptorSet #%u (0x%" PRIx64
                                    ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. "
                                    "There must be one dynamic offset for each dynamic descriptor being bound.",
                                    set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
                                    (dynamicOffsetCount - total_dynamic_descriptors));
                } else {  // Validate dynamic offsets and Dynamic Offset Minimums
                    uint32_t cur_dyn_offset = total_dynamic_descriptors;
                    for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                        // Dynamic uniform buffer offsets must honor minUniformBufferOffsetAlignment
                        if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                            if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
                                           device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
                                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                                "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
                        // Dynamic storage buffer offsets must honor minStorageBufferOffsetAlignment
                        } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                            if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
                                           device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
                                skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
                                                "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                                "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
                                                cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                                device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
                // Keep running total of dynamic descriptor count to verify at the end
                total_dynamic_descriptors += set_dynamic_descriptor_count;
            // Set handle did not resolve to a tracked descriptor set
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_INVALID_SET,
                            "Attempt to bind descriptor set 0x%" PRIx64 " that doesn't exist!",
                            HandleToUint64(pDescriptorSets[set_idx]));
    // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
    if (total_dynamic_descriptors != dynamicOffsetCount) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_17c002ce,
                        "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
                        "exactly match the number of dynamic descriptors.",
                        setCount, total_dynamic_descriptors, dynamicOffsetCount);
// Layer intercept for vkCmdBindDescriptorSets: validate, record layer state, then forward
// the call down the dispatch chain.
// NOTE(review): excerpt appears to be missing lines (the 'bool skip' declaration, the
// '!skip' guard around record/dispatch, lock handling, closing braces).
VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                 const uint32_t *pDynamicOffsets) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
    // Validate first ...
    skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
                                                pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
    // ... then record the binding into layer state ...
    PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
                                       dynamicOffsetCount, pDynamicOffsets);
    // ... and forward to the driver
    device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                      pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6579 // Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
6580 // Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
6581 // TODO add vkCmdBindPipeline bind_point validation using this call.
// Checks that the command pool's queue family supports the queue capability required by
// 'bind_point' (graphics or compute); reports bind_errors[bind_point] on mismatch.
// NOTE(review): excerpt appears to be missing lines ('bool skip = false;', one argument
// line of the log_msg call, closing braces, 'return skip;').
bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                               const char *func_name,
                               const std::array<UNIQUE_VALIDATION_ERROR_CODE, VK_PIPELINE_BIND_POINT_RANGE_SIZE> &bind_errors) {
    auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
    if (pool) {  // The loss of a pool in a recording cmd is reported in DestroyCommandPool
        // Queue capability required for each bind point, indexed in bind-point-range order
        static const VkQueueFlags flag_mask[VK_PIPELINE_BIND_POINT_RANGE_SIZE] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT};
        const auto bind_point_index = bind_point - VK_PIPELINE_BIND_POINT_BEGIN_RANGE;  // typeof enum is not defined, use auto
        const auto &qfp = GetPhysDevProperties(device_data)->queue_family_properties[pool->queueFamilyIndex];
        if (0 == (qfp.queueFlags & flag_mask[bind_point_index])) {
            const UNIQUE_VALIDATION_ERROR_CODE error = bind_errors[bind_point_index];
            auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
            auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            "%s: CommandBuffer 0x%" PRIxLEAST64 " was allocated from VkCommandPool 0x%" PRIxLEAST64
                            " that does not support bindpoint %s.",
                            func_name, cb_u64, cp_u64, string_VkPipelineBindPoint(bind_point));
6605 static bool PreCallValidateCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6606 const VkPipelineBindPoint bind_point, const VkPipelineLayout layout,
6607 const uint32_t set, const uint32_t descriptor_write_count,
6608 const VkWriteDescriptorSet *descriptor_writes, const char *func_name) {
6610 skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
6611 skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
6612 VALIDATION_ERROR_1be02415);
6613 skip |= ValidatePipelineBindPoint(device_data, cb_state, bind_point, func_name,
6614 {{VALIDATION_ERROR_1be002d6, VALIDATION_ERROR_1be002d6}});
6615 auto layout_data = getPipelineLayout(device_data, layout);
6617 // Validate the set index points to a push descriptor set and is in range
6619 const auto &set_layouts = layout_data->set_layouts;
6620 const auto layout_u64 = HandleToUint64(layout);
6621 if (set < set_layouts.size()) {
6622 const auto *dsl = set_layouts[set].get();
6623 if (dsl && (0 == (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))) {
6624 skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6625 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, VALIDATION_ERROR_1be002da,
6626 "%s: Set index %" PRIu32
6627 " does not match push descriptor set layout index for VkPipelineLayout 0x%" PRIxLEAST64 ".",
6628 func_name, set, layout_u64);
6631 skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
6632 layout_u64, VALIDATION_ERROR_1be002d8,
6633 "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout 0x%" PRIxLEAST64 " (set < %" PRIu32
6635 func_name, set, layout_u64, static_cast<uint32_t>(set_layouts.size()));
6641 static void PreCallRecordCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6642 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
6643 uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
6644 const auto &pipeline_layout = getPipelineLayout(device_data, layout);
6645 if (!pipeline_layout) return;
6646 std::unique_ptr<cvdescriptorset::DescriptorSet> new_desc{
6647 new cvdescriptorset::DescriptorSet(0, 0, pipeline_layout->set_layouts[set], 0, device_data)};
6649 std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {new_desc.get()};
6650 UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
6651 cb_state->lastBound[pipelineBindPoint].push_descriptor_set = std::move(new_desc);
6652 cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
// Layer intercept for vkCmdPushDescriptorSetKHR: validate, record, then forward.
// NOTE(review): excerpt appears to be missing lines (the '!skip' guard, the trailing
// 'pDescriptorWrites' argument lines of the last two calls, closing braces).
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                   VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                   const VkWriteDescriptorSet *pDescriptorWrites) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    auto cb_state = GetCBNode(device_data, commandBuffer);
    bool skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
                                                       pDescriptorWrites, "vkCmdPushDescriptorSetKHR()");
    // Record into layer state, then forward to the driver
    PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
    device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
6672 static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
6673 switch (indexType) {
6674 case VK_INDEX_TYPE_UINT16:
6676 case VK_INDEX_TYPE_UINT32:
6679 // Not a real index type. Express no alignment requirement here; we expect upper layer
6680 // to have already picked up on the enum being nonsense.
// Layer intercept for vkCmdBindIndexBuffer: validates buffer usage/memory binding, queue
// support, and offset alignment, records the binding into the CB node, then forwards.
// NOTE(review): excerpt appears to be missing lines ('bool skip = false;', an early-out on
// skip, lock handling, closing braces).
VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
                                              VkIndexType indexType) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    auto buffer_state = GetBufferState(dev_data, buffer);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    assert(buffer_state);
    // Buffer must have been created with INDEX_BUFFER usage and have memory bound
    skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, VALIDATION_ERROR_17e00362,
                                     "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
    skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
    skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
    skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
    // Offset must be a multiple of the index element size
    auto offset_align = GetIndexAlignment(indexType);
    if (offset % offset_align) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_17e00360,
                        "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
                        string_VkIndexType(indexType));
    // Record the binding so later indexed draws can be range-checked
    cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    cb_node->index_buffer_binding.buffer = buffer;
    cb_node->index_buffer_binding.size = buffer_state->createInfo.size;
    cb_node->index_buffer_binding.offset = offset;
    cb_node->index_buffer_binding.index_type = indexType;
    dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6721 void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6722 uint32_t end = firstBinding + bindingCount;
6723 if (pCB->currentDrawData.buffers.size() < end) {
6724 pCB->currentDrawData.buffers.resize(end);
6726 for (uint32_t i = 0; i < bindingCount; ++i) {
6727 pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6731 static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
// Layer intercept for vkCmdBindVertexBuffers: validates each buffer's usage, memory
// binding, and offset, records the bindings for draw-time tracking, then forwards.
// NOTE(review): excerpt appears to be missing lines ('bool skip = false;', an early-out on
// skip, lock handling, closing braces).
VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
                                                const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);  // serialize access to layer-wide state
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
    skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
    // Per-buffer checks: VERTEX_BUFFER usage, memory bound, offset within the buffer
    for (uint32_t i = 0; i < bindingCount; ++i) {
        auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
        assert(buffer_state);
        skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, VALIDATION_ERROR_182004e6,
                                         "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
        if (pOffsets[i] >= buffer_state->createInfo.size) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(buffer_state->buffer), VALIDATION_ERROR_182004e4,
                            "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
    // Record bindings for draw-time resource tracking, then forward to the driver
    updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6765 // Generic function to handle validation for all CmdDraw* type functions
// Shared validation for all CmdDraw*-family entry points: resolves the CB node into
// *cb_state and checks queue capability, recording state, draw state, and render pass
// scope (draws must be inside a render pass for graphics, outside for compute-style use).
// NOTE(review): excerpt appears to be missing 'bool skip = false;' and 'return skip;'.
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
                                UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
                                UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
    *cb_state = GetCBNode(dev_data, cmd_buffer);  // out-param: callers reuse the node for recording
    skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
    skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
    skip |= ValidateDrawState(dev_data, *cb_state, cmd_type, indexed, bind_point, caller, dynamic_state_msg_code);
    // Graphics binds must be inside a render pass; other bind points must be outside
    skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
                                                            : insideRenderPass(dev_data, *cb_state, caller, msg_code);
6782 // Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
6783 static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6784 UpdateDrawState(dev_data, cb_state, bind_point);
6787 // Generic function to handle state update for all CmdDraw* type functions
6788 static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6789 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6790 updateResourceTrackingOnDraw(cb_state);
6791 cb_state->hasDrawCmd = true;
6794 static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
6795 GLOBAL_CB_NODE **cb_state, const char *caller) {
6796 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6797 VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
6800 static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6801 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
// Layer intercept for vkCmdDraw: validate, forward to the driver, then record state.
// NOTE(review): excerpt appears to be missing the '!skip' guard / lock handling around the
// dispatch and record calls, and closing braces.
VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;  // filled in by validation
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
    dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
    PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
// Validation for vkCmdDrawIndexed: generic draw checks plus a range check that the indices
// read by this draw fit inside the currently bound index buffer.
// NOTE(review): excerpt appears to be missing lines ('bool skip =' before the first call,
// the 'index_size = 2;'/'index_size = 4;' assignments, closing braces, 'return skip;').
static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
                                          VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller,
                                          uint32_t indexCount, uint32_t firstIndex) {
    ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
                        VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
    // Only range-check if an index buffer is actually bound
    if (!skip && ((*cb_state)->status & CBSTATUS_INDEX_BUFFER_BOUND)) {
        unsigned int index_size = 0;
        const auto &index_buffer_binding = (*cb_state)->index_buffer_binding;
        // Element size follows the bound index type (2 bytes for UINT16, 4 for UINT32)
        if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT16) {
        } else if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT32) {
        // Last byte the draw will read; VkDeviceSize cast avoids 32-bit overflow in the sum
        VkDeviceSize end_offset = (index_size * ((VkDeviceSize)firstIndex + indexCount)) + index_buffer_binding.offset;
        if (end_offset > index_buffer_binding.size) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                            HandleToUint64(index_buffer_binding.buffer), VALIDATION_ERROR_1a40039e,
                            "vkCmdDrawIndexed() index size (%d) * (firstIndex (%d) + indexCount (%d)) "
                            "+ binding offset (%" PRIuLEAST64 ") = an ending offset of %" PRIuLEAST64
                            "which is greater than the index buffer size (%" PRIuLEAST64 ").",
                            index_size, firstIndex, indexCount, index_buffer_binding.offset, end_offset, index_buffer_binding.size);
6847 static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6848 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
// Layer intercept for vkCmdDrawIndexed: validate (including index-range check), forward to
// the driver, then record state.
// NOTE(review): excerpt appears to be missing the '!skip' guard / lock handling around the
// dispatch and record calls, and closing braces.
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
                                          uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;  // filled in by validation
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
                                              "vkCmdDrawIndexed()", indexCount, firstIndex);
    dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
    PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
// Pre-call validation for vkCmdDrawIndirect: checks common draw-time state and
// that the indirect parameter buffer has memory bound. Outputs the resolved
// command-buffer and buffer state through cb_state/buffer_state.
6867 static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6868 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
6869 const char *caller) {
6871 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6872 VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
6873 *buffer_state = GetBufferState(dev_data, buffer);
6874 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
6875 // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6876 // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
// Post-call bookkeeping for vkCmdDrawIndirect: update draw-type state and bind
// the indirect parameter buffer to the command buffer's resource tracking.
6880 static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6881 BUFFER_STATE *buffer_state) {
6882 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6883 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// Layer intercept for vkCmdDrawIndirect: validate under the global lock, then
// forward to the driver and record post-call state.
6886 VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
6888 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6889 GLOBAL_CB_NODE *cb_state = nullptr;
6890 BUFFER_STATE *buffer_state = nullptr;
6891 unique_lock_t lock(global_lock);
// indexed=false: plain (non-indexed) indirect graphics draw.
6892 bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
6893 &buffer_state, "vkCmdDrawIndirect()");
6896 dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6898 PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
// Pre-call validation for vkCmdDrawIndexedIndirect: checks common draw-time
// state and that the indirect parameter buffer has memory bound.
6903 static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6904 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
6905 BUFFER_STATE **buffer_state, const char *caller) {
6907 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
6908 VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
6909 *buffer_state = GetBufferState(dev_data, buffer);
6910 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
6911 // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6912 // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
// Post-call bookkeeping for vkCmdDrawIndexedIndirect: update draw-type state
// and bind the indirect parameter buffer to the command buffer.
6917 static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6918 BUFFER_STATE *buffer_state) {
6919 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6920 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// Layer intercept for vkCmdDrawIndexedIndirect: validate under the global lock,
// then forward to the driver and record post-call state.
6923 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
6924 uint32_t count, uint32_t stride) {
6925 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6926 GLOBAL_CB_NODE *cb_state = nullptr;
6927 BUFFER_STATE *buffer_state = nullptr;
6928 unique_lock_t lock(global_lock);
// indexed=true: indexed indirect graphics draw.
6929 bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
6930 &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
6933 dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6935 PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
// Pre-call validation for vkCmdDispatch: delegates entirely to the shared
// draw/dispatch-type validator, requiring a compute-capable queue. The final
// VALIDATION_ERROR_UNDEFINED slot indicates no renderpass-scope VUID applies here.
6940 static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
6941 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
6942 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
6943 VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
// Post-call bookkeeping for vkCmdDispatch: updates dispatch-type state on the
// command buffer for the given bind point.
6946 static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6947 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
// Layer intercept for vkCmdDispatch: validate under the global lock, then
// forward the dispatch to the driver and record post-call state.
6950 VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6951 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6952 GLOBAL_CB_NODE *cb_state = nullptr;
6953 unique_lock_t lock(global_lock);
// indexed=false and compute bind point: dispatch is never an indexed draw.
6955 PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
6958 dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
6960 PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
// Pre-call validation for vkCmdDispatchIndirect: checks common dispatch-time
// state on a compute-capable queue and that the indirect parameter buffer has
// memory bound.
6965 static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6966 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
6967 BUFFER_STATE **buffer_state, const char *caller) {
6969 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
6970 VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
6971 *buffer_state = GetBufferState(dev_data, buffer);
6972 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
// Post-call bookkeeping for vkCmdDispatchIndirect: update dispatch-type state
// and bind the indirect parameter buffer to the command buffer.
6976 static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6977 BUFFER_STATE *buffer_state) {
6978 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6979 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// Layer intercept for vkCmdDispatchIndirect: validate under the global lock,
// then forward to the driver and record post-call state.
6982 VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6983 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6984 GLOBAL_CB_NODE *cb_state = nullptr;
6985 BUFFER_STATE *buffer_state = nullptr;
6986 unique_lock_t lock(global_lock);
6987 bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
6988 &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
6991 dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
6993 PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
// Layer intercept for vkCmdCopyBuffer: resolve command buffer and both buffer
// states, validate the copy, record pre-call state, then forward to the driver.
// If any of the three states cannot be resolved, validation is skipped here
// (parameter validation is handled by other layers).
6998 VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6999 uint32_t regionCount, const VkBufferCopy *pRegions) {
7000 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7001 unique_lock_t lock(global_lock);
7003 auto cb_node = GetCBNode(device_data, commandBuffer);
7004 auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7005 auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7007 if (cb_node && src_buffer_state && dst_buffer_state) {
7008 bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
7010 PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
7012 device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
// Layer intercept for vkCmdCopyImage: resolve command buffer and both image
// states, validate the copy (including layouts and regions), record pre-call
// state, then forward to the driver.
7020 VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7021 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7022 const VkImageCopy *pRegions) {
7024 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7025 unique_lock_t lock(global_lock);
7027 auto cb_node = GetCBNode(device_data, commandBuffer);
7028 auto src_image_state = GetImageState(device_data, srcImage);
7029 auto dst_image_state = GetImageState(device_data, dstImage);
7030 if (cb_node && src_image_state && dst_image_state) {
7031 skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
7032 srcImageLayout, dstImageLayout);
7034 PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
7037 device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7046 // Validate that an image's sampleCount matches the requirement for a specific API call
// Returns true (skip) when the image was created with a sample count different
// from 'sample_count'; logs the mismatch against 'msgCode' with 'location' as context.
7047 bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
7048 const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
7050 if (image_state->createInfo.samples != sample_count) {
7051 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
7052 HandleToUint64(image_state->image), msgCode,
7053 "%s for image 0x%" PRIx64 " was created with a sample count of %s but must be %s.", location,
7054 HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
7055 string_VkSampleCountFlagBits(sample_count));
// Layer intercept for vkCmdBlitImage: resolve states, validate the blit
// (layouts, regions, filter), record pre-call state, then forward to the driver.
7060 VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7061 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7062 const VkImageBlit *pRegions, VkFilter filter) {
7063 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7064 unique_lock_t lock(global_lock);
7066 auto cb_node = GetCBNode(dev_data, commandBuffer);
7067 auto src_image_state = GetImageState(dev_data, srcImage);
7068 auto dst_image_state = GetImageState(dev_data, dstImage);
7070 bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
7071 srcImageLayout, dstImageLayout, filter);
7074 PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
7077 dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
// Layer intercept for vkCmdCopyBufferToImage: resolve source-buffer and
// destination-image states, validate the copy, record pre-call state, then
// forward to the driver.
7082 VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
7083 VkImageLayout dstImageLayout, uint32_t regionCount,
7084 const VkBufferImageCopy *pRegions) {
7085 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7086 unique_lock_t lock(global_lock);
7088 auto cb_node = GetCBNode(device_data, commandBuffer);
7089 auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7090 auto dst_image_state = GetImageState(device_data, dstImage);
7091 if (cb_node && src_buffer_state && dst_image_state) {
7092 skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
7093 regionCount, pRegions, "vkCmdCopyBufferToImage()");
7097 // TODO: report VU01244 here, or put in object tracker?
7100 PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
7103 device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
// Layer intercept for vkCmdCopyImageToBuffer: resolve source-image and
// destination-buffer states, validate the copy, record pre-call state, then
// forward to the driver.
7107 VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7108 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7110 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7111 unique_lock_t lock(global_lock);
7113 auto cb_node = GetCBNode(device_data, commandBuffer);
7114 auto src_image_state = GetImageState(device_data, srcImage);
7115 auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7116 if (cb_node && src_image_state && dst_buffer_state) {
7117 skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
7118 regionCount, pRegions, "vkCmdCopyImageToBuffer()");
7122 // TODO: report VU01262 here, or put in object tracker?
7125 PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
7128 device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
// Pre-call validation for vkCmdUpdateBuffer: destination buffer must have
// bound memory and TRANSFER_DST usage; the command buffer must be on a
// transfer/graphics/compute-capable queue, in a valid recording state, and
// outside a render pass instance.
7132 static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
7134 skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
7135 // Validate that DST buffer has correct usage flags set
7136 skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7137 VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7138 skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
7139 VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
7140 skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7141 skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
// Post-call bookkeeping for vkCmdUpdateBuffer.
7145 static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
7146 // Update bindings between buffer and cmd buffer
7147 AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
// Layer intercept for vkCmdUpdateBuffer: validate the destination buffer and
// command-buffer state under the global lock, forward to the driver, then
// record the buffer binding.
7150 VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7151 VkDeviceSize dataSize, const uint32_t *pData) {
7153 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7154 unique_lock_t lock(global_lock);
7156 auto cb_state = GetCBNode(dev_data, commandBuffer);
7158 auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
// State tracking is expected to know this buffer; a miss indicates an invalid handle.
7159 assert(dst_buff_state);
7160 skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
7163 dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7165 PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
// Layer intercept for vkCmdFillBuffer: resolve command-buffer and buffer
// states, validate, record pre-call state, then forward to the driver.
7170 VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7171 VkDeviceSize size, uint32_t data) {
7172 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7173 unique_lock_t lock(global_lock);
7174 auto cb_node = GetCBNode(device_data, commandBuffer);
7175 auto buffer_state = GetBufferState(device_data, dstBuffer);
7177 if (cb_node && buffer_state) {
7178 bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
7180 PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
7182 device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
// Layer intercept for vkCmdClearAttachments: validate only, then forward to the
// driver when validation did not flag the call. No post-call state recording.
7190 VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7191 const VkClearAttachment *pAttachments, uint32_t rectCount,
7192 const VkClearRect *pRects) {
7194 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
// lock_guard (not unique_lock): the lock is held for the whole function, including the dispatch.
7196 lock_guard_t lock(global_lock);
7197 skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7199 if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
// Layer intercept for vkCmdClearColorImage: validate, record pre-call image
// state for the cleared ranges, then forward to the driver.
7202 VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7203 const VkClearColorValue *pColor, uint32_t rangeCount,
7204 const VkImageSubresourceRange *pRanges) {
7205 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7206 unique_lock_t lock(global_lock);
7208 bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
// Shared record helper with CmdClearDepthStencilImage.
7210 PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7212 dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
// Layer intercept for vkCmdClearDepthStencilImage: validate, record pre-call
// image state for the cleared ranges, then forward to the driver.
7216 VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7217 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7218 const VkImageSubresourceRange *pRanges) {
7219 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7220 unique_lock_t lock(global_lock);
7222 bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
// Shared record helper with CmdClearColorImage.
7224 PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7226 dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
// Layer intercept for vkCmdResolveImage: resolve states for both images,
// validate the resolve, record pre-call state, then forward to the driver.
7230 VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7231 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7232 const VkImageResolve *pRegions) {
7233 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7234 unique_lock_t lock(global_lock);
7236 auto cb_node = GetCBNode(dev_data, commandBuffer);
7237 auto src_image_state = GetImageState(dev_data, srcImage);
7238 auto dst_image_state = GetImageState(dev_data, dstImage);
7240 bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
7243 PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
7245 dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
// Layer intercept for vkGetImageSubresourceLayout: validate the subresource
// request under the global lock, then forward the query to the driver.
7250 VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
7251 VkSubresourceLayout *pLayout) {
7252 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7253 unique_lock_t lock(global_lock);
7255 bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
7258 device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
// Deferred event-state updater, invoked at queue-submit time (queued via
// cb_state->eventUpdates): records 'stageMask' for 'event' on both the command
// buffer and, when the queue is known to the layer, on the queue's own map.
7262 bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7263 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7264 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7266 pCB->eventToStageMap[event] = stageMask;
7268 auto queue_data = dev_data->queueMap.find(queue);
7269 if (queue_data != dev_data->queueMap.end()) {
7270 queue_data->second.eventToStageMap[event] = stageMask;
// Layer intercept for vkCmdSetEvent: validate queue capability, recording
// state, render-pass scope, and stage-mask feature enables; bind the event to
// the command buffer; queue a deferred stage-mask update for submit time; then
// forward to the driver unless validation flagged the call.
7275 VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7277 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7278 unique_lock_t lock(global_lock);
7279 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7281 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7282 VALIDATION_ERROR_1d402415);
7283 skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7284 skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
// Rejects geometry/tessellation stage bits when those features are not enabled.
7285 skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
7286 VALIDATION_ERROR_1d4008fe);
7287 auto event_state = GetEventNode(dev_data, event);
7289 addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
7290 event_state->cb_bindings.insert(pCB);
7292 pCB->events.push_back(event);
// Track set-before-wait ordering within this command buffer.
7293 if (!pCB->waitedEvents.count(event)) {
7294 pCB->writeEventsBeforeWait.push_back(event);
// Deferred: the actual stage mask is recorded when this CB is submitted to a queue.
7296 pCB->eventUpdates.emplace_back([=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, stageMask); });
7299 if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
// Layer intercept for vkCmdResetEvent: mirrors CmdSetEvent validation and
// bookkeeping, but the deferred submit-time update clears the event's stage
// mask (records 0) instead of 'stageMask'.
7302 VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7304 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7305 unique_lock_t lock(global_lock);
7306 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7308 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7309 VALIDATION_ERROR_1c402415);
7310 skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7311 skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
7312 skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
7313 VALIDATION_ERROR_1c400906);
7314 auto event_state = GetEventNode(dev_data, event);
7316 addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
7317 event_state->cb_bindings.insert(pCB);
7319 pCB->events.push_back(event);
7320 if (!pCB->waitedEvents.count(event)) {
7321 pCB->writeEventsBeforeWait.push_back(event);
7323 // TODO : Add check for VALIDATION_ERROR_32c008f8
// Deferred: on submit, record stage mask 0 to mark the event as reset.
7324 pCB->eventUpdates.emplace_back(
7325 [=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
7328 if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
7331 // Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT
// Used so that subset comparisons against VkSubpassDependency stage masks treat
// ALL_GRAPHICS as equivalent to the union of the individual graphics stages.
7332 static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
7333 return (inflags != VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
7335 : (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
7336 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7337 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7338 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
7339 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
7340 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
7343 // Verify image barrier image state and that the image is consistent with FB image
// Checks, for a barrier recorded inside a render pass instance: (1) the barrier
// image is one of the framebuffer's attachments, (2) that attachment is
// referenced by the active subpass, (3) oldLayout == newLayout, and (4) the
// barrier's oldLayout matches the layout the subpass declares for the attachment.
7344 static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
7345 VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
7346 uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
7348 const auto &fb_state = GetFramebufferState(device_data, framebuffer);
7350 const auto img_bar_image = img_barrier.image;
7351 bool image_match = false;
7352 bool sub_image_found = false; // Do we find a corresponding subpass description
7353 VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
7354 uint32_t attach_index = 0;
7355 uint32_t index_count = 0;
7356 // Verify that a framebuffer image matches barrier image
7357 for (const auto &fb_attach : fb_state->attachments) {
7358 if (img_bar_image == fb_attach.image) {
7360 attach_index = index_count;
7365 if (image_match) { // Make sure subpass is referring to matching attachment
// Depth/stencil attachment checked first, then color and resolve attachments.
7366 if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
7367 sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
7368 sub_image_found = true;
7370 for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
7371 if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
7372 sub_image_layout = sub_desc.pColorAttachments[j].layout;
7373 sub_image_found = true;
7375 } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
7376 sub_image_layout = sub_desc.pResolveAttachments[j].layout;
7377 sub_image_found = true;
// Attachment is in the framebuffer but not referenced by the active subpass.
7382 if (!sub_image_found) {
7384 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
7385 VALIDATION_ERROR_1b800936,
7386 "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7387 ") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 ").",
7388 funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle);
7390 } else { // !image_match
7391 auto const fb_handle = HandleToUint64(fb_state->framebuffer);
7392 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7393 fb_handle, VALIDATION_ERROR_1b800936,
7394 "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7395 ") does not match an image from the current framebuffer (0x%" PRIx64 ").",
7396 funcName, img_index, HandleToUint64(img_bar_image), fb_handle);
// Within a render pass instance a barrier cannot perform a layout transition.
7398 if (img_barrier.oldLayout != img_barrier.newLayout) {
7399 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7400 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b80093a,
7401 "%s: As the Image Barrier for image 0x%" PRIx64
7402 " is being executed within a render pass instance, oldLayout must equal newLayout yet they are %s and %s.",
7403 funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
7404 string_VkImageLayout(img_barrier.newLayout));
// Barrier layout must agree with the layout the subpass declared for this attachment.
7406 if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
7407 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7408 rp_handle, VALIDATION_ERROR_1b800938,
7409 "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7410 ") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
7411 ") as having layout %s, but image barrier has layout %s.",
7412 funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
7413 string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(sub_image_layout));
7419 // Validate image barriers within a renderPass
// For each image barrier recorded inside a render pass instance: its src/dst
// access masks must be subsets of the active subpass's self-dependency masks,
// queue family indices must both be VK_QUEUE_FAMILY_IGNORED, and the barrier
// image must be consistent with the framebuffer (ValidateImageBarrierImage).
7420 static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7421 uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
7422 VkAccessFlags sub_src_access_mask, VkAccessFlags sub_dst_access_mask,
7423 uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
7425 for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
7426 const auto &img_barrier = image_barriers[i];
7427 const auto &img_src_access_mask = img_barrier.srcAccessMask;
// Subset test: every bit in the barrier mask must also be in the subpass dependency mask.
7428 if (img_src_access_mask != (sub_src_access_mask & img_src_access_mask)) {
7429 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7430 rp_handle, VALIDATION_ERROR_1b80092e,
7431 "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
7432 "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7433 funcName, i, img_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
7435 const auto &img_dst_access_mask = img_barrier.dstAccessMask;
7436 if (img_dst_access_mask != (sub_dst_access_mask & img_dst_access_mask)) {
7437 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7438 rp_handle, VALIDATION_ERROR_1b800930,
7439 "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
7440 "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7441 funcName, i, img_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
// No queue family ownership transfers are allowed inside a render pass instance.
7443 if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
7444 VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
7445 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7446 rp_handle, VALIDATION_ERROR_1b80093c,
7447 "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
7448 "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
7449 funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
7451 // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
7452 if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
7453 assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
7454 // Secondary CB case w/o FB specified delay validation
// Deferred lambda runs at vkCmdExecuteCommands time when the primary CB's FB is known.
7455 cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) {
7456 return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
7460 skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
7461 sub_desc, rp_handle, i, img_barrier);
7467 // Validate VUs for Pipeline Barriers that are within a renderPass
7468 // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
7469 static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7470 VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
7471 VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
7472 const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
7473 const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
7474 const VkImageMemoryBarrier *image_barriers) {
7476 auto rp_state = cb_state->activeRenderPass;
7477 const auto active_subpass = cb_state->activeSubpass;
7478 auto rp_handle = HandleToUint64(rp_state->renderPass);
7479 if (!rp_state->hasSelfDependency[active_subpass]) {
7481 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
7482 VALIDATION_ERROR_1b800928,
7483 "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64 " with no self-dependency specified.",
7484 funcName, active_subpass, rp_handle);
7486 assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
7487 // Grab ref to current subpassDescription up-front for use below
7488 const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
7489 const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[active_subpass]];
7490 const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
7491 const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
7492 if ((sub_src_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
7493 (src_stage_mask != (sub_src_stage_mask & src_stage_mask))) {
7494 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7495 rp_handle, VALIDATION_ERROR_1b80092a,
7496 "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of subpass "
7497 "%d of renderPass 0x%" PRIx64 ".",
7498 funcName, src_stage_mask, sub_src_stage_mask, active_subpass, rp_handle);
7500 if ((sub_dst_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
7501 (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask))) {
7502 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7503 rp_handle, VALIDATION_ERROR_1b80092c,
7504 "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of subpass "
7505 "%d of renderPass 0x%" PRIx64 ".",
7506 funcName, dst_stage_mask, sub_dst_stage_mask, active_subpass, rp_handle);
7508 if (0 != buffer_mem_barrier_count) {
7509 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7510 rp_handle, VALIDATION_ERROR_1b800934,
7511 "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass 0x%" PRIx64 ".", funcName,
7512 buffer_mem_barrier_count, active_subpass, rp_handle);
7514 const auto &sub_src_access_mask = sub_dep.srcAccessMask;
7515 const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
7516 for (uint32_t i = 0; i < mem_barrier_count; ++i) {
7517 const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
7518 if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
7519 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7520 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b80092e,
7521 "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
7522 "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7523 funcName, i, mb_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
7525 const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
7526 if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
7527 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7528 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b800930,
7529 "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
7530 "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7531 funcName, i, mb_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
7534 skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle,
7535 sub_src_access_mask, sub_dst_access_mask, image_mem_barrier_count, image_barriers);
7536 if (sub_dep.dependencyFlags != dependency_flags) {
7537 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7538 rp_handle, VALIDATION_ERROR_1b800932,
7539 "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value (0x%X) for "
7540 "subpass %d of renderPass 0x%" PRIx64 ".",
7541 funcName, dependency_flags, sub_dep.dependencyFlags, cb_state->activeSubpass, rp_handle);
// Array to mask individual accessMask to corresponding stageMask
// accessMask active bit position (0-31) maps to index
// Used by ValidateAccessMaskPipelineStage: for each access bit set in a barrier, the entry at
// the same index holds the full set of pipeline stages allowed to perform that access type.
// NOTE(review): declared with 20 elements, but initializers past bit 18 fall outside this
// excerpt -- confirm the trailing entries against the full file.
const static VkPipelineStageFlags AccessMaskToPipeStage[20] = {
    // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
    // VK_ACCESS_INDEX_READ_BIT = 1
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_UNIFORM_READ_BIT = 3
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    // VK_ACCESS_SHADER_READ_BIT = 5
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_SHADER_WRITE_BIT = 6
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_TRANSFER_READ_BIT = 11
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_TRANSFER_WRITE_BIT = 12
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_HOST_READ_BIT = 13
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_HOST_WRITE_BIT = 14
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_MEMORY_READ_BIT = 15
    // 0x7FFFFFFF sentinel: intersects with any expanded stage mask, so the check always passes
    VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
    // VK_ACCESS_MEMORY_WRITE_BIT = 16
    VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
    // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
    // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
// Verify that all bits of access_mask are supported by the src_stage_mask
// Returns true when every access type named in access_mask is one that at least one stage
// in stage_mask can perform, per the AccessMaskToPipeStage table above.
static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
    // Early out if all commands set, or access_mask NULL
    if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
    // Expand meta-stages (e.g. ALL_GRAPHICS) into their constituent stage bits before comparing
    stage_mask = ExpandPipelineStageFlags(stage_mask);
    // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
    while (access_mask) {
        index = (u_ffs(access_mask) - 1);  // u_ffs is 1-based find-first-set, hence the -1
        // Must have "!= 0" compare to prevent warning from MSVC
        if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false; // early out
        access_mask &= ~(1 << index); // Mask off bit that's been checked
// Machinery for validating the queue-family-ownership-transfer rules of buffer and image
// memory barriers (srcQueueFamilyIndex / dstQueueFamilyIndex semantics).
namespace barrier_queue_families {
// VuIndex enumerators: one per queue-family valid-usage rule. They index both the
// vu_summary strings and the image/buffer error-code tables below, so all three
// lists must stay in the same order.
    kSrcOrDstMustBeIgnore,
    kSpecialOrIgnoreOnly,
    kSrcIgnoreRequiresDstIgnore,
    kDstValidOrSpecialIfNotIgnore,
    kSrcValidOrSpecialIfNotIgnore,
    kSrcAndDestMustBeIgnore,
    kBothIgnoreOrBothValid,
    kSubmitQueueMustMatchSrcOrDst
// Human-readable one-line summaries of each rule, indexed by VuIndex; appended to every
// error message emitted by ValidatorState::LogMsg / ValidateAtQueueSubmit.
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
                                   "Source or destination queue family must be special or ignored.",
                                   "Destination queue family must be ignored if source queue family is.",
                                   "Destination queue family must be valid, ignored, or special.",
                                   "Source queue family must be valid, ignored, or special.",
                                   "Source and destination queue family must both be ignored.",
                                   "Source and destination queue family must both be ignore or both valid.",
                                   "Source or destination queue family must match submit queue family, if not ignored."};
// Unique validation error codes for VkImageMemoryBarrier violations, indexed by VuIndex.
static const UNIQUE_VALIDATION_ERROR_CODE image_error_codes[] = {
    VALIDATION_ERROR_0a000aca, // VUID-VkImageMemoryBarrier-image-01381 -- kSrcOrDstMustBeIgnore
    VALIDATION_ERROR_0a000dcc, // VUID-VkImageMemoryBarrier-image-01766 -- kSpecialOrIgnoreOnly
    VALIDATION_ERROR_0a000962, // VUID-VkImageMemoryBarrier-image-01201 -- kSrcIgnoreRequiresDstIgnore
    VALIDATION_ERROR_0a000dd0, // VUID-VkImageMemoryBarrier-image-01768 -- kDstValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0a000dce, // VUID-VkImageMemoryBarrier-image-01767 -- kSrcValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0a00095e, // VUID-VkImageMemoryBarrier-image-01199 -- kSrcAndDestMustBeIgnore
    VALIDATION_ERROR_0a000960, // VUID-VkImageMemoryBarrier-image-01200 -- kBothIgnoreOrBothValid
    VALIDATION_ERROR_0a00096a, // VUID-VkImageMemoryBarrier-image-01205 -- kSubmitQueueMustMatchSrcOrDst
// Unique validation error codes for VkBufferMemoryBarrier violations, indexed by VuIndex.
static const UNIQUE_VALIDATION_ERROR_CODE buffer_error_codes[] = {
    VALIDATION_ERROR_0180094e, // VUID-VkBufferMemoryBarrier-buffer-01191 -- kSrcOrDstMustBeIgnore
    VALIDATION_ERROR_01800dc6, // VUID-VkBufferMemoryBarrier-buffer-01763 -- kSpecialOrIgnoreOnly
    VALIDATION_ERROR_01800952, // VUID-VkBufferMemoryBarrier-buffer-01193 -- kSrcIgnoreRequiresDstIgnore
    VALIDATION_ERROR_01800dca, // VUID-VkBufferMemoryBarrier-buffer-01765 -- kDstValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_01800dc8, // VUID-VkBufferMemoryBarrier-buffer-01764 -- kSrcValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0180094c, // VUID-VkBufferMemoryBarrier-buffer-01190 -- kSrcAndDestMustBeIgnore
    VALIDATION_ERROR_01800950, // VUID-VkBufferMemoryBarrier-buffer-01192 -- kBothIgnoreOrBothValid
    VALIDATION_ERROR_01800958, // VUID-VkBufferMemoryBarrier-buffer-01196 -- kSubmitQueueMustMatchSrcOrDst
// Bundles everything needed to validate and report on the queue family indices of a single
// buffer or image memory barrier: reporting boilerplate, the barrier's handle/sharing mode,
// and the VuIndex-ordered error-code table appropriate to the barrier's object type.
class ValidatorState {
    // Generic constructor used by the type-specific delegating constructors below.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
                   const UNIQUE_VALIDATION_ERROR_CODE *val_codes)
        : report_data_(device_data->report_data),
          func_name_(func_name),
          cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
          barrier_handle64_(barrier_handle64),
          sharing_mode_(sharing_mode),
          object_type_(object_type),
          val_codes_(val_codes),
          // limit_ = number of queue families on the device; indices >= limit_ are invalid
          limit_(static_cast<uint32_t>(device_data->phys_dev_properties.queue_family_properties.size())),
          // mem_ext_ = VK_KHR_external_memory enabled, which permits the "special" families
          mem_ext_(device_data->extensions.vk_khr_external_memory) {}
// Create a validator state from an image state... reducing the image specific to the generic version.
// Uses the image handle, the image's sharingMode, and the image VUID table.
ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
               const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
    : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
                     kVulkanObjectTypeImage, image_error_codes) {}
7677 // Create a validator state from an buffer state... reducing the buffer specific to the generic version.
7678 ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7679 const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
7680 : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
7681 kVulkanObjectTypeImage, buffer_error_codes) {}
// Log the messages using boilerplate from object state, and Vu specific information from the template arg
// One and two family versions, in the single family version, Vu holds the name of the passed parameter
// Single-family overload: reports one offending queue family index under the given parameter name.
// Returns the log_msg result (true when the message is flagged as an error to be skipped).
bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
    const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
    const char *annotation = GetFamilyAnnotation(family);
    return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                   val_code, "%s: Barrier using %s 0x%" PRIx64 " created with sharingMode %s, has %s %u%s. %s", func_name_,
                   GetTypeString(), barrier_handle64_, GetModeString(), param_name, family, annotation, vu_summary[vu_index]);

// Two-family overload: reports both srcQueueFamilyIndex and dstQueueFamilyIndex with annotations.
bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
    const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
    const char *src_annotation = GetFamilyAnnotation(src_family);
    const char *dst_annotation = GetFamilyAnnotation(dst_family);
    return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                   "%s: Barrier using %s 0x%" PRIx64
                   " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
                   func_name_, GetTypeString(), barrier_handle64_, GetModeString(), src_family, src_annotation, dst_family,
                   dst_annotation, vu_summary[vu_index]);
// This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed
// data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceed the guarantees of validity for
// application input.
// Checks kSubmitQueueMustMatchSrcOrDst: for an exclusive-mode ownership transfer, the submitting
// queue's family must match either the barrier's source or destination family.
static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
                                  const ValidatorState &val) {
    auto queue_data_it = device_data->queueMap.find(queue);
    // Unknown queue: nothing to validate against
    if (queue_data_it == device_data->queueMap.end()) return false;
    uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
    if ((src_family != queue_family) && (dst_family != queue_family)) {
        const UNIQUE_VALIDATION_ERROR_CODE val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
        const char *src_annotation = val.GetFamilyAnnotation(src_family);
        const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
        return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                       HandleToUint64(queue), val_code,
                       "%s: Barrier submitted to queue with family index %u, using %s 0x%" PRIx64
                       " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
                       "vkQueueSubmit", queue_family, val.GetTypeString(), val.barrier_handle64_, val.GetModeString(),
                       src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
// Logical helpers for semantic clarity
// KhrExternalMem: true when VK_KHR_external_memory is enabled on the device.
inline bool KhrExternalMem() const { return mem_ext_; }
// IsValid: family index is an actual family on this physical device.
inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
// IsSpecial: one of the reserved external/foreign sentinel families.
inline bool IsSpecial(uint32_t queue_family) const {
    return (queue_family == VK_QUEUE_FAMILY_EXTERNAL_KHR) || (queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT);
// IsValidOrSpecial: special families only count when the external-memory extension is enabled.
inline bool IsValidOrSpecial(uint32_t queue_family) const {
    return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }

// Helpers for LogMsg (and log_msg)
const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }

// Descriptive text for the various types of queue family index
// Maps a family index to a short parenthesized annotation for error messages.
const char *GetFamilyAnnotation(uint32_t family) const {
    const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
    const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
    const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
    const char *valid = " (VALID)";
    const char *invalid = " (INVALID)";
    // NOTE(review): the switch over the sentinel values returns the matching annotation
    // string per case -- some return lines fall outside this excerpt.
    case VK_QUEUE_FAMILY_EXTERNAL_KHR:
    case VK_QUEUE_FAMILY_FOREIGN_EXT:
    case VK_QUEUE_FAMILY_IGNORED:
    if (IsValid(family)) {

const char *GetTypeString() const { return object_string[object_type_]; }
VkSharingMode GetSharingMode() const { return sharing_mode_; }
const debug_report_data *const report_data_;  // layer debug-report context for log_msg
const char *const func_name_;                 // API entry point name for message prefixes
const uint64_t cb_handle64_;                  // command buffer handle, pre-converted for logging
const uint64_t barrier_handle64_;             // buffer/image handle named by the barrier
const VkSharingMode sharing_mode_;            // sharing mode of the barrier's resource
const VulkanObjectType object_type_;          // image vs. buffer, selects GetTypeString() output
const UNIQUE_VALIDATION_ERROR_CODE *val_codes_;  // VuIndex-ordered error code table
const uint32_t limit_;                        // queue family count; indices >= limit_ are invalid
const bool mem_ext_;                          // VK_KHR_external_memory enabled
// Core queue-family validation for a single barrier, shared by the image and buffer wrappers.
// The applicable rule set depends on whether VK_KHR_external_memory is enabled and on the
// resource's sharing mode; exclusive-mode true transfers also enqueue a submit-time check.
bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
              const uint32_t src_queue_family, const uint32_t dst_queue_family) {
    const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
    const bool src_ignored = val.IsIgnored(src_queue_family);
    const bool dst_ignored = val.IsIgnored(dst_queue_family);
    if (val.KhrExternalMem()) {
        if (mode_concurrent) {
            // Concurrent sharing: at least one side must be VK_QUEUE_FAMILY_IGNORED...
            if (!(src_ignored || dst_ignored)) {
                skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
            // ...and the other side may only be ignored or a special (external/foreign) family.
            if ((src_ignored && !(dst_ignored || val.IsSpecial(dst_queue_family))) ||
                (dst_ignored && !(src_ignored || val.IsSpecial(src_queue_family)))) {
                skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
            // VK_SHARING_MODE_EXCLUSIVE
            if (src_ignored && !dst_ignored) {
                skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
            if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
                skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
            if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
                skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
        // No memory extension
        if (mode_concurrent) {
            // Without the extension, concurrent sharing requires both sides ignored.
            if (!src_ignored || !dst_ignored) {
                skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
            // VK_SHARING_MODE_EXCLUSIVE
            if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
                skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
    // A real ownership transfer (exclusive mode, both families explicit) can only be fully
    // checked once we know which queue it is submitted on.
    if (!mode_concurrent && !src_ignored && !dst_ignored) {
        // Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
        // TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
        // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
        // to a local queue of update_state_actions or something.
        cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
            return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
7829 } // namespace barrier_queue_families
// Type specific wrapper for image barriers
// Reduces an image barrier + image state to the generic queue-family validation.
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
                                  const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
    // State data is required
    // Create the validator state from the image state
    barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
    const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
    const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
    return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
// Type specific wrapper for buffer barriers
// Reduces a buffer barrier + buffer state to the generic queue-family validation.
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
                                  const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
    // State data is required
    // Create the validator state from the buffer state
    barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
    const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
    const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
    return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
// Validates the full barrier set of a vkCmdPipelineBarrier/vkCmdWaitEvents call:
// access-mask/stage-mask compatibility for all three barrier kinds, plus image layout,
// queue family, memory binding, subresource range, and buffer offset/size checks.
static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                             VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    // Global memory barriers: each access mask must be performable by the matching stage mask
    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        const auto &mem_barrier = pMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
                            "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier.srcAccessMask, src_stage_mask);
        if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
                            "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier.dstAccessMask, dst_stage_mask);
    // Image memory barriers: access/stage checks plus image-specific rules
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
                            "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier->srcAccessMask, src_stage_mask);
        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
                            "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier->dstAccessMask, dst_stage_mask);
        // Queue family ownership transfer rules for this image barrier
        auto image_data = GetImageState(device_data, mem_barrier->image);
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);
        // Transitions *to* UNDEFINED/PREINITIALIZED are never legal
        if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0a00095c,
                            "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
        // There is no VUID for this, but there is blanket text:
        // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
        // recording commands in a command buffer."
        // TODO: Update this when VUID is defined
        skip |= ValidateMemoryIsBoundToImage(device_data, image_data, funcName, VALIDATION_ERROR_UNDEFINED);
        auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
        skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
        std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
        skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
                                                     param_name.c_str());
    // Buffer memory barriers: access/stage checks plus buffer-specific rules
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (!mem_barrier) continue;
        if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
                            "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
                            mem_barrier->srcAccessMask, src_stage_mask);
        if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
                            "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
                            mem_barrier->dstAccessMask, dst_stage_mask);
        // Validate buffer barrier queue family indices
        auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
        skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);
        // There is no VUID for this, but there is blanket text:
        // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
        // recording commands in a command buffer"
        // TODO: Update this when VUID is defined
        skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, funcName, VALIDATION_ERROR_UNDEFINED);
        // offset must be inside the buffer, and offset+size (when not WHOLE_SIZE) must not overrun it
        auto buffer_size = buffer_state->createInfo.size;
        if (mem_barrier->offset >= buffer_size) {
                device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_01800946,
                "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
                funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                HandleToUint64(buffer_size));
        } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0180094a,
                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                    " whose sum is greater than total size 0x%" PRIx64 ".",
                    funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
                    HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
// Submit-time check for vkCmdWaitEvents: the recorded srcStageMask must equal the bitwise OR
// of the stageMask values used when the waited-on events were set (optionally including
// VK_PIPELINE_STAGE_HOST_BIT for events set via vkSetEvent).
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    // Accumulate the set-time stage masks of every event this wait refers to
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end()) return false;
        // Prefer the per-queue record (event set on this queue)...
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        // ...otherwise fall back to the global event state
        auto global_event_data = GetEventNode(dev_data, event);
        if (!global_event_data) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                            HandleToUint64(event), DRAWSTATE_INVALID_EVENT,
                            "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
        stageMask |= global_event_data->stageMask;
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1e62d401,
                        "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
                        "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
                        "vkSetEvent but instead is 0x%X.",
                        sourceStageMask, stageMask);
// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
// Maps each checked pipeline stage bit to the queue capability flags that support it;
// consumed via operator[] in CheckStageMaskQueueCompatibility (hence non-const).
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
// Iteration order for CheckStageMaskQueueCompatibility; contains exactly the stage bits that
// have an entry in supported_pipeline_stages_table above.
static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
// Reports an error (using the caller-supplied VUID) for every stage bit in stage_mask that is
// not supported by the given queue family capability flags.
bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
    // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
    for (const auto &item : stage_flag_bit_array) {
        if (stage_mask & item) {
            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code,
                                "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
                                function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
// Check if all barriers are of a given operation type.
// op_check is a predicate (e.g. IsReleaseOp/IsAcquireOp) applied to each barrier against the
// command pool's queue family; returns false immediately on the first non-matching barrier.
template <typename Barrier, typename OpCheck>
static bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
    // No pool means we cannot classify the barriers at all
    if (!pool) return false;
    for (uint32_t b = 0; b < count; b++) {
        if (!op_check(pool, barriers + b)) return false;
// Classification of a barrier set for queue-capability validation purposes.
enum BarrierOperationsType {
    kAllAcquire,  // All Barrier operations are "ownership acquire" operations
    kAllRelease,  // All Barrier operations are "ownership release" operations
    kGeneral,     // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
BarrierOperationsType ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
                                                   const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count,
                                                   const VkImageMemoryBarrier *image_barriers) {
    auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
    BarrierOperationsType op_type = kGeneral;

    // Look at the barrier details only if they exist
    // Note: AllTransferOp returns true for count == 0
    if ((buffer_barrier_count + image_barrier_count) != 0) {
        if (AllTransferOp(pool, IsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
            AllTransferOp(pool, IsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllRelease;
        } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
                   AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
            op_type = kAllAcquire;
// Validate that the src/dst stage masks are supported by the queue family the command buffer's
// pool was created for. Checks are skipped when the barrier batch is a pure ownership
// acquire (src mask irrelevant) or pure release (dst mask irrelevant), or when
// VK_PIPELINE_STAGE_ALL_COMMANDS_BIT is present (always valid). Returns true if an error was logged.
8096 bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
8097                                                 VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
8098                                                 BarrierOperationsType barrier_op_type, const char *function,
8099                                                 UNIQUE_VALIDATION_ERROR_CODE error_code) {
8101 uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
8102 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
8103 auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
8105 // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
8106 // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
8107 // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
// Guard against an out-of-range family index before indexing the properties vector.
8109 if (queue_family_index < physical_device_state->queue_family_properties.size()) {
8110 VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
8112 // Only check the source stage mask if any barriers aren't "acquire ownership"
8113 if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8114 skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
8115                                          function, "srcStageMask", error_code);
8117 // Only check the dest stage mask if any barriers aren't "release ownership"
8118 if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8119 skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
8120                                          function, "dstStageMask", error_code);
// Intercept for vkCmdWaitEvents: validates stage masks, queue capabilities and barriers,
// records event/barrier state into the command buffer node, then calls down the chain.
8126 VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
8127                                          VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
8128                                          uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8129                                          uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8130                                          uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8132 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8133 unique_lock_t lock(global_lock);
8134 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Classify the barrier batch so pure acquire/release batches relax the stage-mask checks.
8136 auto barrier_op_type = ComputeBarrierOperationsType(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8137                                                     imageMemoryBarrierCount, pImageMemoryBarriers);
8138 skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, barrier_op_type,
8139                                                    "vkCmdWaitEvents", VALIDATION_ERROR_1e600918);
// Geometry/tessellation stage bits require the corresponding device features to be enabled.
8140 skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
8141                                      VALIDATION_ERROR_1e600912);
8142 skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
8143                                      VALIDATION_ERROR_1e600914);
8144 skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8145                               VALIDATION_ERROR_1e602415);
8146 skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8147 skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
8148 skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
8149                          pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
8150                          pImageMemoryBarriers);
// Remember where this call's events start so the deferred submit-time check can scope to them.
8152 auto first_event_index = cb_state->events.size();
8153 for (uint32_t i = 0; i < eventCount; ++i) {
8154 auto event_state = GetEventNode(dev_data, pEvents[i]);
// Bind event <-> command buffer both ways so invalidation propagates in either direction.
8156 addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent},
8158 event_state->cb_bindings.insert(cb_state);
8160 cb_state->waitedEvents.insert(pEvents[i]);
8161 cb_state->events.push_back(pEvents[i]);
// Defer the stage-mask-vs-SetEvent check to queue submit time, when event state is known.
8163 cb_state->eventUpdates.emplace_back(
8164     [=](VkQueue q) { return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
8165 TransitionImageLayouts(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
8170 dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8171                                        memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8172                                        imageMemoryBarrierCount, pImageMemoryBarriers);
// Pre-call validation for vkCmdPipelineBarrier: stage masks vs queue capabilities,
// command/queue-flag checks, render-pass self-dependency rules, and per-barrier validation.
// Returns true if validation failed and the call should be skipped.
8175 static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
8176                                               VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8177                                               uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8178                                               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8179                                               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8181 auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8182                                                     imageMemoryBarrierCount, pImageMemoryBarriers);
8183 skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, barrier_op_type,
8184                                                    "vkCmdPipelineBarrier", VALIDATION_ERROR_1b80093e);
8185 skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
8186                               VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
8187 skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8188 skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
8189                                      VALIDATION_ERROR_1b800924);
8190 skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
8191                                      VALIDATION_ERROR_1b800926);
// Inside a render pass the barrier must match a declared subpass self-dependency.
8192 if (cb_state->activeRenderPass) {
8193 skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
8194                                            dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8195                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8196 if (skip) return true;  // Early return to avoid redundant errors from below calls
8199 ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
8200 skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
8201                          pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
8202                          pImageMemoryBarriers);
// Pre-call state recording for vkCmdPipelineBarrier: apply the image layout transitions
// described by the image memory barriers to the command buffer's tracked layout map.
8206 static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
8207                                             uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8208 TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
// Intercept for vkCmdPipelineBarrier: validate, record layout transitions, then call down the chain.
8211 VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
8212                                               VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8213                                               uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8214                                               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8215                                               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8217 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
// global_lock protects the layer's shared state maps while validating/recording.
8218 unique_lock_t lock(global_lock);
8219 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
8221 skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
8222                                           memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8223                                           pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8225 PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8232 device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8233                                                memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8234                                                pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
// Submit-time callback: mark a query's availability state (`value`) in both the
// command buffer's and (if known) the queue's query state maps.
8238 static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8239 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8240 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8242 pCB->queryToStateMap[object] = value;
// Mirror the state on the queue so cross-command-buffer query checks can see it.
8244 auto queue_data = dev_data->queueMap.find(queue);
8245 if (queue_data != dev_data->queueMap.end()) {
8246 queue_data->second.queryToStateMap[object] = value;
// Intercept for vkCmdBeginQuery: validate queue flags / command state, call down the chain,
// then record the query as active/started on the command buffer.
8251 VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8253 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8254 unique_lock_t lock(global_lock);
8255 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8257 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8258                               VALIDATION_ERROR_17802415);
8259 skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8265 dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
// Record state after the down-chain call succeeds.
8269 QueryObject query = {queryPool, slot};
8270 pCB->activeQueries.insert(query);
8271 pCB->startedQueries.insert(query);
// Bind the query pool to this command buffer for invalidation tracking.
8272 addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8273                         {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
// Intercept for vkCmdEndQuery: verify the query was actually begun in this command buffer,
// validate command state, call down the chain, then mark the query ended and schedule a
// submit-time availability update.
8277 VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8279 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8280 unique_lock_t lock(global_lock);
8281 QueryObject query = {queryPool, slot};
8282 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Ending a query that was never begun (in this command buffer) is an error.
8284 if (!cb_state->activeQueries.count(query)) {
8285 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8286                 HandleToUint64(commandBuffer), VALIDATION_ERROR_1ae00f06,
8287                 "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d.", HandleToUint64(queryPool),
8290 skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8291                               VALIDATION_ERROR_1ae02415);
8292 skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "VkCmdEndQuery()");
8298 dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
8302 cb_state->activeQueries.erase(query);
// At submit time, mark the query available (true) in the state maps.
8303 cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
8304 addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8305                         {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
// Intercept for vkCmdResetQueryPool: must be outside a render pass; records each reset query
// as unavailable (false) at submit time and snapshots the waited-event set per query.
8309 VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8310                                              uint32_t queryCount) {
8312 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8313 unique_lock_t lock(global_lock);
8314 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8315 skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
8316 skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
8317 skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8318                               VALIDATION_ERROR_1c602415);
8323 dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8326 for (uint32_t i = 0; i < queryCount; i++) {
8327 QueryObject query = {queryPool, firstQuery + i};
// Snapshot which events had been waited on before this reset, for later validation.
8328 cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
// At submit time, mark the query unavailable (false).
8329 cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, false); });
8331 addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8332                         {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
// Returns true if the query at (queryPool, queryIndex) is not known to be available:
// either the queue's map records it as unavailable, or — when the queue has no entry —
// the device-wide map is missing it or records it as unavailable.
8335 static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
8336 QueryObject query = {queryPool, queryIndex};
8337 auto query_data = queue_data->queryToStateMap.find(query);
8338 if (query_data != queue_data->queryToStateMap.end()) {
8339 if (!query_data->second) return true;
// Fall back to the device-wide query state when the queue has no record.
8341 auto it = dev_data->queryToStateMap.find(query);
8342 if (it == dev_data->queryToStateMap.end() || !it->second) return true;
// Submit-time callback for vkCmdCopyQueryPoolResults: report an error for each query in the
// copied range that is not available at execution time. Returns true if any error was logged.
8348 static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8350 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8351 auto queue_data = GetQueueState(dev_data, queue);
// Unknown queue: nothing to validate against.
8352 if (!queue_data) return false;
8353 for (uint32_t i = 0; i < queryCount; i++) {
8354 if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
8355 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8356                 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_QUERY,
8357                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8358                 HandleToUint64(queryPool), firstQuery + i);
// Intercept for vkCmdCopyQueryPoolResults: validate the destination buffer (bound memory,
// TRANSFER_DST usage), command/queue state, call down the chain, then record buffer bindings
// and a submit-time query-availability check.
8364 VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8365                                                    uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8366                                                    VkDeviceSize stride, VkQueryResultFlags flags) {
8368 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8369 unique_lock_t lock(global_lock);
8371 auto cb_node = GetCBNode(dev_data, commandBuffer);
8372 auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
8373 if (cb_node && dst_buff_state) {
8374 skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
8375 // Validate that DST buffer has correct usage flags set
8377 ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
8378                          "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8379 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
8380                               VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
8381 skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
// This command is disallowed inside a render pass.
8382 skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
8388 dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
8392 if (cb_node && dst_buff_state) {
8393 AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
// Defer the query-availability check to submit time via validateQuery.
8394 cb_node->queryUpdates.emplace_back([=](VkQueue q) { return validateQuery(q, cb_node, queryPool, firstQuery, queryCount); });
8395 addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8396                         {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
// Intercept for vkCmdPushConstants: validates queue flags, the (offset, size) range against
// device limits, that stageFlags is non-zero, and that the update range/stages exactly match
// the pipeline layout's declared VkPushConstantRange(s) in both directions.
8400 VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
8401                                             uint32_t offset, uint32_t size, const void *pValues) {
8403 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8404 unique_lock_t lock(global_lock);
8405 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8407 skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8408                               VALIDATION_ERROR_1bc02415);
8409 skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
// Check offset/size against maxPushConstantsSize and alignment rules.
8411 skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8412 if (0 == stageFlags) {
8414 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8415         HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc2dc03, "vkCmdPushConstants() call has no stageFlags set.");
8418 // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
8419 // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
8421 const auto &ranges = *getPipelineLayout(dev_data, layout)->push_constant_ranges;
8422 VkShaderStageFlags found_stages = 0;
8423 for (const auto &range : ranges) {
// Only ranges that fully contain [offset, offset+size) participate in the check.
8424 if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
8425 VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
// Direction 1: the command must name every stage the overlapping range declares.
8426 if (matching_stages != range.stageFlags) {
8427 // VALIDATION_ERROR_1bc00e08 VUID-vkCmdPushConstants-offset-01796
8428 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8429                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
8430                 VALIDATION_ERROR_1bc00e08,
8431                 "vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
8433                 "must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
8434                 "), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout 0x%" PRIx64 ".",
8435                 (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
8436                 HandleToUint64(layout));
8439 // Accumulate all stages we've found
8440 found_stages = matching_stages | found_stages;
// Direction 2: every stage the command names must be covered by some overlapping range.
8443 if (found_stages != stageFlags) {
8444 // VALIDATION_ERROR_1bc00e06 VUID-vkCmdPushConstants-offset-01795
8445 uint32_t missing_stages = ~found_stages & stageFlags;
8446 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8447                 HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc00e06,
8448                 "vkCmdPushConstants(): stageFlags = 0x%" PRIx32 ", VkPushConstantRange in pipeline layout 0x%" PRIx64
8449                 " overlapping offset = %d and size = %d, do not contain stageFlags 0x%" PRIx32 ".",
8450                 (uint32_t)stageFlags, HandleToUint64(layout), offset, size, missing_stages);
8454 if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
// Intercept for vkCmdWriteTimestamp: validate queue flags / command state, call down the
// chain, then schedule a submit-time update marking the query available.
8457 VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
8458                                              VkQueryPool queryPool, uint32_t slot) {
8460 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8461 unique_lock_t lock(global_lock);
8462 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Timestamps are permitted on graphics, compute, and transfer queues.
8465 ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()",
8466                       VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, VALIDATION_ERROR_1e802415);
8467 skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8473 dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8477 QueryObject query = {queryPool, slot};
// Writing a timestamp makes the query available at submit time.
8478 cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
// For each attachment reference, verify the referenced framebuffer image view's image was
// created with `usage_flag` set. Skips VK_ATTACHMENT_UNUSED and out-of-range references.
// Returns true if an error was logged.
8482 static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8483                        const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
8484                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
8487 for (uint32_t attach = 0; attach < count; attach++) {
8488 if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8489 // Attachment counts are verified elsewhere, but prevent an invalid access
8490 if (attachments[attach].attachment < fbci->attachmentCount) {
8491 const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8492 auto view_state = GetImageViewState(dev_data, *image_view);
// Look up the creation info of the image backing this view to inspect its usage bits.
8494 const VkImageCreateInfo *ici = &GetImageState(dev_data, view_state->create_info.image)->createInfo;
8495 if (ici != nullptr) {
8496 if ((ici->usage & usage_flag) == 0) {
8497 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8498                 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
8499                 "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
8500                 "IMAGE_USAGE flags (%s).",
8501                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
8511 // Validate VkFramebufferCreateInfo which includes:
8512 // 1. attachmentCount equals renderPass attachmentCount
8513 // 2. corresponding framebuffer and renderpass attachments have matching formats
8514 // 3. corresponding framebuffer and renderpass attachments have matching sample counts
8515 // 4. fb attachments only have a single mip level
8516 // 5. fb attachment dimensions are each at least as large as the fb
8517 // 6. fb attachments use identity swizzle
8518 // 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8519 // 8. fb dimensions are within physical device limits
// Returns true if any of the above checks fails (and an error was logged).
8520 static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8523 auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
8525 const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
// Check 1: attachment counts must match between framebuffer and render pass.
8526 if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8527 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8528                 HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006d8,
8529                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
8530                 "of %u of renderPass (0x%" PRIx64 ") being used to create Framebuffer.",
8531                 pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass));
8533 // attachmentCounts match, so make sure corresponding attachment details line up
8534 const VkImageView *image_views = pCreateInfo->pAttachments;
8535 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8536 auto view_state = GetImageViewState(dev_data, image_views[i]);
8537 auto &ivci = view_state->create_info;
// Check 2: view format must match the render pass attachment description.
8538 if (ivci.format != rpci->pAttachments[i].format) {
8540 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8541         HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e0,
8542         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
8543         "match the format of %s used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
8544         i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
8545         HandleToUint64(pCreateInfo->renderPass));
// Check 3: sample counts come from the underlying image, not the view.
8547 const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
8548 if (ici->samples != rpci->pAttachments[i].samples) {
8550 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8551 HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e2,
8552 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
8553 "samples used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
8554 i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8555 HandleToUint64(pCreateInfo->renderPass));
8557 // Verify that view only has a single mip level
8558 if (ivci.subresourceRange.levelCount != 1) {
8559 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8560                 0, VALIDATION_ERROR_094006e6,
8561                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
8562                 "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
8563                 i, ivci.subresourceRange.levelCount);
// Check 5: the selected mip level's dimensions must cover the framebuffer dimensions.
8565 const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
8566 uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8567 uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
8568 if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8569     (mip_height < pCreateInfo->height)) {
8570 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8571                 0, VALIDATION_ERROR_094006e4,
8572                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
8573                 "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
8574                 "attachment #%u, framebuffer:\n"
8577                 "layerCount: %u, %u\n",
8578                 i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8579                 pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
// Check 6: each component swizzle must be identity (either IDENTITY or its own channel).
8581 if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
8582     ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
8583     ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
8584     ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
8585 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8586                 0, VALIDATION_ERROR_094006e8,
8587                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
8588                 "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
8594                 i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
8595                 string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
8599 // Verify correct attachment usage flags
8600 for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8601 // Verify input attachments:
8603 MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
8604            pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
8605 // Verify color attachments:
8607 MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
8608            pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
8609 // Verify depth/stencil attachments:
8610 if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8611 skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8612                    VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
8616 // Verify FB dimensions are within physical device limits
8617 if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
8618 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8619                 VALIDATION_ERROR_094006ec,
8620                 "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
8621                 "width: %u, device max: %u\n",
8622                 pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth);
8624 if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
8625 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8626                 VALIDATION_ERROR_094006f0,
8627                 "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
8628                 "height: %u, device max: %u\n",
8629                 pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight);
8631 if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
8632 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8633                 VALIDATION_ERROR_094006f4,
8634                 "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
8635                 "layers: %u, device max: %u\n",
8636                 pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
8638 // Verify FB dimensions are greater than zero
8639 if (pCreateInfo->width <= 0) {
8640 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8641                 VALIDATION_ERROR_094006ea,
8642                 "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
8644 if (pCreateInfo->height <= 0) {
8645 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8646                 VALIDATION_ERROR_094006ee,
8647                 "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
8649 if (pCreateInfo->layers <= 0) {
8650 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8651                 VALIDATION_ERROR_094006f2,
8652                 "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
8657 // Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8658 // Return true if an error is encountered and callback returns true to skip call down chain
8659 // false indicates that call down chain should proceed
8660 static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8661 // TODO : Verify that renderPass FB is created with is compatible with FB
// All create-info checks are delegated to ValidateFramebufferCreateInfo.
8663 skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8667 // CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
8668 static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
8669 // Shadow create info and store in map
8670 std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
8671     new FRAMEBUFFER_STATE(fb, pCreateInfo, GetRenderPassStateSharedPtr(dev_data, pCreateInfo->renderPass)));
// Record per-attachment view/image info for later layout and memory tracking.
8673 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8674 VkImageView view = pCreateInfo->pAttachments[i];
8675 auto view_state = GetImageViewState(dev_data, view);
8679 MT_FB_ATTACHMENT_INFO fb_info;
8680 fb_info.view_state = view_state;
8681 fb_info.image = view_state->create_info.image;
8682 fb_state->attachments.push_back(fb_info);
// Transfer ownership of the shadow state into the layer's framebuffer map.
8684 dev_data->frameBufferMap[fb] = std::move(fb_state);
// Intercept for vkCreateFramebuffer: validate the create info; on validation failure return
// VK_ERROR_VALIDATION_FAILED_EXT without calling down; on success record the new framebuffer state.
8687 VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8688                                                  const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
8689 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8690 unique_lock_t lock(global_lock);
8691 bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
8694 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
8696 VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
// Only shadow state for framebuffers the driver actually created.
8698 if (VK_SUCCESS == result) {
8700 PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
// Depth-first search of the subpass DAG: returns true if a dependency path exists from
// `index` back to `dependent` through the prev edges. `processed_nodes` guards against
// revisiting (and against cycles).
8706 static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
8707                            std::unordered_set<uint32_t> &processed_nodes) {
8708 // If we have already checked this node we have not found a dependency path so return false.
8709 if (processed_nodes.count(index)) return false;
8710 processed_nodes.insert(index);
8711 const DAGNode &node = subpass_to_node[index];
8712 // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8713 if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8714 for (auto elem : node.prev) {
8715 if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
// Verifies that 'subpass' has an explicit or transitive dependency relationship
// with every subpass in 'dependent_subpasses' (subpasses sharing an attachment).
// Logs a DRAWSTATE_INVALID_RENDERPASS error into 'skip' when none exists.
// NOTE(review): the trailing parameter(s) of the signature — presumably
// 'bool &skip' — are on an elided line (original 8725/8726).
8723 static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
8724 const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
8727 // Loop through all subpasses that share the same attachment and make sure a dependency exists
8728 for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
// A subpass never needs a dependency on itself.
8729 if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
8730 const DAGNode &node = subpass_to_node[subpass];
8731 // Check for a specified dependency between the two nodes. If one exists we are done.
8732 auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8733 auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8734 if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8735 // If no dependency exits an implicit dependency still might. If not, throw an error.
// Search transitively in both directions before declaring the dependency missing.
8736 std::unordered_set<uint32_t> processed_nodes;
8737 if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8738 FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8739 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8740 DRAWSTATE_INVALID_RENDERPASS,
8741 "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8742 dependent_subpasses[k]);
// Recursive walk (via node.prev) that determines whether 'attachment' is written
// by subpass 'index' or any earlier subpass; when a later-subpass read depends on
// such a write (result && depth > 0), the intermediate subpass must list the
// attachment in pPreserveAttachments or an error is logged into 'skip'.
// Returns true when this subpass writes the attachment (color/input/depth use).
8750 static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8751 const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
8752 const DAGNode &node = subpass_to_node[index];
8753 // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8754 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8755 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8756 if (attachment == subpass.pColorAttachments[j].attachment) return true;
8758 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8759 if (attachment == subpass.pInputAttachments[j].attachment) return true;
8761 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8762 if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
8764 bool result = false;
8765 // Loop through previous nodes and see if any of them write to the attachment.
8766 for (auto elem : node.prev) {
8767 result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
8769 // If the attachment was written to by a previous node than this node needs to preserve it.
8770 if (result && depth > 0) {
8771 bool has_preserved = false;
8772 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8773 if (subpass.pPreserveAttachments[j] == attachment) {
8774 has_preserved = true;
8778 if (!has_preserved) {
8779 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8780 DRAWSTATE_INVALID_RENDERPASS,
8781 "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
// Generic 1-D range-overlap test on [offset, offset+size) intervals: true when
// range1's end or range1's start falls strictly inside range2.
// NOTE(review): the 'template <typename T>' header is on an elided line
// (original 8787). Also note that two exactly-coincident ranges (equal offset
// and size) satisfy neither clause and report "not overlapping" — callers
// appear to handle the identical-view case separately, but confirm intended.
8788 bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8789 return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
8790 ((offset1 > offset2) && (offset1 < (offset2 + size2)));
// True when two image subresource ranges overlap in BOTH the mip-level
// dimension and the array-layer dimension (per isRangeOverlapping above).
8793 bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8794 return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8795 isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
// Cross-checks a render pass against the framebuffer bound at vkCmdBeginRenderPass
// time: (1) detects aliasing attachments (same view, overlapping subresource
// region of the same image, or overlapping bound memory) and requires
// VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; (2) builds per-attachment lists of
// reading/writing subpasses (including aliases) and requires a dependency
// between every reader/writer pair; (3) checks preserve-attachment coverage.
// Returns "skip" accumulated from all logged errors.
8798 static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
8799 RENDER_PASS_STATE const *renderPass) {
8801 auto const pFramebufferInfo = framebuffer->createInfo.ptr();
8802 auto const pCreateInfo = renderPass->createInfo.ptr();
8803 auto const &subpass_to_node = renderPass->subpassToNode;
// Indexed by attachment: which subpasses write/read it, and which other
// attachments alias it.
8804 std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8805 std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8806 std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8807 // Find overlapping attachments
8808 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8809 for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8810 VkImageView viewi = pFramebufferInfo->pAttachments[i];
8811 VkImageView viewj = pFramebufferInfo->pAttachments[j];
// Case 1: literally the same VkImageView handle.
8812 if (viewi == viewj) {
8813 overlapping_attachments[i].push_back(j);
8814 overlapping_attachments[j].push_back(i);
8817 auto view_state_i = GetImageViewState(dev_data, viewi);
8818 auto view_state_j = GetImageViewState(dev_data, viewj);
// NOTE(review): the bodies of these unknown-state guards (presumably
// 'continue;') are on elided lines (original 8820-8821, 8832-8833).
8819 if (!view_state_i || !view_state_j) {
8822 auto view_ci_i = view_state_i->create_info;
8823 auto view_ci_j = view_state_j->create_info;
// Case 2: different views of the same image with overlapping subresources.
8824 if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
8825 overlapping_attachments[i].push_back(j);
8826 overlapping_attachments[j].push_back(i);
8829 auto image_data_i = GetImageState(dev_data, view_ci_i.image);
8830 auto image_data_j = GetImageState(dev_data, view_ci_j.image);
8831 if (!image_data_i || !image_data_j) {
// Case 3: different images bound to overlapping ranges of the same memory.
8834 if (image_data_i->binding.mem == image_data_j->binding.mem &&
8835 isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
8836 image_data_j->binding.size)) {
8837 overlapping_attachments[i].push_back(j);
8838 overlapping_attachments[j].push_back(i);
// Every aliasing pair must declare MAY_ALIAS on both attachment descriptions.
8842 for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8843 uint32_t attachment = i;
8844 for (auto other_attachment : overlapping_attachments[i]) {
8845 if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8846 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8847 HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
8848 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8849 attachment, other_attachment);
8851 if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8852 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8853 HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
8854 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8855 other_attachment, attachment);
8859 // Find for each attachment the subpasses that use them.
8860 unordered_set<uint32_t> attachmentIndices;
8861 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8862 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8863 attachmentIndices.clear();
8864 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8865 uint32_t attachment = subpass.pInputAttachments[j].attachment;
8866 if (attachment == VK_ATTACHMENT_UNUSED) continue;
// An input use of an attachment is also a use of everything it aliases.
8867 input_attachment_to_subpass[attachment].push_back(i);
8868 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8869 input_attachment_to_subpass[overlapping_attachment].push_back(i);
8872 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8873 uint32_t attachment = subpass.pColorAttachments[j].attachment;
8874 if (attachment == VK_ATTACHMENT_UNUSED) continue;
8875 output_attachment_to_subpass[attachment].push_back(i);
8876 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8877 output_attachment_to_subpass[overlapping_attachment].push_back(i);
// Remember color outputs so the depth/stencil check below can detect reuse.
8879 attachmentIndices.insert(attachment);
8881 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8882 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8883 output_attachment_to_subpass[attachment].push_back(i);
8884 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8885 output_attachment_to_subpass[overlapping_attachment].push_back(i);
// Same attachment used as both color and depth/stencil output is an error.
8888 if (attachmentIndices.count(attachment)) {
8890 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8891 DRAWSTATE_INVALID_RENDERPASS,
8892 "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
8896 // If there is a dependency needed make sure one exists
8897 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8898 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8899 // If the attachment is an input then all subpasses that output must have a dependency relationship
8900 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8901 uint32_t attachment = subpass.pInputAttachments[j].attachment;
8902 if (attachment == VK_ATTACHMENT_UNUSED) continue;
8903 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8905 // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8906 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8907 uint32_t attachment = subpass.pColorAttachments[j].attachment;
8908 if (attachment == VK_ATTACHMENT_UNUSED) continue;
8909 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8910 CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8912 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8913 const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8914 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8915 CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8918 // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
8920 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8921 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8922 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8923 CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
// Builds the subpass dependency DAG from pCreateInfo->pDependencies, filling
// prev/next edge lists per subpass, a has_self_dependency flag, and a
// subpass -> dependency-index map (-1 when no self-dependency). Also validates
// basic dependency ordering and logs errors into 'skip'.
// NOTE(review): the declaration/return of 'skip' is on lines elided from this
// excerpt (original 8932, 8956+).
8929 static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
8930 std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
8931 std::vector<int32_t> &subpass_to_dep_index) {
8933 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8934 DAGNode &subpass_node = subpass_to_node[i];
8935 subpass_node.pass = i;
8936 subpass_to_dep_index[i] = -1; // Default to no dependency and overwrite below as needed
8938 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8939 const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
// External dependencies do not add DAG edges, but src==dst==EXTERNAL is invalid.
8940 if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8941 if (dependency.srcSubpass == dependency.dstSubpass) {
8942 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8943 DRAWSTATE_INVALID_RENDERPASS, "The src and dest subpasses cannot both be external.");
8945 } else if (dependency.srcSubpass > dependency.dstSubpass) {
8946 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8947 DRAWSTATE_INVALID_RENDERPASS,
8948 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
8949 } else if (dependency.srcSubpass == dependency.dstSubpass) {
// Self-dependency: no edge, but remember the flag and dependency index.
8950 has_self_dependency[dependency.srcSubpass] = true;
8951 subpass_to_dep_index[dependency.srcSubpass] = i;
// Normal src < dst dependency: add bidirectional adjacency edges.
8953 subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8954 subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
// Layer intercept for vkCreateShaderModule: validates the SPIR-V, calls down the
// chain, and on success stores a shadow shader_module (an empty placeholder when
// the SPIR-V failed deeper analysis but creation was allowed to proceed).
8960 VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8961 const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
8962 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// NOTE(review): 'spirv_valid' is declared on an elided line (original 8963/8964).
8965 if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;
8967 VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8969 if (res == VK_SUCCESS) {
8970 lock_guard_t lock(global_lock);
8971 unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
8972 dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
// Logs VALIDATION_ERROR_12200684 when an attachment reference index is out of
// range (>= attachment_count) and not the VK_ATTACHMENT_UNUSED sentinel.
// 'type' names the reference kind ("Color", "Input", ...) for the message.
8977 static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
8979 if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
8980 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8981 VALIDATION_ERROR_12200684,
8982 "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.", type,
8983 attachment, attachment_count);
// True iff exactly one bit of x is set, i.e. x is a (nonzero) power of two.
static bool IsPowerOfTwo(unsigned x) {
    if (x == 0u) return false;
    return (x & (x - 1u)) == 0u;
}
// Per-subpass attachment-usage validation for vkCreateRenderPass: bind point,
// preserve-attachment rules, resolve-attachment sample/format rules, attachment
// index bounds, and (unless VK_AMD_mixed_attachment_samples) consistent sample
// counts across a subpass's color + depth/stencil attachments.
8990 static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
8992 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8993 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8994 if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8995 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8996 VALIDATION_ERROR_14000698,
8997 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
// Preserve attachments must be valid indices, not UNUSED, and not used by any
// other attachment reference in the same subpass.
9000 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9001 uint32_t attachment = subpass.pPreserveAttachments[j];
9002 if (attachment == VK_ATTACHMENT_UNUSED) {
9003 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9004 VALIDATION_ERROR_140006aa,
9005 "CreateRenderPass: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
9007 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9009 bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
9010 for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
9011 found = (subpass.pInputAttachments[r].attachment == attachment);
9013 for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
9014 found = (subpass.pColorAttachments[r].attachment == attachment) ||
9015 (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
// NOTE(review): the 'if (found)' guard and log_msg call opening are on elided
// lines (original 9016-9018).
9019 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9020 VALIDATION_ERROR_140006ac,
9021 "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass.", i,
// A subpass "performs resolve" when any resolve attachment reference is used.
9027 auto subpass_performs_resolve =
9028 subpass.pResolveAttachments &&
9029 std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9030 [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
// Bitwise-OR of all sample-count flags seen; a mix yields a non-power-of-two.
9032 unsigned sample_count = 0;
9034 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9035 uint32_t attachment;
9036 if (subpass.pResolveAttachments) {
9037 attachment = subpass.pResolveAttachments[j].attachment;
9038 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
// Resolve targets must be single-sampled.
9040 if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
9041 pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9042 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9043 0, VALIDATION_ERROR_140006a2,
9044 "CreateRenderPass: Subpass %u requests multisample resolve into attachment %u, which must "
9045 "have VK_SAMPLE_COUNT_1_BIT but has %s.",
9046 i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
// A resolve reference needs a corresponding used color attachment.
9049 if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
9050 subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
9051 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9052 0, VALIDATION_ERROR_1400069e,
9053 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u which has "
9054 "attachment=VK_ATTACHMENT_UNUSED.",
9058 attachment = subpass.pColorAttachments[j].attachment;
9059 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9061 if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
9062 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
// Resolving from a single-sampled color source is invalid.
9064 if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9065 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9066 0, VALIDATION_ERROR_140006a0,
9067 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u which has "
9068 "VK_SAMPLE_COUNT_1_BIT.",
// Color and its resolve target must share the same format.
9072 if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
9073 const auto &color_desc = pCreateInfo->pAttachments[attachment];
9074 const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
9075 if (color_desc.format != resolve_desc.format) {
9076 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9077 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_140006a4,
9078 "CreateRenderPass: Subpass %u pColorAttachments[%u] resolves to an attachment with a "
9079 "different format. color format: %u, resolve format: %u.",
9080 i, j, color_desc.format, resolve_desc.format);
// VK_AMD_mixed_attachment_samples: color samples may not exceed depth/stencil.
9084 if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass.pDepthStencilAttachment &&
9085 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9086 const auto depth_stencil_sample_count =
9087 pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
9088 if (pCreateInfo->pAttachments[attachment].samples > depth_stencil_sample_count) {
9089 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9090 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_14000bc4,
9091 "CreateRenderPass: Subpass %u pColorAttachments[%u] has %s which is larger than "
9092 "depth/stencil attachment %s.",
9093 i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
9094 string_VkSampleCountFlagBits(depth_stencil_sample_count));
9100 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9101 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9102 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9104 if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
9105 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9109 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9110 uint32_t attachment = subpass.pInputAttachments[j].attachment;
9111 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
// Mixed sample counts (more than one bit set) are invalid without the AMD ext.
9114 if (!dev_data->extensions.vk_amd_mixed_attachment_samples && sample_count && !IsPowerOfTwo(sample_count)) {
9115 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9116 VALIDATION_ERROR_0082b401,
9117 "CreateRenderPass: Subpass %u attempts to render to attachments with inconsistent sample counts.", i);
// Records whether an attachment's FIRST use in the render pass is a read
// (is_read). Only the first call per index wins; later uses are ignored.
9123 static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
9124 if (index == VK_ATTACHMENT_UNUSED) return;
9126 if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
// Layer intercept for vkCreateRenderPass: validates attachment usage, builds the
// subpass dependency DAG, checks dependency stage/access masks and layouts, then
// calls down the chain and records the shadow RENDER_PASS_STATE (including the
// DAG and first-use map) on success.
9129 VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9130 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
9132 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9134 unique_lock_t lock(global_lock);
9135 // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
9137 skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
// DAG side-outputs are kept so they can be moved into the recorded state below.
9139 std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9140 std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9141 std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
9142 skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);
// Each dependency's stage masks must respect enabled GS/TS features, and its
// access masks must be compatible with the corresponding stage masks.
9144 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9145 auto const &dependency = pCreateInfo->pDependencies[i];
9146 skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.srcStageMask, "vkCreateRenderPass()", VALIDATION_ERROR_13e006b8,
9147 VALIDATION_ERROR_13e006bc);
9148 skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.dstStageMask, "vkCreateRenderPass()", VALIDATION_ERROR_13e006ba,
9149 VALIDATION_ERROR_13e006be);
9151 if (!ValidateAccessMaskPipelineStage(dependency.srcAccessMask, dependency.srcStageMask)) {
9152 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9153 VALIDATION_ERROR_13e006c8,
9154 "CreateRenderPass: pDependencies[%u].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", i,
9155 dependency.srcAccessMask, dependency.srcStageMask);
9158 if (!ValidateAccessMaskPipelineStage(dependency.dstAccessMask, dependency.dstStageMask)) {
9159 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9160 VALIDATION_ERROR_13e006ca,
9161 "CreateRenderPass: pDependencies[%u].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", i,
9162 dependency.dstAccessMask, dependency.dstStageMask);
9166 skip |= ValidateLayouts(dev_data, device, pCreateInfo);
9171 return VK_ERROR_VALIDATION_FAILED_EXT;
9174 VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9176 if (VK_SUCCESS == result) {
// Shadow the render pass state, transferring DAG results computed above.
9179 auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
9180 render_pass->renderPass = *pRenderPass;
9181 render_pass->hasSelfDependency = has_self_dependency;
9182 render_pass->subpassToNode = subpass_to_node;
9183 render_pass->subpass_to_dependency_index = subpass_to_dep_index;
// Color/resolve/depth-stencil uses count as writes (false); input uses as reads.
9185 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9186 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9187 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9188 MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
9190 // resolve attachments are considered to be written
9191 if (subpass.pResolveAttachments) {
9192 MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
9195 if (subpass.pDepthStencilAttachment) {
9196 MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
9198 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9199 MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
9203 dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
// Logs 'error_code' (and returns skip=true) when 'pCB' is not a PRIMARY-level
// command buffer — used by commands that are illegal in secondary buffers.
9208 static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
9209 UNIQUE_VALIDATION_ERROR_CODE error_code) {
9211 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9212 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9213 HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
// Checks that pRenderPassBegin->renderArea lies entirely within the bound
// framebuffer's width/height; logs DRAWSTATE_INVALID_RENDER_AREA otherwise.
9219 static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9221 const safe_VkFramebufferCreateInfo *pFramebufferInfo =
9222 &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
// Negative offsets or an extent reaching past either framebuffer edge fail.
9223 if (pRenderPassBegin->renderArea.offset.x < 0 ||
9224 (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9225 pRenderPassBegin->renderArea.offset.y < 0 ||
9226 (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9227 skip |= static_cast<bool>(log_msg(
9228 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9229 DRAWSTATE_INVALID_RENDER_AREA,
9230 "Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
9231 "%d, height %d. Framebuffer: width %d, height %d.",
9232 pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9233 pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9238 // If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
9239 // [load|store]Op flag must be checked
9240 // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
// Returns true when 'op' applies to 'format' through whichever of the two op
// fields (color/depth vs. stencil) is relevant for that format.
9241 template <typename T>
9242 static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
// Quick reject: neither field even holds 'op'.
// NOTE(review): the 'return false;' body of this guard is on elided lines
// (original 9244-9245).
9243 if (color_depth_op != op && stencil_op != op) {
9246 bool check_color_depth_load_op = !FormatIsStencilOnly(format);
9247 bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
9249 return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
// Layer intercept for vkCmdBeginRenderPass: validates clear-value coverage,
// render area bounds, framebuffer/render-pass compatibility, layouts, queue
// flags, and nesting; then records the command buffer's active render pass
// state and performs the begin-pass layout transitions.
9252 VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
9253 VkSubpassContents contents) {
9255 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9256 unique_lock_t lock(global_lock);
9257 GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
9258 auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
9259 auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9261 if (render_pass_state) {
9262 uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
9263 cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
// clear_op_size ends as 1 + index of the highest attachment using LOAD_OP_CLEAR.
9265 for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
9266 auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
9267 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
9268 VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9269 clear_op_size = static_cast<uint32_t>(i) + 1;
9273 if (clear_op_size > pRenderPassBegin->clearValueCount) {
9274 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9275 HandleToUint64(render_pass_state->renderPass), VALIDATION_ERROR_1200070c,
9276 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
9277 "must be at least %u entries in pClearValues array to account for the highest index attachment in "
9278 "renderPass 0x%" PRIx64
9279 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
9280 "attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
9281 "that aren't cleared they will be ignored.",
9282 pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
9283 clear_op_size, clear_op_size - 1);
9285 skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9286 skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
9287 GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
// When the FB was created with a different (but possibly compatible) render
// pass, check compatibility explicitly.
9288 if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
9289 skip |= validateRenderPassCompatibility(dev_data, "render pass", render_pass_state, "framebuffer",
9290 framebuffer->rp_state.get(), "vkCmdBeginRenderPass()",
9291 VALIDATION_ERROR_12000710);
9293 skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
9294 skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
9295 skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
9296 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
9297 VALIDATION_ERROR_17a02415);
9298 skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
// Record the active render pass state used by subsequent Cmd* validation.
9299 cb_node->activeRenderPass = render_pass_state;
9300 // This is a shallow copy as that is all that is needed for now
9301 cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
9302 cb_node->activeSubpass = 0;
9303 cb_node->activeSubpassContents = contents;
9304 cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
9305 // Connect this framebuffer and its children to this cmdBuffer
9306 AddFramebufferBinding(dev_data, cb_node, framebuffer);
9307 // Connect this RP to cmdBuffer
9308 addCommandBufferBinding(&render_pass_state->cb_bindings,
9309 {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_node);
9310 // transition attachments to the correct layouts for beginning of renderPass and first subpass
9311 TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
// NOTE(review): the skip check and lock release before dispatch are on elided
// lines (original 9312-9315).
9316 dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
// Layer intercept for vkCmdNextSubpass: validates buffer level, queue flags,
// command legality, render-pass nesting, and that the active subpass is not
// already the last one; then advances activeSubpass and applies the next
// subpass's layout transitions.
9320 VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9322 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9323 unique_lock_t lock(global_lock);
9324 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9326 skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
9327 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
9328 skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9329 skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);
// Cannot advance past the final subpass of the active render pass.
9331 auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
9332 if (pCB->activeSubpass == subpassCount - 1) {
9333 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9334 HandleToUint64(commandBuffer), VALIDATION_ERROR_1b60071a,
9335 "vkCmdNextSubpass(): Attempted to advance beyond final subpass.");
// NOTE(review): the skip check and lock handling around dispatch are on elided
// lines (original 9336-9341, 9343-9345).
9342 dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
// Post-call record: bump the subpass index and transition its attachment layouts.
9346 pCB->activeSubpass++;
9347 pCB->activeSubpassContents = contents;
9348 TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
9349 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
// Layer intercept for vkCmdEndRenderPass.
// Validates that the end is legal (final subpass reached, inside a render pass, primary CB,
// graphics queue), forwards the call, then transitions attachments to their final layouts
// and clears the tracked active-render-pass state on the command buffer.
// NOTE(review): gaps in this listing hide the `skip` declaration, null checks, and braces.
9353 VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9355     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9356     unique_lock_t lock(global_lock);
9357     auto pCB = GetCBNode(dev_data, commandBuffer);
9358     FRAMEBUFFER_STATE *framebuffer = NULL;
9360         RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
9361         framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
// Spec: vkCmdEndRenderPass must only be called in the render pass's final subpass.
9363             if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
9364                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9365                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
9366                                 VALIDATION_ERROR_1b00071c, "vkCmdEndRenderPass(): Called before reaching final subpass.");
// Note: message string says "vkCmdEndRenderpass()" (lowercase 'p') — presumably intentional
// historical text; verify against the error-database before changing.
9369         skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass()", VALIDATION_ERROR_1b000017);
9370         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
9371         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
9372         skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9378     dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
// Post-call state update: apply finalLayout transitions and drop render-pass bindings.
9382         TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
9383         pCB->activeRenderPass = nullptr;
9384         pCB->activeSubpass = 0;
9385         pCB->activeFramebuffer = VK_NULL_HANDLE;
// Validates a secondary command buffer's inherited framebuffer against the primary CB's
// currently active framebuffer, for vkCmdExecuteCommands.
// Returns true ("skip") if a validation error was logged. A VK_NULL_HANDLE inherited
// framebuffer is always acceptable; otherwise it must match the primary's active one and
// must refer to a known (still-valid) framebuffer object.
// NOTE(review): the early-return for a missing pInheritanceInfo and the `skip` declaration
// fall in listing gaps here.
9389 static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9390                                 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
9392     if (!pSubCB->beginInfo.pInheritanceInfo) {
9395     VkFramebuffer primary_fb = pCB->activeFramebuffer;
9396     VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9397     if (secondary_fb != VK_NULL_HANDLE) {
// Inherited FB, when specified, must equal the primary's active FB (1b2000c6).
9398         if (primary_fb != secondary_fb) {
9399             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9400                             HandleToUint64(primaryBuffer), VALIDATION_ERROR_1b2000c6,
9401                             "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
9402                             " which has a framebuffer 0x%" PRIx64
9403                             " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
9404                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb));
// If the framebuffer handle no longer resolves to tracked state, it has been destroyed
// (or was never valid) — report as an invalid secondary CB.
9406         auto fb = GetFramebufferState(dev_data, secondary_fb);
9408             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9409                             HandleToUint64(primaryBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9410                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9411                             " which has invalid framebuffer 0x%" PRIx64 ".",
9412                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb));
// Cross-checks a secondary command buffer's state against its executing primary CB:
//  1. Any pipeline-statistics query active on the primary must have all of its statistic
//     bits covered by the secondary's inherited pipelineStatistics flags (1b2000d0).
//  2. A query type active on the primary must not also have been started on the secondary.
//  3. Both command buffers must come from pools with the same queue family index.
// Returns true ("skip") if any violation was logged.
9419 static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
// Collect the query types currently active on the primary so we can detect the
// secondary re-starting the same type below.
9421     unordered_set<int> activeTypes;
9422     for (auto queryObject : pCB->activeQueries) {
9423         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9424         if (queryPoolData != dev_data->queryPoolMap.end()) {
9425             if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9426                 pSubCB->beginInfo.pInheritanceInfo) {
9427                 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
// Subset test: every bit the pool queries must be present in the secondary's flags.
9428                 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9430                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9431                         HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1b2000d0,
9432                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9433                         " which has invalid active query pool 0x%" PRIx64
9434                         ". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
9435                         HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first));
9438             activeTypes.insert(queryPoolData->second.createInfo.queryType);
// A query type already active on the primary must not have been started again on the
// secondary — that would nest two queries of the same type.
9441     for (auto queryObject : pSubCB->startedQueries) {
9442         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9443         if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9444             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9445                             HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9446                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9447                             " which has invalid active query pool 0x%" PRIx64
9448                             " of type %d but a query of that type has been started on secondary Cmd Buffer 0x%" PRIx64 ".",
9449                             HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first),
9450                             queryPoolData->second.createInfo.queryType, HandleToUint64(pSubCB->commandBuffer));
// Primary and secondary must have been allocated from pools on the same queue family.
9454     auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
9455     auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
9456     if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
9457         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9458                         HandleToUint64(pSubCB->commandBuffer), DRAWSTATE_INVALID_QUEUE_FAMILY,
9459                         "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIx64
9460                         " created in queue family %d has secondary command buffer 0x%" PRIx64 " created in queue family %d.",
9461                         HandleToUint64(pCB->commandBuffer), primary_pool->queueFamilyIndex, HandleToUint64(pSubCB->commandBuffer),
9462                         secondary_pool->queueFamilyIndex);
// Layer intercept for vkCmdExecuteCommands.
// For each secondary command buffer: validates level, render-pass-continue requirements,
// render pass compatibility, framebuffer inheritance, query/pool state, simultaneous-use
// rules, and inherited-query support; then (mixed with validation — see TODO below)
// propagates image layout transitions, query updates, and queue-submit callbacks from the
// secondary into the primary, and links the two CBs.
// NOTE(review): the `skip` declaration, several closing braces, and some `if (!skip)`-style
// guards fall into gaps in this listing.
9468 VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
9469                                               const VkCommandBuffer *pCommandBuffers) {
9471     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9472     unique_lock_t lock(global_lock);
9473     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9475         GLOBAL_CB_NODE *pSubCB = NULL;
9476         for (uint32_t i = 0; i < commandBuffersCount; i++) {
9477             pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
// Every element of pCommandBuffers must be a SECONDARY command buffer (1b2000b0).
9479             if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9481                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9482                             HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000b0,
9483                             "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%" PRIx64
9484                             " in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.",
9485                             HandleToUint64(pCommandBuffers[i]), i);
9486             } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9487                 if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
9488                     auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9489                     if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9490                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9491                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9492                                         VALIDATION_ERROR_1b2000c0,
9493                                         "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9494                                         ") executed within render pass (0x%" PRIx64
9495                                         ") must have had vkBeginCommandBuffer() called w/ "
9496                                         "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9497                                         HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->activeRenderPass->renderPass));
9499                         // Make sure render pass is compatible with parent command buffer pass if has continue
9500                         if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
9501                             skip |= validateRenderPassCompatibility(dev_data, "primary command buffer", pCB->activeRenderPass,
9502                                                                     "secondary command buffer", secondary_rp_state,
9503                                                                     "vkCmdExecuteCommands()", VALIDATION_ERROR_1b2000c4);
9505                         // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
9507                             validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB, "vkCmdExecuteCommands()");
9508                         if (!pSubCB->cmd_execute_commands_functions.empty()) {
9509                             // Inherit primary's activeFramebuffer and while running validate functions
9510                             for (auto &function : pSubCB->cmd_execute_commands_functions) {
9511                                 skip |= function(pCB, pCB->activeFramebuffer);
9517             // TODO(mlentine): Move more logic into this method
9518             skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9519             skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
// Without SIMULTANEOUS_USE, a secondary may not be in flight or already linked (1b2000b4);
// and executing it strips simultaneous-use capability from a primary that has it (warning).
9520             if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9521                 if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
9522                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9523                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
9524                                     VALIDATION_ERROR_1b2000b4,
9525                                     "Attempt to simultaneously execute command buffer 0x%" PRIx64
9526                                     " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
9527                                     HandleToUint64(pCB->commandBuffer));
9529                 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9530                     // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9531                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9532                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9533                                     DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE,
9534                                     "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9535                                     ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
9536                                     "command buffer (0x%" PRIx64
9537                                     ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even "
9539                                     HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->commandBuffer));
// State mutation inside validation (see TODO at 9551): clear the primary's flag to match.
9540                     pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9543             if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
9545                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9546                             HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000ca,
9547                             "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9548                             ") cannot be submitted with a query in flight and inherited queries not supported on this device.",
9549                             HandleToUint64(pCommandBuffers[i]));
9551             // TODO: separate validate from update! This is very tangled.
9552             // Propagate layout transitions to the primary cmd buffer
9553             for (auto ilm_entry : pSubCB->imageLayoutMap) {
9554                 if (pCB->imageLayoutMap.find(ilm_entry.first) != pCB->imageLayoutMap.end()) {
9555                     pCB->imageLayoutMap[ilm_entry.first].layout = ilm_entry.second.layout;
// Subresource not yet tracked in the primary: seed its initialLayout from the secondary
// before recording the final layout.
9557                     assert(ilm_entry.first.hasSubresource);
9558                     IMAGE_CMD_BUF_LAYOUT_NODE node;
9559                     if (!FindCmdBufLayout(dev_data, pCB, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
9560                         node.initialLayout = ilm_entry.second.initialLayout;
9562                     node.layout = ilm_entry.second.layout;
9563                     SetLayout(dev_data, pCB, ilm_entry.first, node);
// Link the secondary to this primary and inherit its deferred query/submit callbacks.
9566             pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9567             pCB->linkedCommandBuffers.insert(pSubCB);
9568             pSubCB->linkedCommandBuffers.insert(pCB);
9569             for (auto &function : pSubCB->queryUpdates) {
9570                 pCB->queryUpdates.push_back(function);
9572             for (auto &function : pSubCB->queue_submit_functions) {
9573                 pCB->queue_submit_functions.push_back(function);
9576         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
9578             ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
9579                                   VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
9580         skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9583     if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
// Layer intercept for vkMapMemory.
// Validates that mapped image subresources are in host-accessible layouts, that the memory
// type is HOST_VISIBLE (31200554), and that the requested range is legal; then forwards the
// call and, on success, records the mapped range and installs shadow-memory tracking.
// NOTE(review): the `ppData` parameter, `skip` declaration, and `return result;` fall in
// gaps of this listing.
9586 VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
9588     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9591     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9592     unique_lock_t lock(global_lock);
9593     DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
// VK_WHOLE_SIZE maps to the end of the allocation; end_offset is inclusive.
9595         auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
9596         skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
9597         if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
9598              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
// NOTE(review): `skip =` (not `|=`) here overwrites any earlier skip result — looks
// suspicious but is preserved as-is; confirm against upstream history.
9599             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9600                            HandleToUint64(mem), VALIDATION_ERROR_31200554,
9601                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIx64 ".",
9602                            HandleToUint64(mem));
9605     skip |= ValidateMapMemRange(dev_data, mem, offset, size);
9609         result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
9610         if (VK_SUCCESS == result) {
9612             // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
9613             storeMemRanges(dev_data, mem, offset, size);
9614             initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
// Layer intercept for vkUnmapMemory.
// Validates/clears the tracked mapped range for `mem` (deleteMemRanges reports errors such
// as unmapping memory that is not currently mapped), then forwards the call.
9621 VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9622     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9625     unique_lock_t lock(global_lock);
9626     skip |= deleteMemRanges(dev_data, mem);
9629         dev_data->dispatch_table.UnmapMemory(device, mem);
// Verifies that each VkMappedMemoryRange passed to Flush/Invalidate lies entirely within
// the range currently mapped on its memory object. Returns true if any violation was
// logged. funcName is used for the error-message prefix.
9633 static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
9634                                    const VkMappedMemoryRange *pMemRanges) {
9636     for (uint32_t i = 0; i < memRangeCount; ++i) {
9637         auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
// VK_WHOLE_SIZE range: only the start offset can be out of bounds (0c20055c).
9639             if (pMemRanges[i].size == VK_WHOLE_SIZE) {
9640                 if (mem_info->mem_range.offset > pMemRanges[i].offset) {
9642                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9643                         HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055c,
9644                         "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
9645                         ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
9646                         funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
// Explicit-size range: check both ends against the mapped span (0c20055a).
9649                 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
9650                                               ? mem_info->alloc_info.allocationSize
9651                                               : (mem_info->mem_range.offset + mem_info->mem_range.size);
9652                 if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
9653                     (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
9655                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9656                                 HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055a,
9657                                 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
9658                                 ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
9659                                 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9660                                 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
// For each memory range with a shadow copy (non-coherent memory tracking): checks the
// guard bytes padded before and after the user-visible region for corruption (they must
// still contain NoncoherentMemoryFillValue — anything else means the app wrote out of
// bounds), then copies the user region from the shadow buffer back to the real driver
// mapping. Returns true if under/overflow was detected.
9668 static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
9669                                                      const VkMappedMemoryRange *mem_ranges) {
9671     for (uint32_t i = 0; i < mem_range_count; ++i) {
9672         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9674             if (mem_info->shadow_copy) {
9675                 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9676                                         ? mem_info->mem_range.size
9677                                         : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
9678                 char *data = static_cast<char *>(mem_info->shadow_copy);
// Shadow layout: [pad][user data of `size` bytes][pad]; scan the leading pad first.
9679                 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
9680                     if (data[j] != NoncoherentMemoryFillValue) {
9681                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9682                                         VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9683                                         MEMTRACK_INVALID_MAP, "Memory underflow was detected on mem obj 0x%" PRIx64,
9684                                         HandleToUint64(mem_ranges[i].memory));
// Then the trailing pad, which starts right after the user region.
9687                 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
9688                     if (data[j] != NoncoherentMemoryFillValue) {
9689                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9690                                         VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9691                                         MEMTRACK_INVALID_MAP, "Memory overflow was detected on mem obj 0x%" PRIx64,
9692                                         HandleToUint64(mem_ranges[i].memory));
// Push the app's writes (shadow user region) through to the actual driver mapping.
9695                 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
// Inverse of ValidateAndCopyNoncoherentMemoryToDriver: after an invalidate, refreshes the
// user region of each shadow copy from the real driver mapping so subsequent app reads of
// the shadow see current device data. Guard padding is left untouched.
9702 static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
9703     for (uint32_t i = 0; i < mem_range_count; ++i) {
9704         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9705         if (mem_info && mem_info->shadow_copy) {
// NOTE(review): the WHOLE_SIZE branch subtracts mem_ranges[i].offset here, while the
// sibling "to driver" function subtracts mem_info->mem_range.offset — confirm which is
// intended before unifying.
9706             VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9707                                     ? mem_info->mem_range.size
9708                                     : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
9709             char *data = static_cast<char *>(mem_info->shadow_copy);
9710             memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
// Checks each VkMappedMemoryRange against VkPhysicalDeviceLimits::nonCoherentAtomSize:
// offset must be a multiple of the atom size (0c20055e), and size must be a multiple too
// (0c200adc) unless it is VK_WHOLE_SIZE or the range runs exactly to the end of the
// allocation. Returns true if any violation was logged.
9715 static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
9716                                                   const VkMappedMemoryRange *mem_ranges) {
9718     for (uint32_t i = 0; i < mem_range_count; ++i) {
9719         uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
9720         if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
// NOTE(review): message indexes with %d but reports mem_ranges->memory (element 0's
// handle) rather than mem_ranges[i].memory — flag for upstream confirmation.
9721             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9722                             HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c20055e,
9723                             "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
9724                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9725                             func_name, i, mem_ranges[i].offset, atom_size);
9727         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9728         if ((mem_ranges[i].size != VK_WHOLE_SIZE) &&
9729             (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
9730             (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
9731             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9732                             HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c200adc,
9733                             "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
9734                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9735                             func_name, i, mem_ranges[i].size, atom_size);
// Pre-call validation for vkFlushMappedMemoryRanges: atom-size alignment, shadow-copy
// guard-byte check plus copy-to-driver, and mapped-range containment. Takes global_lock
// for the duration. Returns true to skip the down-chain call.
// Note: ValidateAndCopyNoncoherentMemoryToDriver also performs the shadow->driver copy,
// i.e. this "validate" has a deliberate side effect.
9741 static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9742                                                    const VkMappedMemoryRange *mem_ranges) {
9744     lock_guard_t lock(global_lock);
9745     skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
9746     skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
9747     skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
// Layer intercept for vkFlushMappedMemoryRanges: run pre-call validation (which also syncs
// shadow copies to the driver) and forward only if validation passed; otherwise return
// VK_ERROR_VALIDATION_FAILED_EXT.
9751 VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9752                                                        const VkMappedMemoryRange *pMemRanges) {
9753     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9754     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9756     if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9757         result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
// Pre-call validation for vkInvalidateMappedMemoryRanges: atom-size alignment and
// mapped-range containment (no shadow-copy work here — that happens post-call).
// Takes global_lock; returns true to skip the down-chain call.
9762 static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9763                                                         const VkMappedMemoryRange *mem_ranges) {
9765     lock_guard_t lock(global_lock);
9766     skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
9767     skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
// Post-call record for vkInvalidateMappedMemoryRanges: after the driver invalidate
// succeeds, refresh our shadow copies from the now-current driver data. Takes global_lock.
9771 static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9772                                                        const VkMappedMemoryRange *mem_ranges) {
9773     lock_guard_t lock(global_lock);
9774     // Update our shadow copy with modified driver data
9775     CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
// Layer intercept for vkInvalidateMappedMemoryRanges: validate, forward if clean, and on
// VK_SUCCESS refresh shadow copies from driver memory. Returns
// VK_ERROR_VALIDATION_FAILED_EXT when validation blocks the call.
9778 VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9779                                                             const VkMappedMemoryRange *pMemRanges) {
9780     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9781     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9783     if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9784         result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9785         if (result == VK_SUCCESS) {
9786             PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
// Pre-call validation shared by vkBindImageMemory and vkBindImageMemory2[KHR] (api_name
// distinguishes the caller in messages). Checks: memory-binding legality, that
// vkGetImageMemoryRequirements was called first (warning — and if not, calls it so later
// checks have valid requirements), memory-range overlap, memory-type compatibility
// (1740082e), offset alignment (17400830), allocation size (17400832), and
// dedicated-allocation constraints (17400bca). Returns true to skip the bind.
9792 static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9793                                            VkDeviceSize memoryOffset, const char *api_name) {
9796         unique_lock_t lock(global_lock);
9797         // Track objects tied to memory
9798         uint64_t image_handle = HandleToUint64(image);
9799         skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
9800         if (!image_state->memory_requirements_checked) {
9801             // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
9802             // BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
9803             // vkGetImageMemoryRequirements()
9804             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9805                             image_handle, DRAWSTATE_INVALID_IMAGE,
9806                             "%s: Binding memory to image 0x%" PRIx64
9807                             " but vkGetImageMemoryRequirements() has not been called on that image.",
9808                             api_name, HandleToUint64(image_handle));
9809             // Make the call for them so we can verify the state
9811                 dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
9815         // Validate bound memory range information
9816         auto mem_info = GetMemObjInfo(dev_data, mem);
9818             skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9819                                                    image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
9820             skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
9821                                         VALIDATION_ERROR_1740082e);
9824             // Validate memory requirements alignment
9825             if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
9826                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9827                                 image_handle, VALIDATION_ERROR_17400830,
9828                                 "%s: memoryOffset is 0x%" PRIxLEAST64
9829                                 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
9830                                 ", returned from a call to vkGetImageMemoryRequirements with image.",
9831                                 api_name, memoryOffset, image_state->requirements.alignment);
9835             // Validate memory requirements size
9836             if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
9837                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9838                                 image_handle, VALIDATION_ERROR_17400832,
9839                                 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
9840                                 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
9841                                 ", returned from a call to vkGetImageMemoryRequirements with image.",
9842                                 api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
9845             // Validate dedicated allocation
// Dedicated allocations must bind exactly the image they were allocated for, at offset 0.
9846             if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
9847                 // TODO: Add vkBindImageMemory2KHR error message when added to spec.
9848                 auto validation_error = VALIDATION_ERROR_UNDEFINED;
9849                 if (strcmp(api_name, "vkBindImageMemory()") == 0) {
9850                     validation_error = VALIDATION_ERROR_17400bca;
// NOTE(review): reported with OBJECT_TYPE_BUFFER_EXT for an image handle — looks like a
// copy/paste from the buffer path; confirm before changing.
9853                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9854                             image_handle, validation_error,
9855                             "%s: for dedicated memory allocation 0x%" PRIxLEAST64
9856                             ", VkMemoryDedicatedAllocateInfoKHR::image 0x%" PRIXLEAST64 " must be equal to image 0x%" PRIxLEAST64
9857                             " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
9858                             api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_image), image_handle, memoryOffset);
// Post-call record shared by the BindImageMemory entry points: after a successful bind,
// records the image's memory range in the mem object and links image <-> memory in the
// binding maps. Takes global_lock.
9865 static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9866                                           VkDeviceSize memoryOffset, const char *api_name) {
9868     unique_lock_t lock(global_lock);
9869     // Track bound memory range information
9870     auto mem_info = GetMemObjInfo(dev_data, mem);
9872         InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9873                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
9876     // Track objects tied to memory
9877     uint64_t image_handle = HandleToUint64(image);
9878     SetMemBinding(dev_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage, api_name);
// Layer intercept for vkBindImageMemory: look up image state under lock, run shared
// pre-call validation, forward if clean, and record the binding on VK_SUCCESS.
9882 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9883     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9884     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9885     IMAGE_STATE *image_state;
9887         unique_lock_t lock(global_lock);
9888         image_state = GetImageState(dev_data, image);
9890     bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
9892         result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
9893         if (result == VK_SUCCESS) {
9894             PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
// Pre-call validation for vkBindImageMemory2[KHR]: resolves IMAGE_STATE for every bind
// info (stored into the caller-provided vector so the record phase can reuse it), then
// runs the shared single-bind validation per element with an indexed api_name for
// messages. Returns true to skip the down-chain call.
// NOTE(review): the api_name buffer declaration falls in a listing gap; sprintf into it
// is visible at 9911.
9900 static bool PreCallValidateBindImageMemory2(layer_data *dev_data, std::vector<IMAGE_STATE *> *image_state, uint32_t bindInfoCount,
9901                                             const VkBindImageMemoryInfoKHR *pBindInfos) {
9903         unique_lock_t lock(global_lock);
9904         for (uint32_t i = 0; i < bindInfoCount; i++) {
9905             (*image_state)[i] = GetImageState(dev_data, pBindInfos[i].image);
9910     for (uint32_t i = 0; i < bindInfoCount; i++) {
9911         sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i);
9912         skip |= PreCallValidateBindImageMemory(dev_data, pBindInfos[i].image, (*image_state)[i], pBindInfos[i].memory,
9913                                                pBindInfos[i].memoryOffset, api_name);
// Post-call record for vkBindImageMemory2[KHR]: applies the shared single-bind record
// step for each bind info, using the IMAGE_STATE pointers cached during pre-validation.
9918 static void PostCallRecordBindImageMemory2(layer_data *dev_data, const std::vector<IMAGE_STATE *> &image_state,
9919                                            uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
9920     for (uint32_t i = 0; i < bindInfoCount; i++) {
9921         PostCallRecordBindImageMemory(dev_data, pBindInfos[i].image, image_state[i], pBindInfos[i].memory,
9922                                       pBindInfos[i].memoryOffset, "vkBindImageMemory2()");
// Layer intercept for vkBindImageMemory2 (core promoted version): validate all bind
// infos, forward if clean, record bindings on VK_SUCCESS.
9926 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount,
9927                                                 const VkBindImageMemoryInfoKHR *pBindInfos) {
9928     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9929     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9930     std::vector<IMAGE_STATE *> image_state(bindInfoCount);
9931     if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
9932         result = dev_data->dispatch_table.BindImageMemory2(device, bindInfoCount, pBindInfos);
9933         if (result == VK_SUCCESS) {
9934             PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
// Layer intercept for vkBindImageMemory2KHR (extension alias): identical to
// BindImageMemory2 except for the dispatch-table entry it forwards to.
9940 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
9941                                                    const VkBindImageMemoryInfoKHR *pBindInfos) {
9942     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9943     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9944     std::vector<IMAGE_STATE *> image_state(bindInfoCount);
9945     if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
9946         result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
9947         if (result == VK_SUCCESS) {
9948             PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
// Layer intercept for vkSetEvent.
// Updates tracked event state (signaled from host -> HOST_BIT stage mask), errors if the
// event is still referenced by an in-flight command buffer (forward-progress hazard),
// propagates the HOST_BIT into every queue's eventToStageMap (host signals are visible to
// all queues immediately), then forwards the call if validation passed.
9954 VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9956     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9957     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9958     unique_lock_t lock(global_lock);
9959     auto event_state = GetEventNode(dev_data, event);
9961         event_state->needsSignaled = false;
9962         event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9963         if (event_state->write_in_use) {
9964             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9965                             HandleToUint64(event), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
9966                             "Cannot call vkSetEvent() on event 0x%" PRIx64 " that is already in use by a command buffer.",
9967                             HandleToUint64(event));
9971     // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9972     // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9973     // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9974     for (auto queue_data : dev_data->queueMap) {
9975         auto event_entry = queue_data.second.eventToStageMap.find(event);
9976         if (event_entry != queue_data.second.eventToStageMap.end()) {
9977             event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9980     if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
9984 static bool PreCallValidateQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
9985 const VkBindSparseInfo *pBindInfo, VkFence fence) {
9986 auto pFence = GetFenceNode(dev_data, fence);
9987 bool skip = ValidateFenceForSubmit(dev_data, pFence);
9992 unordered_set<VkSemaphore> signaled_semaphores;
9993 unordered_set<VkSemaphore> unsignaled_semaphores;
9994 unordered_set<VkSemaphore> internal_semaphores;
9995 for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9996 const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9998 std::vector<SEMAPHORE_WAIT> semaphore_waits;
9999 std::vector<VkSemaphore> semaphore_signals;
10000 for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10001 VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10002 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10003 if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
10004 if (unsignaled_semaphores.count(semaphore) ||
10005 (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
10006 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10007 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10008 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10009 HandleToUint64(queue), HandleToUint64(semaphore));
10011 signaled_semaphores.erase(semaphore);
10012 unsignaled_semaphores.insert(semaphore);
10015 if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
10016 internal_semaphores.insert(semaphore);
10019 for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10020 VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10021 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10022 if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
10023 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
10024 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10025 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10026 "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
10027 " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
10028 HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
10030 unsignaled_semaphores.erase(semaphore);
10031 signaled_semaphores.insert(semaphore);
10035 // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound
10036 std::unordered_set<IMAGE_STATE *> sparse_images;
10037 // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
10038 for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
10039 const auto &image_bind = bindInfo.pImageBinds[i];
10040 auto image_state = GetImageState(dev_data, image_bind.image);
10041 sparse_images.insert(image_state);
10042 if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
10043 // For now just warning if sparse image binding occurs without calling to get reqs first
10044 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10045 HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
10046 "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
10047 " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
10048 HandleToUint64(image_state->image));
10050 for (uint32_t j = 0; j < image_bind.bindCount; ++j) {
10051 if (image_bind.pBinds[j].flags & VK_IMAGE_ASPECT_METADATA_BIT) {
10052 image_state->sparse_metadata_bound = true;
10056 for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
10057 auto image_state = GetImageState(dev_data, bindInfo.pImageOpaqueBinds[i].image);
10058 sparse_images.insert(image_state);
10059 if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
10060 // For now just warning if sparse image binding occurs without calling to get reqs first
10061 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10062 HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
10063 "vkQueueBindSparse(): Binding opaque sparse memory to image 0x%" PRIx64
10064 " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
10065 HandleToUint64(image_state->image));
10068 for (const auto &sparse_image_state : sparse_images) {
10069 if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
10070 // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
10071 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10072 HandleToUint64(sparse_image_state->image), MEMTRACK_INVALID_STATE,
10073 "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
10074 " which requires a metadata aspect but no binding with VK_IMAGE_ASPECT_METADATA_BIT set was made.",
10075 HandleToUint64(sparse_image_state->image));
// PostCallRecordQueueBindSparse()
// Updates tracked state after the driver accepted a vkQueueBindSparse() call:
// fence bookkeeping, sparse memory-binding maps, semaphore wait/signal state, and
// early retirement of queued work when external sync objects hide the matching wait.
// NOTE(review): appears to expect global_lock to be held by the caller -- confirm.
static void PostCallRecordQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
                                          const VkBindSparseInfo *pBindInfo, VkFence fence) {
    uint64_t early_retire_seq = 0;
    auto pFence = GetFenceNode(dev_data, fence);
    auto pQueue = GetQueueState(dev_data, queue);
        // Internal fence: tie it to the submissions recorded below (at least one, so a
        // fence-only call can still retire the fence)
        if (pFence->scope == kSyncScopeInternal) {
            SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
            if (!bindInfoCount) {
                // No work to do, just dropping a fence in the queue by itself.
                pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                                 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
            // Retire work up until this fence early, we will not see the wait that corresponds to this signal
            early_retire_seq = pQueue->seq + pQueue->submissions.size();
            // Warn once per device that external-fence signaling disables lifecycle validation
            if (!dev_data->external_sync_warning) {
                dev_data->external_sync_warning = true;
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                        "vkQueueBindSparse(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
                        " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
                        HandleToUint64(fence), HandleToUint64(queue));
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                    HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                    HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
                                    HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
        // Per-bindInfo semaphore bookkeeping, mirroring queue-submit tracking
        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        std::vector<VkSemaphore> semaphore_externals;
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
                if (pSemaphore->scope == kSyncScopeInternal) {
                    // Record the wait against its signaler so the signal can retire, then consume it
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                    // External semaphore: temporary imports revert to internal scope after this wait
                    semaphore_externals.push_back(semaphore);
                    pSemaphore->in_use.fetch_add(1);
                    if (pSemaphore->scope == kSyncScopeExternalTemporary) {
                        pSemaphore->scope = kSyncScopeInternal;
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
                if (pSemaphore->scope == kSyncScopeInternal) {
                    // Mark pending signal at the sequence number this bindInfo will occupy
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                    // Retire work up until this submit early, we will not see the wait that corresponds to this signal
                    early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
                    if (!dev_data->external_sync_warning) {
                        dev_data->external_sync_warning = true;
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                "vkQueueBindSparse(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
                                " will disable validation of preceding command buffer lifecycle states and the in-use status of "
                                "associated objects.",
                                HandleToUint64(semaphore), HandleToUint64(queue));
        // Each bindInfo becomes one tracked "submission"; only the last carries the fence
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
    if (early_retire_seq) {
        RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
10196 VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
10198 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10199 unique_lock_t lock(global_lock);
10200 bool skip = PreCallValidateQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
10203 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
10205 VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10208 PostCallRecordQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
10213 VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10214 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10215 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10216 VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10217 if (result == VK_SUCCESS) {
10218 lock_guard_t lock(global_lock);
10219 SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
10220 sNode->signaler.first = VK_NULL_HANDLE;
10221 sNode->signaler.second = 0;
10222 sNode->signaled = false;
10223 sNode->scope = kSyncScopeInternal;
10228 static bool PreCallValidateImportSemaphore(layer_data *dev_data, VkSemaphore semaphore, const char *caller_name) {
10229 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10230 VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
10233 skip |= ValidateObjectNotInUse(dev_data, sema_node, obj_struct, caller_name, VALIDATION_ERROR_UNDEFINED);
10238 static void PostCallRecordImportSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10239 VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
10240 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10241 if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
10242 if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
10243 sema_node->scope == kSyncScopeInternal) {
10244 sema_node->scope = kSyncScopeExternalTemporary;
10246 sema_node->scope = kSyncScopeExternalPermanent;
10251 #ifdef VK_USE_PLATFORM_WIN32_KHR
10252 VKAPI_ATTR VkResult VKAPI_CALL
10253 ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
10254 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10255 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10257 PreCallValidateImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
10260 result = dev_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
10263 if (result == VK_SUCCESS) {
10264 PostCallRecordImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore,
10265 pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
10271 VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
10272 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10273 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10274 bool skip = PreCallValidateImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
10277 result = dev_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
10280 if (result == VK_SUCCESS) {
10281 PostCallRecordImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
10282 pImportSemaphoreFdInfo->flags);
10287 static void PostCallRecordGetSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10288 VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
10289 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10290 if (sema_node && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10291 // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
10292 sema_node->scope = kSyncScopeExternalPermanent;
10296 #ifdef VK_USE_PLATFORM_WIN32_KHR
10297 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
10298 const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
10300 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10301 VkResult result = dev_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
10303 if (result == VK_SUCCESS) {
10304 PostCallRecordGetSemaphore(dev_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
10310 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
10311 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10312 VkResult result = dev_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
10314 if (result == VK_SUCCESS) {
10315 PostCallRecordGetSemaphore(dev_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
10320 static bool PreCallValidateImportFence(layer_data *dev_data, VkFence fence, const char *caller_name) {
10321 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10323 if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
10324 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10325 HandleToUint64(fence), VALIDATION_ERROR_UNDEFINED,
10326 "Cannot call %s on fence 0x%" PRIx64 " that is currently in use.", caller_name, HandleToUint64(fence));
10331 static void PostCallRecordImportFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
10332 VkFenceImportFlagsKHR flags) {
10333 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10334 if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
10335 if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
10336 fence_node->scope == kSyncScopeInternal) {
10337 fence_node->scope = kSyncScopeExternalTemporary;
10339 fence_node->scope = kSyncScopeExternalPermanent;
10344 #ifdef VK_USE_PLATFORM_WIN32_KHR
10345 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
10346 const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
10347 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10348 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10349 bool skip = PreCallValidateImportFence(dev_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
10352 result = dev_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
10355 if (result == VK_SUCCESS) {
10356 PostCallRecordImportFence(dev_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
10357 pImportFenceWin32HandleInfo->flags);
10363 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
10364 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10365 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10366 bool skip = PreCallValidateImportFence(dev_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
10369 result = dev_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
10372 if (result == VK_SUCCESS) {
10373 PostCallRecordImportFence(dev_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
10378 static void PostCallRecordGetFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
10379 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10381 if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10382 // Export with reference transference becomes external
10383 fence_node->scope = kSyncScopeExternalPermanent;
10384 } else if (fence_node->scope == kSyncScopeInternal) {
10385 // Export with copy transference has a side effect of resetting the fence
10386 fence_node->state = FENCE_UNSIGNALED;
10391 #ifdef VK_USE_PLATFORM_WIN32_KHR
10392 VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
10394 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10395 VkResult result = dev_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
10397 if (result == VK_SUCCESS) {
10398 PostCallRecordGetFence(dev_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
10404 VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
10405 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10406 VkResult result = dev_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
10408 if (result == VK_SUCCESS) {
10409 PostCallRecordGetFence(dev_data, pGetFdInfo->fence, pGetFdInfo->handleType);
10414 VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
10415 const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10416 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10417 VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10418 if (result == VK_SUCCESS) {
10419 lock_guard_t lock(global_lock);
10420 dev_data->eventMap[*pEvent].needsSignaled = false;
10421 dev_data->eventMap[*pEvent].write_in_use = 0;
10422 dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
// PreCallValidateCreateSwapchainKHR()
// Validates a swapchain-creation request (func_name identifies the entry point for
// error text) against: established presentation support for this surface/device,
// the surface's existing/old swapchain relationship, the queried
// VkSurfaceCapabilitiesKHR (image count, extent, transform, composite alpha, array
// layers, usage), the queried surface formats and present modes, and
// shared-presentable-image constraints. Each failed check logs an error; a true
// return from log_msg() asks that the call be skipped.
static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
                                              SWAPCHAIN_NODE *old_swapchain_state) {
    // The surface's currently-relevant swapchain: the active one, else the retired one
    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;

    // TODO: revisit this. some of these rules are being relaxed.

    // All physical devices and queue families are required to be able
    // to present to any native window on Android; require the
    // application to have established support on any other platform.
    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
            return (qs.first.gpu == dev_data->physical_device) && qs.second;
        const auto &support = surface_state->gpu_queue_support;
        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);

        if (!is_supported) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ec,
                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
                        "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
                        "this surface for at least one queue family of this device.",
    // A surface may only have one non-retired swapchain; anything else must be oldSwapchain
    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS,
                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    HandleToUint64(pCreateInfo->oldSwapchain), DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
    if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), VALIDATION_ERROR_14600d32,
                    "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
                    pCreateInfo->imageExtent.height))

    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(dev_data->physical_device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
                    "%s: surface capabilities not retrieved for this physical device", func_name))
    } else {  // have valid capabilities
        auto &capabilities = physical_device_state->surfaceCapabilities;
        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ee,
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))

        // maxImageCount == 0 means "no upper limit"
        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f0,
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))

        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
        if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
            (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
            (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
            (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f4,
                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
                        "maxImageExtent = (%d,%d).",
                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedTransforms.
        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";

            // Here's the first part of the message:
            // NOTE(review): sprintf writes into the fixed-size scratch buffer 'str' (declared earlier in this
            // function); confirm the formatted text always fits, or prefer snprintf.
            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedTransforms) {
                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009fe, "%s.", errorString.c_str()))
        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";

            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n",
                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedCompositeAlpha) {
                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
                    sprintf(str, "    %s\n", newStr);
                    errorString += str;
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a00, "%s.", errorString.c_str()))
        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
        if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f6,
                        "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
                        pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f8,
                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
        bool foundFormat = false;
        bool foundColorSpace = false;
        bool foundMatch = false;
        for (auto const &format : physical_device_state->surface_formats) {
            if (pCreateInfo->imageFormat == format.format) {
                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
                foundFormat = true;
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                // Track whether the requested color space is supported with some other format,
                // so the error below can name the specific mismatched field
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundColorSpace = true;
            if (!foundFormat) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
                            "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
                            pCreateInfo->imageFormat))
            if (!foundColorSpace) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
                            "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
                            pCreateInfo->imageColorSpace))

    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
        // FIFO is required to always be supported
        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a02,
                        "%s called with a non-supported presentMode (i.e. %s).", func_name,
                        string_VkPresentModeKHR(pCreateInfo->presentMode)))

    // Validate state for shared presentable case
    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), DRAWSTATE_EXTENSION_NOT_ENABLED,
                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
        } else if (pCreateInfo->minImageCount != 1) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_14600ace,
                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
// Records layer bookkeeping after the driver's vkCreateSwapchainKHR returns.
// On VK_SUCCESS: builds a SWAPCHAIN_NODE (flagging it shared_presentable for
// the two VK_PRESENT_MODE_SHARED_* modes), links it to the surface state, and
// stores ownership in dev_data->swapchainMap. On failure the surface's current
// swapchain pointer is cleared instead. In either case oldSwapchain is marked
// replaced, since the spec retires it even on a failed create.
// NOTE(review): the embedded line numbers jump (10683->10685, 10686->10688,
// 10692->10694), so closing braces and the else keywords are not visible here.
10675 static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
10676                                              VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
10677                                              SWAPCHAIN_NODE *old_swapchain_state) {
10678     if (VK_SUCCESS == result) {
              // All layer-state maps are guarded by the global lock while recording.
10679         lock_guard_t lock(global_lock);
10680         auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
10681         if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
10682             VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
10683             swapchain_state->shared_presentable = true;
10685         surface_state->swapchain = swapchain_state.get();
10686         dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
          // (failure path) the surface no longer has a live swapchain
10688         surface_state->swapchain = nullptr;
10690     // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
10691     if (old_swapchain_state) {
10692         old_swapchain_state->replaced = true;
10694         surface_state->old_swapchain = old_swapchain_state;
// Layer intercept for vkCreateSwapchainKHR: validate the create info against
// queried surface capabilities/formats/present modes, call down the dispatch
// chain, then record the resulting swapchain state.
10698 VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10699                                                   const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
10700     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10701     auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
10702     auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
          // NOTE(review): "vkCreateSwapChainKHR()" below miscapitalizes the API name
          // (should be "vkCreateSwapchainKHR()"); it only affects message text.
10704     if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapChainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
10705         return VK_ERROR_VALIDATION_FAILED_EXT;
10708     VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10710     PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
// Layer intercept for vkDestroySwapchainKHR: pre-records destruction by purging
// all per-swapchain-image layer state (layout tracking, subresource map, image
// map, memory bindings), detaching the swapchain from its surface state, and
// erasing the SWAPCHAIN_NODE, before calling down the chain.
// NOTE(review): `skip` is used below but its declaration (expected around line
// 10717-10718) is not visible in this listing.
10715 VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10716     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10719     unique_lock_t lock(global_lock);
10720     auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
10721     if (swapchain_data) {
10722         // Pre-record to avoid Destroy/Create race
10723         if (swapchain_data->images.size() > 0) {
10724             for (auto swapchain_image : swapchain_data->images) {
                      // Drop layout-tracking entries for every subresource of this image.
10725                 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10726                 if (image_sub != dev_data->imageSubresourceMap.end()) {
10727                     for (auto imgsubpair : image_sub->second) {
10728                         auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10729                         if (image_item != dev_data->imageLayoutMap.end()) {
10730                             dev_data->imageLayoutMap.erase(image_item);
10733                     dev_data->imageSubresourceMap.erase(image_sub);
                  // NOTE(review): the binding clear passes kVulkanObjectTypeSwapchainKHR
                  // for what is an image handle — confirm this is intentional rather
                  // than kVulkanObjectTypeImage.
10735                 skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
10736                 dev_data->imageMap.erase(swapchain_image);
          // Unhook this swapchain from the surface it was created against.
10740         auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
10741         if (surface_state) {
10742             if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
10743             if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
10746         dev_data->swapchainMap.erase(swapchain);
      // Only forward the destroy when pre-record validation did not fail.
10749     if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
// Validates the two-call idiom for vkGetSwapchainImagesKHR: warns if the app
// passes a non-NULL pSwapchainImages without having first queried the count,
// and errors if the app-supplied count exceeds the count previously returned.
// Returns true (via `skip`, declared on a line not visible in this listing) if
// the call should be skipped.
10752 static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
10753                                                  uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
10755     if (swapchain_state && pSwapchainImages) {
10756         lock_guard_t lock(global_lock);
10757         // Compare the preliminary value of *pSwapchainImageCount with the value this time:
10758         if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
10759             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10760                             HandleToUint64(device), SWAPCHAIN_PRIOR_COUNT,
10761                             "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has "
10762                             "been seen for pSwapchainImages.");
10763         } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
10765             log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10766                     HandleToUint64(device), SWAPCHAIN_INVALID_COUNT,
10767                     "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
10768                     "value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
10769                     *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
// Records state after a successful vkGetSwapchainImagesKHR. When images are
// returned, each new handle gets an IMAGE_STATE built from a synthesized
// VkImageCreateInfo mirroring the swapchain's create info, a sentinel memory
// binding (MEMTRACKER_SWAP_CHAIN_IMAGE_KEY — swapchain images have no user
// allocation), and an initial VK_IMAGE_LAYOUT_UNDEFINED layout entry. Also
// advances the swapchain's query-state machine (QUERY_COUNT / QUERY_DETAILS).
10775 static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
10776                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
10777     lock_guard_t lock(global_lock);
10779     if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
10781     if (pSwapchainImages) {
10782         if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
10783             swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
10785         for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
10786             if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
10788             IMAGE_LAYOUT_NODE image_layout_node;
10789             image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10790             image_layout_node.format = swapchain_state->createInfo.imageFormat;
10791             // Add imageMap entries for each swapchain image
              // Synthesized create info: swapchain images are always 2D, single-mip,
              // single-sample; remaining fields are copied from the swapchain.
10792             VkImageCreateInfo image_ci = {};
10793             image_ci.flags = 0;
10794             image_ci.imageType = VK_IMAGE_TYPE_2D;
10795             image_ci.format = swapchain_state->createInfo.imageFormat;
10796             image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
10797             image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
10798             image_ci.extent.depth = 1;
10799             image_ci.mipLevels = 1;
10800             image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
10801             image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
10802             image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
10803             image_ci.usage = swapchain_state->createInfo.imageUsage;
10804             image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
10805             device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
10806             auto &image_state = device_data->imageMap[pSwapchainImages[i]];
              // Contents are undefined until first use; bind to the swapchain sentinel key.
10807             image_state->valid = false;
10808             image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10809             swapchain_state->images[i] = pSwapchainImages[i];
10810             ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10811             device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10812             device_data->imageLayoutMap[subpair] = image_layout_node;
      // Count-only query path: remember the largest count seen.
10816     if (*pSwapchainImageCount) {
10817         if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
10818             swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
10820         swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
// Layer intercept for vkGetSwapchainImagesKHR: validate the two-call idiom,
// call down the chain, and on VK_SUCCESS/VK_INCOMPLETE record the returned
// images. `result` stays VK_ERROR_VALIDATION_FAILED_EXT if validation skips
// the call (the `skip` branch lines are not visible in this listing).
10824 VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
10825                                                      VkImage *pSwapchainImages) {
10826     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10827     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10829     auto swapchain_state = GetSwapchainNode(device_data, swapchain);
10830     bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
10833     result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
10836     if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
10837         PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
// Layer intercept for vkQueuePresentKHR. Validates (under the global lock):
//  - every wait semaphore has a pending signal,
//  - each presented image index is in range, was acquired, and is in
//    PRESENT_SRC (or SHARED_PRESENT) layout,
//  - the queue family can present to each swapchain's surface (skipped on
//    Android, where presentation support is universal),
//  - VkPresentRegionsKHR rectangles fit the swapchain extent/layers, and
//  - VkPresentTimesInfoGOOGLE.swapchainCount matches pPresentInfo's.
// Then calls down the chain and, per-swapchain result, retires semaphore
// signals and marks presented images as no longer acquired.
// NOTE(review): `skip` declaration and several closing braces fall on lines
// not visible in this listing (embedded line numbers jump throughout).
10842 VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10843     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10846     lock_guard_t lock(global_lock);
10847     auto queue_state = GetQueueState(dev_data, queue);
      // A present waits on semaphores that nothing is scheduled to signal -> deadlock risk.
10849     for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10850         auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10851         if (pSemaphore && !pSemaphore->signaled) {
10852             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10853                             DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10854                             "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10855                             HandleToUint64(queue), HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
10859     for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10860         auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10861         if (swapchain_data) {
10862             if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
10864                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10865                         HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
10866                         "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
10867                         pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
10869                 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
                  // NOTE(review): image_state is dereferenced below without a null
                  // check — presumes the image was recorded at acquire time; confirm.
10870                 auto image_state = GetImageState(dev_data, image);
              // Shared-presentable images stay engine-owned; freeze their tracked layout.
10872                 if (image_state->shared_presentable) {
10873                     image_state->layout_locked = true;
10876                 if (!image_state->acquired) {
10878                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10879                         HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
10880                         "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
              // Presented images must be in PRESENT_SRC (or SHARED_PRESENT when the
              // extension is enabled) in every tracked subresource layout.
10883                 vector<VkImageLayout> layouts;
10884                 if (FindLayouts(dev_data, image, layouts)) {
10885                     for (auto layout : layouts) {
10886                         if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!dev_data->extensions.vk_khr_shared_presentable_image ||
10887                                                                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
10888                             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10889                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), VALIDATION_ERROR_11200a20,
10890                                             "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
10891                                             "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
10892                                             string_VkImageLayout(layout));
10898             // All physical devices and queue families are required to be able
10899             // to present to any native window on Android; require the
10900             // application to have established support on any other platform.
10901             if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
10902                 auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
10903                 auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
10905                 if (support_it == surface_state->gpu_queue_support.end()) {
10907                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10908                             HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE,
10909                             "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
10910                 } else if (!support_it->second) {
10912                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10913                             HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_31800a18,
10914                             "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
      // Validate the VK_KHR_incremental_present / VK_GOOGLE_display_timing pNext structs.
10919     if (pPresentInfo && pPresentInfo->pNext) {
10920         // Verify ext struct
10921         const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
10922         if (present_regions) {
10923             for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
10924                 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10925                 assert(swapchain_data);
10926                 VkPresentRegionKHR region = present_regions->pRegions[i];
10927                 for (uint32_t j = 0; j < region.rectangleCount; ++j) {
10928                     VkRectLayerKHR rect = region.pRectangles[j];
10929                     if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
10930                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10931                                         VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
10932                                         VALIDATION_ERROR_11e009da,
10933                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
10934                                         "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
10935                                         "than the corresponding swapchain's imageExtent.width (%i).",
10936                                         i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
10938                     if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
10939                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10940                                         VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
10941                                         VALIDATION_ERROR_11e009da,
10942                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
10943                                         "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
10944                                         "than the corresponding swapchain's imageExtent.height (%i).",
10945                                         i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
10947                     if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
10949                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10950                             HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_11e009dc,
10951                             "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
10952                             "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
10953                             i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
10959         const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
10960         if (present_times_info) {
10961             if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
10963                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10964                         HandleToUint64(pPresentInfo->pSwapchains[0]),
10966                         VALIDATION_ERROR_118009be,
10967                         "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
10968                         "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
10969                         "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
10970                         present_times_info->swapchainCount, pPresentInfo->swapchainCount);
      // (guarded by `skip` on a line not visible here)
10976         return VK_ERROR_VALIDATION_FAILED_EXT;
10979     VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
10981     if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
10982         // Semaphore waits occur before error generation, if the call reached
10983         // the ICD. (Confirm?)
10984         for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10985             auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10987                 pSemaphore->signaler.first = VK_NULL_HANDLE;
10988                 pSemaphore->signaled = false;
10992         for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10993             // Note: this is imperfect, in that we can get confused about what
10994             // did or didn't succeed-- but if the app does that, it's confused
10995             // itself just as much.
10996             auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
10998             if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
11000             // Mark the image as having been released to the WSI
11001             auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11002             auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11003             auto image_state = GetImageState(dev_data, image);
11004             image_state->acquired = false;
11007     // Note: even though presentation is directed to a queue, there is no
11008     // direct ordering between QP and subsequent work, so QP (and its
11009     // semaphore waits) /never/ participate in any completion proof.
// Validates vkCreateSharedSwapchainsKHR by collecting surface/old-swapchain
// state for each create info and reusing the single-swapchain validator.
// Side effect: fills the caller-owned surface_state / old_swapchain_state
// vectors, which the post-record step consumes.
11015 static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
11016                                                      const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11017                                                      std::vector<SURFACE_STATE *> &surface_state,
11018                                                      std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11019     if (pCreateInfos) {
11020         lock_guard_t lock(global_lock);
11021         for (uint32_t i = 0; i < swapchainCount; i++) {
11022             surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
11023             old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
11024             std::stringstream func_name;
              // NOTE(review): the bracketed value is swapchainCount (the total),
              // not the loop index i — confirm whether the index was intended.
11025             func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
11026             if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
11027                                                   old_swapchain_state[i])) {
// Records state after vkCreateSharedSwapchainsKHR: mirrors the single-swapchain
// post-record for each entry — on success creates and links SWAPCHAIN_NODEs,
// on failure clears each surface's swapchain pointer; in both cases marks each
// oldSwapchain replaced per the spec.
// NOTE(review): else keywords / closing braces sit on lines not visible here
// (embedded numbering jumps 11047 -> 11050 and 11057 -> 11059).
11035 static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
11036                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11037                                                     std::vector<SURFACE_STATE *> &surface_state,
11038                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11039     if (VK_SUCCESS == result) {
11040         for (uint32_t i = 0; i < swapchainCount; i++) {
11041             auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
11042             if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
11043                 VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
11044                 swapchain_state->shared_presentable = true;
11046             surface_state[i]->swapchain = swapchain_state.get();
11047             dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
          // (failure path)
11050         for (uint32_t i = 0; i < swapchainCount; i++) {
11051             surface_state[i]->swapchain = nullptr;
11054     // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
11055     for (uint32_t i = 0; i < swapchainCount; i++) {
11056         if (old_swapchain_state[i]) {
11057             old_swapchain_state[i]->replaced = true;
11059         surface_state[i]->old_swapchain = old_swapchain_state[i];
// Layer intercept for vkCreateSharedSwapchainsKHR (VK_KHR_display_swapchain):
// validate all create infos, call down the chain, then record results.
// NOTE(review): `result` used below is assigned from the dispatch call; the
// "VkResult result =" portion falls on line 11076, not visible in this listing.
11064 VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11065                                                          const VkSwapchainCreateInfoKHR *pCreateInfos,
11066                                                          const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11067     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11068     std::vector<SURFACE_STATE *> surface_state;
11069     std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
11071     if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11072                                                  old_swapchain_state)) {
11073         return VK_ERROR_VALIDATION_FAILED_EXT;
11077         dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11079     PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11080                                             old_swapchain_state);
// Validates vkAcquireNextImageKHR: requires at least one of semaphore/fence,
// rejects an already-signaled semaphore, checks the fence is submittable,
// rejects acquiring from a replaced swapchain, enforces the acquirable-image
// budget (imageCount - minImageCount, from queried surface capabilities), and
// warns if the app never called vkGetSwapchainImagesKHR. Returns `skip`
// (declared/returned on lines not visible in this listing).
11085 static bool PreCallValidateAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11086                                                VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11088     if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11089         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11090                         HandleToUint64(device), DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE,
11091                         "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
11092                         "determine the completion of this operation.");
11095     auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
11096     if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
11097         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11098                         HandleToUint64(semaphore), VALIDATION_ERROR_16400a0c,
11099                         "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state.");
11102     auto pFence = GetFenceNode(dev_data, fence);
11104         skip |= ValidateFenceForSubmit(dev_data, pFence);
      // NOTE(review): swapchain_data is dereferenced without a null check —
      // presumes the handle is valid; confirm against the surrounding code.
11107     auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
11108     if (swapchain_data->replaced) {
11109         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11110                         HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_REPLACED,
11111                         "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still present any images it "
11112                         "has acquired, but cannot acquire any more.");
      // Only enforce the acquire budget if the app actually queried surface caps.
11115     auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11116     if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
11117         uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
11118                                                  [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
11119         if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
11121             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11122                     HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES,
                      // (the PRIxLEAST64 argument is supplied on a line not visible here)
11123                     "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
11128     if (swapchain_data->images.size() == 0) {
11129         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11130                         HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND,
11131                         "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
11132                         "vkGetSwapchainImagesKHR after swapchain creation.");
// Records state after a successful acquire: marks the fence in-flight and the
// semaphore signaled (both with a null signaler, since acquire is not a queue
// operation and cannot participate in completion proofs), then flags the
// acquired image and propagates the swapchain's shared_presentable bit.
11137 static void PostCallRecordAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11138                                               VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11139     auto pFence = GetFenceNode(dev_data, fence);
11140     if (pFence && pFence->scope == kSyncScopeInternal) {
11141         // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
11143         pFence->state = FENCE_INFLIGHT;
11144         pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
11147     auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
11148     if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
11149         // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
11150         // temporary import
11151         pSemaphore->signaled = true;
11152         pSemaphore->signaler.first = VK_NULL_HANDLE;
11155     // Mark the image as acquired.
11156     auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
11157     auto image = swapchain_data->images[*pImageIndex];
11158     auto image_state = GetImageState(dev_data, image);
11159     image_state->acquired = true;
11160     image_state->shared_presentable = swapchain_data->shared_presentable;
// Layer intercept for vkAcquireNextImageKHR: validate under the global lock,
// call down the chain, and record on VK_SUCCESS/VK_SUBOPTIMAL_KHR (both mean
// an image was actually acquired).
// NOTE(review): the lock-release before, and re-acquire after, the dispatch
// call fall on lines not visible in this listing (numbering jumps 11168->11171
// and 11173->11176).
11163 VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11164                                                    VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11165     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11167     unique_lock_t lock(global_lock);
11168     bool skip = PreCallValidateAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
11171     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11173     VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11176     if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11177         PostCallRecordAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
// Layer intercept for vkEnumeratePhysicalDevices. Tracks the two-call idiom on
// the instance (QUERY_COUNT when pPhysicalDevices is NULL, QUERY_DETAILS when
// non-NULL), warning if the app skipped the count query or passes a mismatched
// count. After the downchain call, caches the device count or, on success,
// creates per-physical-device state and snapshots its supported features.
// NOTE(review): `skip` declaration and the `if (skip)` guard before the early
// return are on lines not visible in this listing.
11184 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11185                                                         VkPhysicalDevice *pPhysicalDevices) {
11187     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11188     assert(instance_data);
11190     // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11191     if (NULL == pPhysicalDevices) {
11192         instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11194         if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
11195             // Flag warning here. You can call this without having queried the count, but it may not be
11196             // robust on platforms with multiple physical devices.
11197             skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11198                             0, DEVLIMITS_MISSING_QUERY_COUNT,
11199                             "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call "
11200                             "vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11201         }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11202         else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
11203             // Having actual count match count from app is not a requirement, so this can be a warning
11204             skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11205                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
11206                             "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by "
11207                             "this instance is %u.",
11208                             *pPhysicalDeviceCount, instance_data->physical_devices_count);
11210         instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11213         return VK_ERROR_VALIDATION_FAILED_EXT;
11215     VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11216     if (NULL == pPhysicalDevices) {
11217         instance_data->physical_devices_count = *pPhysicalDeviceCount;
11218     } else if (result == VK_SUCCESS) {  // Save physical devices
11219         for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11220             auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11221             phys_device_state.phys_device = pPhysicalDevices[i];
11222             // Init actual features for each physical device
11223             instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11229 // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
// Warns when the non-NULL-properties call precedes the count query, or when the
// passed-in count disagrees with the largest count previously returned for this
// physical device. Advances the query-state machine to QUERY_DETAILS.
// NOTE(review): the `if (!qfp_null)` guard that consumes the qfp_null parameter
// falls on lines not visible in this listing (numbering jumps 11236 -> 11239).
11230 static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11231                                                                  PHYSICAL_DEVICE_STATE *pd_state,
11232                                                                  uint32_t requested_queue_family_property_count, bool qfp_null,
11233                                                                  const char *caller_name) {
11236         // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
11237         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11239                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11240                 HandleToUint64(pd_state->phys_device), DEVLIMITS_MISSING_QUERY_COUNT,
11241                 "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
11242                 "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
11243                 caller_name, caller_name);
11244             // Then verify that pCount that is passed in on second call matches what was returned
11245         } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
11247                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11248                 HandleToUint64(pd_state->phys_device), DEVLIMITS_COUNT_MISMATCH,
11249                 "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
11250                 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
11251                 ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
11252                 "previously obtained by calling %s with NULL pQueueFamilyProperties.",
11253                 caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
11255         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
// Thin wrapper: validates vkGetPhysicalDeviceQueueFamilyProperties via the
// shared two-call-idiom checker, passing the caller name for messages.
11261 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11262                                                                   PHYSICAL_DEVICE_STATE *pd_state,
11263                                                                   uint32_t *pQueueFamilyPropertyCount,
11264                                                                   VkQueueFamilyProperties *pQueueFamilyProperties) {
11265     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11266                                                                 (nullptr == pQueueFamilyProperties),
11267                                                                 "vkGetPhysicalDeviceQueueFamilyProperties()");
// Thin wrapper: validates the 2/2KHR variant via the shared two-call-idiom
// checker ("[KHR]" in the name covers both the core and extension entry points).
11270 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_layer_data *instance_data,
11271                                                                    PHYSICAL_DEVICE_STATE *pd_state,
11272                                                                    uint32_t *pQueueFamilyPropertyCount,
11273                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11274     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11275                                                                 (nullptr == pQueueFamilyProperties),
11276                                                                 "vkGetPhysicalDeviceQueueFamilyProperties2[KHR]()");
11279 // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
// Count-only call: record the count and move UNCALLED -> QUERY_COUNT.
// Properties call: move to QUERY_DETAILS, grow (never shrink) the cached count
// and properties vector, and copy the per-family VkQueueFamilyProperties.
11280 static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11281                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11282     if (!pQueueFamilyProperties) {
11283         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
11284             pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11285         pd_state->queue_family_count = count;
11286     } else {  // Save queue family properties
11287         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11288         pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
11290         pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
11291         for (uint32_t i = 0; i < count; ++i) {
11292             pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
11297 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11298 VkQueueFamilyProperties *pQueueFamilyProperties) {
11299 VkQueueFamilyProperties2KHR *pqfp = nullptr;
11300 std::vector<VkQueueFamilyProperties2KHR> qfp;
11302 if (pQueueFamilyProperties) {
11303 for (uint32_t i = 0; i < count; ++i) {
11304 qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
11305 qfp[i].pNext = nullptr;
11306 qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
11310 StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
11313 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11314 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11315 StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
11318 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
11319 uint32_t *pQueueFamilyPropertyCount,
11320 VkQueueFamilyProperties *pQueueFamilyProperties) {
11321 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11322 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11323 assert(physical_device_state);
11324 unique_lock_t lock(global_lock);
11326 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
11327 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11333 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
11334 pQueueFamilyProperties);
11337 PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
11340 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
11341 uint32_t *pQueueFamilyPropertyCount,
11342 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11343 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11344 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11345 assert(physical_device_state);
11346 unique_lock_t lock(global_lock);
11347 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11348 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11352 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
11353 pQueueFamilyProperties);
11355 PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11356 pQueueFamilyProperties);
11359 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
11360 uint32_t *pQueueFamilyPropertyCount,
11361 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11362 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11363 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11364 assert(physical_device_state);
11365 unique_lock_t lock(global_lock);
11366 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11367 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11371 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
11372 pQueueFamilyProperties);
11374 PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11375 pQueueFamilyProperties);
11378 template <typename TCreateInfo, typename FPtr>
11379 static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
11380 VkSurfaceKHR *pSurface, FPtr fptr) {
11381 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11383 // Call down the call chain:
11384 VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11386 if (result == VK_SUCCESS) {
11387 unique_lock_t lock(global_lock);
11388 instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11395 VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11397 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11398 unique_lock_t lock(global_lock);
11399 auto surface_state = GetSurfaceState(instance_data, surface);
11401 if ((surface_state) && (surface_state->swapchain)) {
11402 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11403 HandleToUint64(instance), VALIDATION_ERROR_26c009e4,
11404 "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
11407 // Pre-record to avoid Destroy/Create race
11408 instance_data->surface_map.erase(surface);
11412 instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11416 VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
11417 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11418 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Thin wrapper over the common CreateSurface helper.
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
// Thin wrapper over the common CreateSurface helper.
VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateIOSSurfaceMVK);
}
#endif  // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
// Thin wrapper over the common CreateSurface helper.
VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMacOSSurfaceMVK);
}
#endif  // VK_USE_PLATFORM_MACOS_MVK
11442 #ifdef VK_USE_PLATFORM_MIR_KHR
11443 VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11444 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11445 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11448 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11449 uint32_t queueFamilyIndex, MirConnection *connection) {
11451 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11453 unique_lock_t lock(global_lock);
11454 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11456 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
11457 "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
11461 if (skip) return VK_FALSE;
11463 // Call down the call chain:
11465 instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
11469 #endif // VK_USE_PLATFORM_MIR_KHR
11471 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
11472 VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11473 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11474 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11477 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11478 uint32_t queueFamilyIndex,
11479 struct wl_display *display) {
11481 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11483 unique_lock_t lock(global_lock);
11484 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11486 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
11487 "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
11491 if (skip) return VK_FALSE;
11493 // Call down the call chain:
11495 instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
11499 #endif // VK_USE_PLATFORM_WAYLAND_KHR
11501 #ifdef VK_USE_PLATFORM_WIN32_KHR
11502 VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11503 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11504 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11507 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
11508 uint32_t queueFamilyIndex) {
11510 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11512 unique_lock_t lock(global_lock);
11513 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11515 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
11516 "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
11520 if (skip) return VK_FALSE;
11522 // Call down the call chain:
11523 VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
11527 #endif // VK_USE_PLATFORM_WIN32_KHR
11529 #ifdef VK_USE_PLATFORM_XCB_KHR
11530 VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11531 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11532 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11535 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11536 uint32_t queueFamilyIndex, xcb_connection_t *connection,
11537 xcb_visualid_t visual_id) {
11539 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11541 unique_lock_t lock(global_lock);
11542 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11544 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
11545 "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
11549 if (skip) return VK_FALSE;
11551 // Call down the call chain:
11552 VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
11553 connection, visual_id);
11557 #endif // VK_USE_PLATFORM_XCB_KHR
11559 #ifdef VK_USE_PLATFORM_XLIB_KHR
11560 VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11561 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11562 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11565 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11566 uint32_t queueFamilyIndex, Display *dpy,
11567 VisualID visualID) {
11569 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11571 unique_lock_t lock(global_lock);
11572 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11574 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
11575 "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
11579 if (skip) return VK_FALSE;
11581 // Call down the call chain:
11583 instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
11587 #endif // VK_USE_PLATFORM_XLIB_KHR
11589 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11590 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
11591 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11593 unique_lock_t lock(global_lock);
11594 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11598 instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
11600 if (result == VK_SUCCESS) {
11601 physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11602 physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
11608 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
11609 VkPhysicalDevice physicalDevice,
11610 VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11611 unique_lock_t lock(global_lock);
11612 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11613 physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11614 physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
11617 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
11618 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11619 VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11620 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11623 instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
11625 if (result == VK_SUCCESS) {
11626 PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
11632 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
11633 VkPhysicalDevice physicalDevice,
11634 VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11635 unique_lock_t lock(global_lock);
11636 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11637 physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11638 physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
11639 physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
11640 physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
11641 physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
11642 physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
11643 physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
11644 physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
11645 physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
11646 physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
11647 physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
11650 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11651 VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11652 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11655 instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
11657 if (result == VK_SUCCESS) {
11658 PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
11664 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
11665 VkSurfaceKHR surface, VkBool32 *pSupported) {
11667 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11669 unique_lock_t lock(global_lock);
11670 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11671 auto surface_state = GetSurfaceState(instance_data, surface);
11673 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
11674 "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
11678 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11681 instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
11683 if (result == VK_SUCCESS) {
11684 surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
11690 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11691 uint32_t *pPresentModeCount,
11692 VkPresentModeKHR *pPresentModes) {
11694 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11695 unique_lock_t lock(global_lock);
11696 // TODO: this isn't quite right. available modes may differ by surface AND physical device.
11697 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11698 auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
11700 if (pPresentModes) {
11701 // Compare the preliminary value of *pPresentModeCount with the value this time:
11702 auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
11703 switch (call_state) {
11705 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11706 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11707 DEVLIMITS_MUST_QUERY_COUNT,
11708 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior "
11709 "positive value has been seen for pPresentModeCount.");
11712 // both query count and query details
11713 if (*pPresentModeCount != prev_mode_count) {
11714 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11715 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11716 DEVLIMITS_COUNT_MISMATCH,
11717 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs "
11718 "from the value (%u) that was returned when pPresentModes was NULL.",
11719 *pPresentModeCount, prev_mode_count);
11726 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11728 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
11731 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11734 if (*pPresentModeCount) {
11735 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11736 if (*pPresentModeCount > physical_device_state->present_modes.size())
11737 physical_device_state->present_modes.resize(*pPresentModeCount);
11739 if (pPresentModes) {
11740 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11741 for (uint32_t i = 0; i < *pPresentModeCount; i++) {
11742 physical_device_state->present_modes[i] = pPresentModes[i];
11750 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11751 uint32_t *pSurfaceFormatCount,
11752 VkSurfaceFormatKHR *pSurfaceFormats) {
11754 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11755 unique_lock_t lock(global_lock);
11756 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11757 auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
11759 if (pSurfaceFormats) {
11760 auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
11762 switch (call_state) {
11764 // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application
11766 // previously call this function with a NULL value of pSurfaceFormats:
11767 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11768 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11769 DEVLIMITS_MUST_QUERY_COUNT,
11770 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior "
11771 "positive value has been seen for pSurfaceFormats.");
11774 if (prev_format_count != *pSurfaceFormatCount) {
11775 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11776 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11777 DEVLIMITS_COUNT_MISMATCH,
11778 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with "
11779 "pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned "
11780 "when pSurfaceFormatCount was NULL.",
11781 *pSurfaceFormatCount, prev_format_count);
11788 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11790 // Call down the call chain:
11791 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
11794 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11797 if (*pSurfaceFormatCount) {
11798 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11799 if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
11800 physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
11802 if (pSurfaceFormats) {
11803 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11804 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11805 physical_device_state->surface_formats[i] = pSurfaceFormats[i];
11812 static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
11813 uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
11814 unique_lock_t lock(global_lock);
11815 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11816 if (*pSurfaceFormatCount) {
11817 if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
11818 physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
11820 if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
11821 physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
11823 if (pSurfaceFormats) {
11824 if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
11825 physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
11827 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11828 physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
11833 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
11834 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11835 uint32_t *pSurfaceFormatCount,
11836 VkSurfaceFormat2KHR *pSurfaceFormats) {
11837 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11838 auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
11839 pSurfaceFormatCount, pSurfaceFormats);
11840 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11841 PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
11846 // VK_EXT_debug_utils commands
11847 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
11848 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11849 VkResult result = VK_SUCCESS;
11850 if (pNameInfo->pObjectName) {
11851 dev_data->report_data->debugUtilsObjectNameMap->insert(
11852 std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
11854 dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
11856 if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectNameEXT) {
11857 result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
11862 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
11863 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11864 VkResult result = VK_SUCCESS;
11865 if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectTagEXT) {
11866 result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
11871 VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11872 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11873 BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
11874 if (nullptr != dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
11875 dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
11879 VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
11880 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11881 if (nullptr != dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
11882 dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
11884 EndQueueDebugUtilsLabel(dev_data->report_data, queue);
11887 VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11888 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11889 InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
11890 if (nullptr != dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
11891 dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
11895 VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
11896 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11897 BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
11898 if (nullptr != dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
11899 dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
11903 VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
11904 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11905 if (nullptr != dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
11906 dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
11908 EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
11911 VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
11912 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11913 InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
11914 if (nullptr != dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
11915 dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
11919 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
11920 const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
11921 const VkAllocationCallbacks *pAllocator,
11922 VkDebugUtilsMessengerEXT *pMessenger) {
11923 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11924 VkResult result = instance_data->dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
11926 if (VK_SUCCESS == result) {
11927 result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
11932 VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
11933 const VkAllocationCallbacks *pAllocator) {
11934 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11935 instance_data->dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
11936 layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
11939 VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
11940 VkDebugUtilsMessageTypeFlagsEXT messageTypes,
11941 const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
11942 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11943 instance_data->dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
11946 // VK_EXT_debug_report commands
11947 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
11948 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11949 const VkAllocationCallbacks *pAllocator,
11950 VkDebugReportCallbackEXT *pMsgCallback) {
11951 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11952 VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11953 if (VK_SUCCESS == res) {
11954 lock_guard_t lock(global_lock);
11955 res = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11960 VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
11961 const VkAllocationCallbacks *pAllocator) {
11962 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11963 instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11964 lock_guard_t lock(global_lock);
11965 layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
11968 VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
11969 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
11970 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11971 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11972 instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11975 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11976 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
// vkEnumerateDeviceLayerProperties: identical to the instance variant — reports only
// this layer (global_layer); physicalDevice is intentionally unused.
11979 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11980 VkLayerProperties *pProperties) {
11981 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
// vkEnumerateInstanceExtensionProperties: when queried by this layer's own name,
// report the layer's instance extensions; any other layer name yields
// VK_ERROR_LAYER_NOT_PRESENT (instance-level queries are not forwarded down-chain).
11984 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
11985 VkExtensionProperties *pProperties) {
11986 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11987 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11989 return VK_ERROR_LAYER_NOT_PRESENT;
// vkEnumerateDeviceExtensionProperties: if the query names this layer, answer with the
// layer's own device extensions; otherwise forward down the chain (with a NULL layer
// name) so the ICD reports its extensions.
11992 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
11993 uint32_t *pCount, VkExtensionProperties *pProperties) {
11994 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11995 return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
// physicalDevice must be valid for the down-chain query below.
11997 assert(physicalDevice);
11999 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12000 return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
// Validation for vkEnumeratePhysicalDeviceGroups[KHR]: warns when the app requests
// group details without first querying the count, and when the app-supplied count
// differs from the count this layer previously recorded. Returns skip (declared on a
// line outside this excerpt); the error path for an unknown instance is at the bottom.
12003 static bool PreCallValidateEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
12004 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12005 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12008 if (instance_data) {
12009 // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
12010 if (NULL != pPhysicalDeviceGroupProperties) {
12011 if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
12012 // Flag warning here. You can call this without having queried the count, but it may not be
12013 // robust on platforms with multiple physical devices.
12014 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12015 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, DEVLIMITS_MISSING_QUERY_COUNT,
12016 "Call sequence has vkEnumeratePhysicalDeviceGroups() w/ non-NULL "
12017 "pPhysicalDeviceGroupProperties. You should first call vkEnumeratePhysicalDeviceGroups() w/ "
12018 "NULL pPhysicalDeviceGroupProperties to query pPhysicalDeviceGroupCount.");
12019 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
12020 else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
12021 // Having actual count match count from app is not a requirement, so this can be a warning
12022 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12023 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
12024 "Call to vkEnumeratePhysicalDeviceGroups() w/ pPhysicalDeviceGroupCount value %u, but actual count "
12025 "supported by this instance is %u.",
12026 *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
// Else-branch (instance_data was NULL): flag use of an unrecognized instance handle.
// The branch's opening `} else {` line is outside the visible excerpt.
12030 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0,
12031 DEVLIMITS_INVALID_INSTANCE, "Invalid instance (0x%" PRIx64 ") passed into vkEnumeratePhysicalDeviceGroups().",
12032 HandleToUint64(instance));
// State recording before the down-chain call: advance the instance's enumeration
// state machine — a NULL properties pointer is a count query (QUERY_COUNT), a
// non-NULL pointer is a details query (QUERY_DETAILS).
12038 static void PreCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data,
12039 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12040 if (instance_data) {
12041 // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
12042 if (NULL == pPhysicalDeviceGroupProperties) {
12043 instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
// else-branch (non-NULL properties) — opening brace outside the visible excerpt.
12045 instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
// State recording after a successful down-chain call: on a count query, remember the
// returned group count; on a details query, register every physical device from every
// group in physical_device_map and cache its features via GetPhysicalDeviceFeatures.
12050 static void PostCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceGroupCount,
12051 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12052 if (NULL == pPhysicalDeviceGroupProperties) {
12053 instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
12054 } else { // Save physical devices
12055 for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
12056 for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
12057 VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
// operator[] creates the map entry if this device was not seen before.
12058 auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
12059 phys_device_state.phys_device = cur_phys_dev;
12060 // Init actual features for each physical device
12061 instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
// vkEnumeratePhysicalDeviceGroups (core entry point): standard validate / pre-record /
// dispatch / post-record-on-success sequence. Returns VK_ERROR_VALIDATION_FAILED_EXT
// without calling down-chain when validation decides to skip.
12067 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
12068 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12070 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12072 skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
// Early-out when validation flagged an error (the guarding `if (skip)` line is
// outside the visible excerpt).
12074 return VK_ERROR_VALIDATION_FAILED_EXT;
12076 PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
12077 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount,
12078 pPhysicalDeviceGroupProperties);
12079 if (result == VK_SUCCESS) {
12080 PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
// vkEnumeratePhysicalDeviceGroupsKHR: KHR alias of EnumeratePhysicalDeviceGroups —
// same validate/record helpers, only the down-chain dispatch entry differs.
12085 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(
12086 VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12088 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12090 skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
// Early-out on validation failure (the guarding `if (skip)` line is outside the
// visible excerpt).
12092 return VK_ERROR_VALIDATION_FAILED_EXT;
12094 PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
12095 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount,
12096 pPhysicalDeviceGroupProperties);
12097 if (result == VK_SUCCESS) {
12098 PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
// Validation for vkCreateDescriptorUpdateTemplate[KHR]. Checks, per template type:
//  - DESCRIPTOR_SET templates reference a known descriptor set layout
//    (VALIDATION_ERROR_052002bc);
//  - PUSH_DESCRIPTORS templates use a graphics or compute bind point
//    (VALIDATION_ERROR_052002be), reference a known pipeline layout
//    (VALIDATION_ERROR_052002c0), and name a set index that refers to that layout's
//    push-descriptor set layout (VALIDATION_ERROR_052002c2).
// func_name is the user-facing API name used in messages. Returns skip (declared on
// a line outside this excerpt).
12103 static bool PreCallValidateCreateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
12104 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12105 const VkAllocationCallbacks *pAllocator,
12106 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12108 const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
12109 if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
12110 auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout);
12111 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
12112 ds_uint, VALIDATION_ERROR_052002bc, "%s: Invalid pCreateInfo->descriptorSetLayout (%" PRIx64 ")", func_name,
12114 } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
12115 auto bind_point = pCreateInfo->pipelineBindPoint;
12116 bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
// `if (!valid_bp)` guard line is outside the visible excerpt.
12118 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
12119 VALIDATION_ERROR_052002be, "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name,
12120 static_cast<uint32_t>(bind_point));
12122 const auto pipeline_layout = getPipelineLayout(device_data, pCreateInfo->pipelineLayout);
12123 if (!pipeline_layout) {
12124 uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
12125 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12126 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c0,
12127 "%s: Invalid pCreateInfo->pipelineLayout (%" PRIx64 ")", func_name, pl_uint);
// pCreateInfo->set must index a set layout flagged as a push-descriptor layout.
12129 const uint32_t pd_set = pCreateInfo->set;
12130 if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
12131 !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
12132 uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
12133 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12134 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c2,
12135 "%s: pCreateInfo->set (%" PRIu32
12136 ") does not refer to the push descriptor set layout for "
12137 "pCreateInfo->pipelineLayout (%" PRIx64 ").",
12138 func_name, pd_set, pl_uint);
// After a successful create: shadow the create-info (deep copy via the safe_* struct)
// inside a TEMPLATE_STATE and index it by template handle in desc_template_map, so
// later UpdateDescriptorSetWithTemplate calls can be replayed/validated.
// TEMPLATE_STATE takes ownership of local_create_info (raw `new` here; lifetime is
// presumably managed by TEMPLATE_STATE — its definition is not in this excerpt).
12145 static void PostCallRecordCreateDescriptorUpdateTemplate(layer_data *device_data,
12146 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12147 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12148 // Shadow template createInfo for later updates
12149 safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
12150 std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
12151 device_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
// vkCreateDescriptorUpdateTemplate (core): validate under global_lock, dispatch
// down-chain, and on success record the template state. Returns
// VK_ERROR_VALIDATION_FAILED_EXT when validation skips the call.
12154 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
12155 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12156 const VkAllocationCallbacks *pAllocator,
12157 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12158 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12159 unique_lock_t lock(global_lock);
12160 bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", device_data, pCreateInfo,
12161 pAllocator, pDescriptorUpdateTemplate);
12163 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
// Dispatch (the `if (!skip)` guard and result assignment wrap-around lines are
// outside the visible excerpt; the lock is presumably dropped around the call).
12167 device_data->dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
12168 if (VK_SUCCESS == result) {
12170 PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
// vkCreateDescriptorUpdateTemplateKHR: KHR alias of CreateDescriptorUpdateTemplate —
// same validation and post-record helpers, KHR dispatch entry.
12176 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
12177 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12178 const VkAllocationCallbacks *pAllocator,
12179 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12180 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12181 unique_lock_t lock(global_lock);
12182 bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", device_data, pCreateInfo,
12183 pAllocator, pDescriptorUpdateTemplate);
12185 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
// Down-chain dispatch; the `if (!skip)` guard line is outside the visible excerpt.
12188 result = device_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator,
12189 pDescriptorUpdateTemplate);
12190 if (VK_SUCCESS == result) {
12192 PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
// Drop the layer's shadow state for a template being destroyed; the unique_ptr in the
// map releases the TEMPLATE_STATE (and its shadowed create-info).
12198 static void PreCallRecordDestroyDescriptorUpdateTemplate(layer_data *device_data,
12199 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate) {
12200 device_data->desc_template_map.erase(descriptorUpdateTemplate);
// vkDestroyDescriptorUpdateTemplate (core): erase layer state BEFORE dispatching the
// destroy, so a concurrent Create reusing the handle cannot race with stale state.
12203 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12204 const VkAllocationCallbacks *pAllocator) {
12205 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12206 unique_lock_t lock(global_lock);
12207 // Pre-record to avoid Destroy/Create race
12208 PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12210 device_data->dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
// vkDestroyDescriptorUpdateTemplateKHR: KHR alias — identical flow to the core entry
// point (pre-record erase, then KHR dispatch).
12213 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
12214 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12215 const VkAllocationCallbacks *pAllocator) {
12216 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12217 unique_lock_t lock(global_lock);
12218 // Pre-record to avoid Destroy/Create race
12219 PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12221 device_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
12224 // PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
// Look up the shadowed template; if it is unknown, bail out (the early-return body
// between the find and the Perform call is outside the visible excerpt). Otherwise
// replay the templated update into the layer's descriptor-set state.
12225 static void PostCallRecordUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
12226 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12227 const void *pData) {
12228 auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
12229 if (template_map_entry == device_data->desc_template_map.end()) {
12233 cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
// vkUpdateDescriptorSetWithTemplate (core): dispatch down-chain, then mirror the
// update into layer state via the shared post-record helper.
12236 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
12237 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12238 const void *pData) {
12239 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12240 device_data->dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
12242 PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
// vkUpdateDescriptorSetWithTemplateKHR: KHR alias — same flow as the core entry,
// KHR dispatch table slot.
12245 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
12246 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12247 const void *pData) {
12248 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12249 device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
12251 PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
// vkCmdPushDescriptorSetWithTemplateKHR: minimal validation only — checks the command
// is legal for the command buffer's current state via ValidateCmd, then dispatches.
// TODO noted upstream-style: no per-descriptor validation is performed here.
12254 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
12255 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12256 VkPipelineLayout layout, uint32_t set, const void *pData) {
12257 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12258 unique_lock_t lock(global_lock);
12260 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12261 // Minimal validation for command buffer state
// Guard (`if (cb_state)`), skip declaration, lock release and `if (!skip)` lines are
// outside the visible excerpt.
12263 skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "vkCmdPushDescriptorSetWithTemplateKHR()");
12268 dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
// After vkGetPhysicalDeviceDisplayPlanePropertiesKHR returns: advance the physical
// device's query state machine. A non-zero count moves state to at least QUERY_COUNT
// and caches the plane count; the later branch promotes to QUERY_DETAILS (its guard
// lines sit partly outside the visible excerpt).
12272 static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
12273 VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12274 VkDisplayPlanePropertiesKHR *pProperties) {
12275 unique_lock_t lock(global_lock);
12276 auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
12278 if (*pPropertyCount) {
// State only ever moves forward (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS).
12279 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
12280 physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
12282 physical_device_state->display_plane_property_count = *pPropertyCount;
12285 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
12286 physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
// vkGetPhysicalDeviceDisplayPlanePropertiesKHR: dispatch down-chain, and record the
// query state on either full success or VK_INCOMPLETE (a partial write still tells
// us the count/details were queried).
12291 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12292 VkDisplayPlanePropertiesKHR *pProperties) {
12293 VkResult result = VK_SUCCESS;
12294 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12296 result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
12298 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12299 PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
// Shared check for APIs that take a display planeIndex: warn if the app never queried
// plane properties first, and flag VALIDATION_ERROR_29c009c2 when planeIndex is out of
// the recorded [0, display_plane_property_count) range. api_name appears in messages.
// The skip declaration and `skip |=` prefixes around the log_msg calls are on lines
// outside the visible excerpt.
12305 static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
12306 VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12307 const char *api_name) {
12309 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
12310 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
12312 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12313 HandleToUint64(physicalDevice), SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY,
12314 "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
12316 if (planeIndex >= physical_device_state->display_plane_property_count) {
12318 instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12319 HandleToUint64(physicalDevice), VALIDATION_ERROR_29c009c2,
12320 "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR. "
12321 "Do you have the plane index hardcoded?",
12322 api_name, physical_device_state->display_plane_property_count - 1);
// Pre-call validation for vkGetDisplayPlaneSupportedDisplaysKHR: just the shared
// planeIndex/query-order check, performed under global_lock.
12328 static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12329 uint32_t planeIndex) {
12331 lock_guard_t lock(global_lock);
12332 skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12333 "vkGetDisplayPlaneSupportedDisplaysKHR");
// vkGetDisplayPlaneSupportedDisplaysKHR: validate, then dispatch down-chain unless
// validation skipped (the `if (!skip)` guard and result assignment lines are outside
// the visible excerpt); result defaults to VK_ERROR_VALIDATION_FAILED_EXT.
12337 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12338 uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
12339 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12340 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12341 bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
12344 instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
// Pre-call validation for vkGetDisplayPlaneCapabilitiesKHR: shared planeIndex check
// under global_lock, mirroring the SupportedDisplays variant.
12349 static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12350 uint32_t planeIndex) {
12352 lock_guard_t lock(global_lock);
12353 skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12354 "vkGetDisplayPlaneCapabilitiesKHR");
// vkGetDisplayPlaneCapabilitiesKHR: validate planeIndex, then dispatch down-chain
// unless skipped (guard line outside the visible excerpt); result defaults to
// VK_ERROR_VALIDATION_FAILED_EXT when the call is suppressed.
12358 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
12359 uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
12360 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12361 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12362 bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
12365 result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
// vkDebugMarkerSetObjectNameEXT: maintain the layer's object-handle -> debug-name map
// (insert when a name is given, erase when pObjectName is NULL/empty pointer), then
// forward down-chain. Map update happens under global_lock.
12371 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
12372 unique_lock_t lock(global_lock);
12373 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12374 if (pNameInfo->pObjectName) {
12375 device_data->report_data->debugObjectNameMap->insert(
12376 std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
// else-branch (no name supplied): remove any prior name for this object handle.
12378 device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
12381 VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
// vkDebugMarkerSetObjectTagEXT: pure pass-through — the layer keeps no tag state.
12385 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
12386 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12387 VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
// vkCmdDebugMarkerBeginEXT: minimal command-buffer-state validation via ValidateCmd,
// then dispatch. Guard/skip/lock-release lines are outside the visible excerpt.
12391 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12392 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12393 unique_lock_t lock(global_lock);
12395 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12396 // Minimal validation for command buffer state
12398 skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
12402 device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
// vkCmdDebugMarkerEndEXT: same minimal ValidateCmd pattern as the Begin marker, then
// dispatch down-chain.
12406 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
12407 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12408 unique_lock_t lock(global_lock);
12410 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12411 // Minimal validation for command buffer state
12413 skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
12417 device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
// vkCmdDebugMarkerInsertEXT: pure pass-through — no validation or state tracking
// (note: unlike Begin/End, no ValidateCmd is performed here).
12421 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12422 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12423 device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
// vkCmdSetDiscardRectangleEXT: minimal command-buffer-state validation via
// ValidateCmd, then dispatch. Guard/skip/lock-release lines are outside the
// visible excerpt.
12426 VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
12427 uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
12428 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12429 unique_lock_t lock(global_lock);
12431 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12432 // Minimal validation for command buffer state
12434 skip |= ValidateCmd(dev_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
12439 dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
12440 pDiscardRectangles);
// vkCmdSetSampleLocationsEXT: minimal command-buffer-state validation via ValidateCmd,
// then dispatch down-chain; pSampleLocationsInfo contents are not inspected here.
12444 VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
12445 const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
12446 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12447 unique_lock_t lock(global_lock);
12449 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12450 // Minimal validation for command buffer state
12452 skip |= ValidateCmd(dev_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
12457 dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
12461 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
12462 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
12463 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);
12465 // Map of all APIs to be intercepted by this layer
12466 static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
12467 {"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
12468 {"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
12469 {"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
12470 {"vkCreateInstance", (void *)CreateInstance},
12471 {"vkCreateDevice", (void *)CreateDevice},
12472 {"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
12473 {"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
12474 {"vkDestroyInstance", (void *)DestroyInstance},
12475 {"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
12476 {"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
12477 {"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
12478 {"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
12479 {"vkCreateDescriptorUpdateTemplate", (void *)CreateDescriptorUpdateTemplate},
12480 {"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
12481 {"vkDestroyDescriptorUpdateTemplate", (void *)DestroyDescriptorUpdateTemplate},
12482 {"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
12483 {"vkUpdateDescriptorSetWithTemplate", (void *)UpdateDescriptorSetWithTemplate},
12484 {"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
12485 {"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
12486 {"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
12487 {"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
12488 {"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
12489 {"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
12490 {"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
12491 {"vkQueuePresentKHR", (void *)QueuePresentKHR},
12492 {"vkQueueSubmit", (void *)QueueSubmit},
12493 {"vkWaitForFences", (void *)WaitForFences},
12494 {"vkGetFenceStatus", (void *)GetFenceStatus},
12495 {"vkQueueWaitIdle", (void *)QueueWaitIdle},
12496 {"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
12497 {"vkGetDeviceQueue", (void *)GetDeviceQueue},
12498 {"vkGetDeviceQueue2", (void *)GetDeviceQueue2},
12499 {"vkDestroyDevice", (void *)DestroyDevice},
12500 {"vkDestroyFence", (void *)DestroyFence},
12501 {"vkResetFences", (void *)ResetFences},
12502 {"vkDestroySemaphore", (void *)DestroySemaphore},
12503 {"vkDestroyEvent", (void *)DestroyEvent},
12504 {"vkDestroyQueryPool", (void *)DestroyQueryPool},
12505 {"vkDestroyBuffer", (void *)DestroyBuffer},
12506 {"vkDestroyBufferView", (void *)DestroyBufferView},
12507 {"vkDestroyImage", (void *)DestroyImage},
12508 {"vkDestroyImageView", (void *)DestroyImageView},
12509 {"vkDestroyShaderModule", (void *)DestroyShaderModule},
12510 {"vkDestroyPipeline", (void *)DestroyPipeline},
12511 {"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
12512 {"vkDestroySampler", (void *)DestroySampler},
12513 {"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
12514 {"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
12515 {"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
12516 {"vkDestroyRenderPass", (void *)DestroyRenderPass},
12517 {"vkCreateBuffer", (void *)CreateBuffer},
12518 {"vkCreateBufferView", (void *)CreateBufferView},
12519 {"vkCreateImage", (void *)CreateImage},
12520 {"vkCreateImageView", (void *)CreateImageView},
12521 {"vkCreateFence", (void *)CreateFence},
12522 {"vkCreatePipelineCache", (void *)CreatePipelineCache},
12523 {"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
12524 {"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
12525 {"vkMergePipelineCaches", (void *)MergePipelineCaches},
12526 {"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
12527 {"vkCreateComputePipelines", (void *)CreateComputePipelines},
12528 {"vkCreateSampler", (void *)CreateSampler},
12529 {"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
12530 {"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
12531 {"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
12532 {"vkResetDescriptorPool", (void *)ResetDescriptorPool},
12533 {"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
12534 {"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
12535 {"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
12536 {"vkCreateCommandPool", (void *)CreateCommandPool},
12537 {"vkDestroyCommandPool", (void *)DestroyCommandPool},
12538 {"vkResetCommandPool", (void *)ResetCommandPool},
12539 {"vkCreateQueryPool", (void *)CreateQueryPool},
12540 {"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
12541 {"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
12542 {"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
12543 {"vkEndCommandBuffer", (void *)EndCommandBuffer},
12544 {"vkResetCommandBuffer", (void *)ResetCommandBuffer},
12545 {"vkCmdBindPipeline", (void *)CmdBindPipeline},
12546 {"vkCmdSetViewport", (void *)CmdSetViewport},
12547 {"vkCmdSetScissor", (void *)CmdSetScissor},
12548 {"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
12549 {"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
12550 {"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
12551 {"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
12552 {"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
12553 {"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
12554 {"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
12555 {"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
12556 {"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
12557 {"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
12558 {"vkCmdDraw", (void *)CmdDraw},
12559 {"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
12560 {"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
12561 {"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
12562 {"vkCmdDispatch", (void *)CmdDispatch},
12563 {"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
12564 {"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
12565 {"vkCmdCopyImage", (void *)CmdCopyImage},
12566 {"vkCmdBlitImage", (void *)CmdBlitImage},
12567 {"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
12568 {"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
12569 {"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
12570 {"vkCmdFillBuffer", (void *)CmdFillBuffer},
12571 {"vkCmdClearColorImage", (void *)CmdClearColorImage},
12572 {"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
12573 {"vkCmdClearAttachments", (void *)CmdClearAttachments},
12574 {"vkCmdResolveImage", (void *)CmdResolveImage},
12575 {"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
12576 {"vkCmdSetEvent", (void *)CmdSetEvent},
12577 {"vkCmdResetEvent", (void *)CmdResetEvent},
12578 {"vkCmdWaitEvents", (void *)CmdWaitEvents},
12579 {"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
12580 {"vkCmdBeginQuery", (void *)CmdBeginQuery},
12581 {"vkCmdEndQuery", (void *)CmdEndQuery},
12582 {"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
12583 {"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
12584 {"vkCmdPushConstants", (void *)CmdPushConstants},
12585 {"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
12586 {"vkCreateFramebuffer", (void *)CreateFramebuffer},
12587 {"vkCreateShaderModule", (void *)CreateShaderModule},
12588 {"vkCreateRenderPass", (void *)CreateRenderPass},
12589 {"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
12590 {"vkCmdNextSubpass", (void *)CmdNextSubpass},
12591 {"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
12592 {"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
12593 {"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
12594 {"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
12595 {"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
12596 {"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
12597 {"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
12598 {"vkSetEvent", (void *)SetEvent},
12599 {"vkMapMemory", (void *)MapMemory},
12600 {"vkUnmapMemory", (void *)UnmapMemory},
12601 {"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
12602 {"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
12603 {"vkAllocateMemory", (void *)AllocateMemory},
12604 {"vkFreeMemory", (void *)FreeMemory},
12605 {"vkBindBufferMemory", (void *)BindBufferMemory},
12606 {"vkBindBufferMemory2", (void *)BindBufferMemory2},
12607 {"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
12608 {"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
12609 {"vkGetBufferMemoryRequirements2", (void *)GetBufferMemoryRequirements2},
12610 {"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
12611 {"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
12612 {"vkGetImageMemoryRequirements2", (void *)GetImageMemoryRequirements2},
12613 {"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
12614 {"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
12615 {"vkGetImageSparseMemoryRequirements2", (void *)GetImageSparseMemoryRequirements2},
12616 {"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
12617 {"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
12618 {"vkGetPhysicalDeviceSparseImageFormatProperties2", (void *)GetPhysicalDeviceSparseImageFormatProperties2},
12619 {"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
12620 {"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
12621 {"vkBindImageMemory", (void *)BindImageMemory},
12622 {"vkBindImageMemory2", (void *)BindImageMemory2},
12623 {"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
12624 {"vkQueueBindSparse", (void *)QueueBindSparse},
12625 {"vkCreateSemaphore", (void *)CreateSemaphore},
12626 {"vkCreateEvent", (void *)CreateEvent},
12627 #ifdef VK_USE_PLATFORM_ANDROID_KHR
12628 {"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
12630 #ifdef VK_USE_PLATFORM_MIR_KHR
12631 {"vkCreateMirSurfaceKHR", (void *)CreateMirSurfaceKHR},
12632 {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void *)GetPhysicalDeviceMirPresentationSupportKHR},
12634 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
12635 {"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
12636 {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
12638 #ifdef VK_USE_PLATFORM_WIN32_KHR
12639 {"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
12640 {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
12641 {"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
12642 {"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
12643 {"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
12644 {"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
12646 #ifdef VK_USE_PLATFORM_XCB_KHR
12647 {"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
12648 {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
12650 #ifdef VK_USE_PLATFORM_XLIB_KHR
12651 {"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
12652 {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
12654 #ifdef VK_USE_PLATFORM_IOS_MVK
12655 {"vkCreateIOSSurfaceMVK", (void *)CreateIOSSurfaceMVK},
12657 #ifdef VK_USE_PLATFORM_MACOS_MVK
12658 {"vkCreateMacOSSurfaceMVK", (void *)CreateMacOSSurfaceMVK},
12660 {"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
12661 {"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
12662 {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
12663 {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
12664 {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
12665 {"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
12666 {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
12667 {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
12668 {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
12669 {"vkGetPhysicalDeviceQueueFamilyProperties2", (void *)GetPhysicalDeviceQueueFamilyProperties2},
12670 {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
12671 {"vkEnumeratePhysicalDeviceGroups", (void *)EnumeratePhysicalDeviceGroups},
12672 {"vkEnumeratePhysicalDeviceGroupsKHR", (void *)EnumeratePhysicalDeviceGroupsKHR},
12673 {"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
12674 {"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
12675 {"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
12676 {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
12677 {"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
12678 {"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
12679 {"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
12680 {"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
12681 {"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
12682 {"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
12683 {"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
12684 {"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
12685 {"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
12686 {"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
12687 {"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
12688 {"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
12689 {"vkSetDebugUtilsObjectNameEXT", (void *)SetDebugUtilsObjectNameEXT},
12690 {"vkSetDebugUtilsObjectTagEXT", (void *)SetDebugUtilsObjectTagEXT},
12691 {"vkQueueBeginDebugUtilsLabelEXT", (void *)QueueBeginDebugUtilsLabelEXT},
12692 {"vkQueueEndDebugUtilsLabelEXT", (void *)QueueEndDebugUtilsLabelEXT},
12693 {"vkQueueInsertDebugUtilsLabelEXT", (void *)QueueInsertDebugUtilsLabelEXT},
12694 {"vkCmdBeginDebugUtilsLabelEXT", (void *)CmdBeginDebugUtilsLabelEXT},
12695 {"vkCmdEndDebugUtilsLabelEXT", (void *)CmdEndDebugUtilsLabelEXT},
12696 {"vkCmdInsertDebugUtilsLabelEXT", (void *)CmdInsertDebugUtilsLabelEXT},
12697 {"vkCreateDebugUtilsMessengerEXT", (void *)CreateDebugUtilsMessengerEXT},
12698 {"vkDestroyDebugUtilsMessengerEXT", (void *)DestroyDebugUtilsMessengerEXT},
12699 {"vkSubmitDebugUtilsMessageEXT", (void *)SubmitDebugUtilsMessageEXT},
12702 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
12704 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12706 // Is API to be intercepted by this layer?
12707 const auto &item = name_to_funcptr_map.find(funcName);
12708 if (item != name_to_funcptr_map.end()) {
12709 return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12712 auto &table = device_data->dispatch_table;
12713 if (!table.GetDeviceProcAddr) return nullptr;
12714 return table.GetDeviceProcAddr(device, funcName);
12717 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
12718 instance_layer_data *instance_data;
12719 // Is API to be intercepted by this layer?
12720 const auto &item = name_to_funcptr_map.find(funcName);
12721 if (item != name_to_funcptr_map.end()) {
12722 return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12725 instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12726 auto &table = instance_data->dispatch_table;
12727 if (!table.GetInstanceProcAddr) return nullptr;
12728 return table.GetInstanceProcAddr(instance, funcName);
12731 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
12733 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12735 auto &table = instance_data->dispatch_table;
12736 if (!table.GetPhysicalDeviceProcAddr) return nullptr;
12737 return table.GetPhysicalDeviceProcAddr(instance, funcName);
12740 } // namespace core_validation
// Loader-layer interface v0 entry points: thin exported wrappers that forward
// into the core_validation namespace, since this library contains only one layer.
12744 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
12745 VkExtensionProperties *pProperties) {
12746 return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
12749 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
12750 VkLayerProperties *pProperties) {
12751 return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
12754 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12755 VkLayerProperties *pProperties) {
12756 // the layer command handles VK_NULL_HANDLE just fine internally
12757 assert(physicalDevice == VK_NULL_HANDLE);
12758 return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
12761 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
12762 const char *pLayerName, uint32_t *pCount,
12763 VkExtensionProperties *pProperties) {
12764 // the layer command handles VK_NULL_HANDLE just fine internally
12765 assert(physicalDevice == VK_NULL_HANDLE);
12766 return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
12769 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
12770 return core_validation::GetDeviceProcAddr(dev, funcName);
12773 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
12774 return core_validation::GetInstanceProcAddr(instance, funcName);
12777 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
12778 const char *funcName) {
12779 return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
12782 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
12783 assert(pVersionStruct != NULL);
12784 assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
12786 // Fill in the function pointers if our version is at least capable of having the structure contain them.
12787 if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
12788 pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
12789 pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
12790 pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
12793 if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12794 core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
12795 } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12796 pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;