1 /* Copyright (c) 2015-2017 The Khronos Group Inc.
2 * Copyright (c) 2015-2017 Valve Corporation
3 * Copyright (c) 2015-2017 LunarG, Inc.
4 * Copyright (C) 2015-2017 Google Inc.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * Author: Cody Northrop <cnorthrop@google.com>
19 * Author: Michael Lentine <mlentine@google.com>
20 * Author: Tobin Ehlis <tobine@google.com>
21 * Author: Chia-I Wu <olv@google.com>
22 * Author: Chris Forbes <chrisf@ijw.co.nz>
23 * Author: Mark Lobodzinski <mark@lunarg.com>
24 * Author: Ian Elliott <ianelliott@google.com>
25 * Author: Dave Houlton <daveh@lunarg.com>
26 * Author: Dustin Graves <dustin@lunarg.com>
27 * Author: Jeremy Hayes <jeremy@lunarg.com>
28 * Author: Jon Ashburn <jon@lunarg.com>
29 * Author: Karl Schultz <karl@lunarg.com>
30 * Author: Mark Young <marky@lunarg.com>
31 * Author: Mike Schuchardt <mikes@lunarg.com>
32 * Author: Mike Weiblen <mikew@lunarg.com>
33 * Author: Tony Barbour <tony@LunarG.com>
36 // Allow use of STL min and max functions in Windows
55 #include "vk_loader_platform.h"
56 #include "vk_dispatch_table_helper.h"
57 #include "vk_enum_string_helper.h"
59 #pragma GCC diagnostic ignored "-Wwrite-strings"
62 #pragma GCC diagnostic warning "-Wwrite-strings"
64 #include "core_validation.h"
65 #include "buffer_validation.h"
66 #include "shader_validation.h"
67 #include "vk_layer_table.h"
68 #include "vk_layer_data.h"
69 #include "vk_layer_extension_utils.h"
70 #include "vk_layer_utils.h"
71 #include "vk_typemap_helper.h"
73 #if defined __ANDROID__
74 #include <android/log.h>
75 #define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "CORE_VALIDATION", __VA_ARGS__))
77 #define LOGCONSOLE(...) \
79 printf(__VA_ARGS__); \
84 // This intentionally includes a cpp file
85 #include "vk_safe_struct.cpp"
87 using mutex_t = std::mutex;
88 using lock_guard_t = std::lock_guard<mutex_t>;
89 using unique_lock_t = std::unique_lock<mutex_t>;
91 // These functions are defined *outside* the core_validation namespace as their type
92 // is also defined outside that namespace
93 size_t PipelineLayoutCompatDef::hash() const {
94 hash_util::HashCombiner hc;
95 // The set number is integral to the CompatDef's distinctiveness
96 hc << set << push_constant_ranges.get();
97 const auto &descriptor_set_layouts = *set_layouts_id.get();
98 for (uint32_t i = 0; i <= set; i++) {
99 hc << descriptor_set_layouts[i].get();
104 bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
105 if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
109 if (set_layouts_id == other.set_layouts_id) {
110 // if it's the same set_layouts_id, then *any* subset will match
114 // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
115 const auto &descriptor_set_layouts = *set_layouts_id.get();
116 assert(set < descriptor_set_layouts.size());
117 const auto &other_ds_layouts = *other.set_layouts_id.get();
118 assert(set < other_ds_layouts.size());
119 for (uint32_t i = 0; i <= set; i++) {
120 if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
127 namespace core_validation {
131 using std::stringstream;
132 using std::unique_ptr;
133 using std::unordered_map;
134 using std::unordered_set;
137 // WSI Image Objects bypass usual Image Object creation methods. A special Memory
138 // Object value will be used to identify them internally.
// Sentinel: all bits set (~0) marks a swapchain-owned image's "memory".
139 static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
140 // 2nd special memory handle used to flag object as unbound from memory
// Sentinel: ~0 - 1, distinct from the swapchain key and from VK_NULL_HANDLE.
141 static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
143 struct instance_layer_data {
144 VkInstance instance = VK_NULL_HANDLE;
145 debug_report_data *report_data = nullptr;
146 vector<VkDebugReportCallbackEXT> logging_callback;
147 vector<VkDebugUtilsMessengerEXT> logging_messenger;
148 VkLayerInstanceDispatchTable dispatch_table;
150 CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
151 uint32_t physical_devices_count = 0;
152 CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
153 uint32_t physical_device_groups_count = 0;
154 CHECK_DISABLED disabled = {};
156 unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
157 unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
159 InstanceExtensions extensions;
160 uint32_t api_version;
164 debug_report_data *report_data = nullptr;
165 VkLayerDispatchTable dispatch_table;
167 DeviceExtensions extensions = {};
168 unordered_set<VkQueue> queues; // All queues under given device
169 // Layer specific data
170 unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
171 unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
172 unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
173 unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
174 unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
175 unordered_map<VkPipeline, unique_ptr<PIPELINE_STATE>> pipelineMap;
176 unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
177 unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
178 unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
179 unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
180 unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
181 unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
182 unordered_map<VkFence, FENCE_NODE> fenceMap;
183 unordered_map<VkQueue, QUEUE_STATE> queueMap;
184 unordered_map<VkEvent, EVENT_STATE> eventMap;
185 unordered_map<QueryObject, bool> queryToStateMap;
186 unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
187 unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
188 unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
189 unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
190 unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
191 unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
192 unordered_map<VkRenderPass, std::shared_ptr<RENDER_PASS_STATE>> renderPassMap;
193 unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
194 unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
195 unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
197 VkDevice device = VK_NULL_HANDLE;
198 VkPhysicalDevice physical_device = VK_NULL_HANDLE;
200 instance_layer_data *instance_data = nullptr; // from device to enclosing instance
202 VkPhysicalDeviceFeatures enabled_features = {};
203 // Device specific data
204 PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
205 VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
206 VkPhysicalDeviceProperties phys_dev_props = {};
207 // Device extension properties -- storing properties gathered from VkPhysicalDeviceProperties2KHR::pNext chain
208 struct DeviceExtensionProperties {
209 uint32_t max_push_descriptors; // from VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors
210 VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_props;
211 VkPhysicalDeviceDescriptorIndexingFeaturesEXT descriptor_indexing_features;
213 DeviceExtensionProperties phys_dev_ext_props = {};
214 bool external_sync_warning = false;
215 uint32_t api_version = 0;
218 // TODO : Do we need to guard access to layer_data_map w/ lock?
219 static unordered_map<void *, layer_data *> layer_data_map;
220 static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
222 static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
224 static const VkLayerProperties global_layer = {
225 "VK_LAYER_LUNARG_core_validation",
226 VK_LAYER_API_VERSION,
228 "LunarG Validation Layer",
231 static const VkExtensionProperties device_extensions[] = {
232 {VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
235 template <class TCreateInfo>
236 void ValidateLayerOrdering(const TCreateInfo &createInfo) {
237 bool foundLayer = false;
238 for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
239 if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
242 // This has to be logged to console as we don't have a callback at this point.
243 if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
244 LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
249 // TODO : This can be much smarter, using separate locks for separate global data
// NOTE(review): presumably serializes access to the state maps above across all
// entry points — confirm lock scope at call sites before splitting it up.
250 static mutex_t global_lock;
252 // Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
253 IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
254 auto iv_it = dev_data->imageViewMap.find(image_view);
255 if (iv_it == dev_data->imageViewMap.end()) {
258 return iv_it->second.get();
260 // Return sampler node ptr for specified sampler or else NULL
261 SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
262 auto sampler_it = dev_data->samplerMap.find(sampler);
263 if (sampler_it == dev_data->samplerMap.end()) {
266 return sampler_it->second.get();
268 // Return image state ptr for specified image or else NULL
269 IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
270 auto img_it = dev_data->imageMap.find(image);
271 if (img_it == dev_data->imageMap.end()) {
274 return img_it->second.get();
276 // Return buffer state ptr for specified buffer or else NULL
277 BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
278 auto buff_it = dev_data->bufferMap.find(buffer);
279 if (buff_it == dev_data->bufferMap.end()) {
282 return buff_it->second.get();
284 // Return swapchain node for specified swapchain or else NULL
285 SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
286 auto swp_it = dev_data->swapchainMap.find(swapchain);
287 if (swp_it == dev_data->swapchainMap.end()) {
290 return swp_it->second.get();
292 // Return buffer node ptr for specified buffer or else NULL
293 BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
294 auto bv_it = dev_data->bufferViewMap.find(buffer_view);
295 if (bv_it == dev_data->bufferViewMap.end()) {
298 return bv_it->second.get();
301 FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
302 auto it = dev_data->fenceMap.find(fence);
303 if (it == dev_data->fenceMap.end()) {
309 EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
310 auto it = dev_data->eventMap.find(event);
311 if (it == dev_data->eventMap.end()) {
317 QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
318 auto it = dev_data->queryPoolMap.find(query_pool);
319 if (it == dev_data->queryPoolMap.end()) {
325 QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
326 auto it = dev_data->queueMap.find(queue);
327 if (it == dev_data->queueMap.end()) {
333 SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
334 auto it = dev_data->semaphoreMap.find(semaphore);
335 if (it == dev_data->semaphoreMap.end()) {
341 COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
342 auto it = dev_data->commandPoolMap.find(pool);
343 if (it == dev_data->commandPoolMap.end()) {
349 PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
350 auto it = instance_data->physical_device_map.find(phys);
351 if (it == instance_data->physical_device_map.end()) {
357 SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
358 auto it = instance_data->surface_map.find(surface);
359 if (it == instance_data->surface_map.end()) {
365 DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) { return &dev_data->extensions; }
367 // Return ptr to memory binding for given handle of specified type
368 static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
370 case kVulkanObjectTypeImage:
371 return GetImageState(dev_data, VkImage(handle));
372 case kVulkanObjectTypeBuffer:
373 return GetBufferState(dev_data, VkBuffer(handle));
380 GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
382 // Return ptr to info in map container containing mem, or NULL if not found
383 // Calls to this function should be wrapped in mutex
384 DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
385 auto mem_it = dev_data->memObjMap.find(mem);
386 if (mem_it == dev_data->memObjMap.end()) {
389 return mem_it->second.get();
392 static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
393 const VkMemoryAllocateInfo *pAllocateInfo) {
394 assert(object != NULL);
396 auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
397 dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(mem_info);
399 // TODO: If the number of things we search for goes much higher, need a map...
400 mem_info->global_valid = nullptr != lvl_find_in_chain<VkImportMemoryFdInfoKHR>(pAllocateInfo->pNext);
401 #ifdef VK_USE_PLATFORM_WIN32_KHR
402 mem_info->global_valid |= nullptr != lvl_find_in_chain<VkImportMemoryWin32HandleInfoKHR>(pAllocateInfo->pNext);
405 auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
407 mem_info->is_dedicated = true;
408 mem_info->dedicated_buffer = dedicated->buffer;
409 mem_info->dedicated_image = dedicated->image;
413 // For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
414 static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
415 const char *functionName) {
416 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
418 if (!mem_info->bound_ranges[bound_object_handle].valid) {
419 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
420 HandleToUint64(mem), MEMTRACK_INVALID_MEM_REGION,
421 "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
422 ", please fill the memory before using.",
423 functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
428 // For given image_state
429 // If mem is special swapchain key, then verify that image_state valid member is true
430 // Else verify that the image's bound memory range is valid
431 bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
432 if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
433 if (!image_state->valid) {
434 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
435 HandleToUint64(image_state->binding.mem), MEMTRACK_INVALID_MEM_REGION,
436 "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
437 functionName, HandleToUint64(image_state->image));
440 return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
445 // For given buffer_state, verify that the range it's bound to is valid
446 bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
447 return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
450 // For the given memory allocation, set the range bound by the given handle object to the valid param value
451 static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
452 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
454 mem_info->bound_ranges[handle].valid = valid;
457 // For given image node
458 // If mem is special swapchain key, then set entire image_state to valid param value
459 // Else set the image's bound memory range to valid param value
460 void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
461 if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
462 image_state->valid = valid;
464 SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
467 // For given buffer node set the buffer's bound memory range to valid param value
468 void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
469 SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
472 // Create binding link between given sampler and command buffer node
473 void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
474 sampler_state->cb_bindings.insert(cb_node);
475 cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
478 // Create binding link between given image node and command buffer node
479 void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
480 // Skip validation if this image was created through WSI
481 if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
482 // First update CB binding in MemObj mini CB list
483 for (auto mem_binding : image_state->GetBoundMemory()) {
484 DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
486 pMemInfo->cb_bindings.insert(cb_node);
487 // Now update CBInfo's Mem reference list
488 cb_node->memObjs.insert(mem_binding);
491 // Now update cb binding for image
492 cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
493 image_state->cb_bindings.insert(cb_node);
497 // Create binding link between given image view node and its image with command buffer node
498 void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
499 // First add bindings for imageView
500 view_state->cb_bindings.insert(cb_node);
501 cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
502 auto image_state = GetImageState(dev_data, view_state->create_info.image);
503 // Add bindings for image within imageView
505 AddCommandBufferBindingImage(dev_data, cb_node, image_state);
509 // Create binding link between given buffer node and command buffer node
510 void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
511 // First update CB binding in MemObj mini CB list
512 for (auto mem_binding : buffer_state->GetBoundMemory()) {
513 DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
515 pMemInfo->cb_bindings.insert(cb_node);
516 // Now update CBInfo's Mem reference list
517 cb_node->memObjs.insert(mem_binding);
520 // Now update cb binding for buffer
521 cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
522 buffer_state->cb_bindings.insert(cb_node);
525 // Create binding link between given buffer view node and its buffer with command buffer node
526 void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
527 // First add bindings for bufferView
528 view_state->cb_bindings.insert(cb_node);
529 cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
530 auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
531 // Add bindings for buffer within bufferView
533 AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
537 // For every mem obj bound to particular CB, free bindings related to that CB
538 static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
540 if (cb_node->memObjs.size() > 0) {
541 for (auto mem : cb_node->memObjs) {
542 DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
544 pInfo->cb_bindings.erase(cb_node);
547 cb_node->memObjs.clear();
552 // Clear a single object binding from given memory object, or report error if binding is missing
553 static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
554 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
555 // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
557 mem_info->obj_bindings.erase({handle, type});
562 // ClearMemoryObjectBindings clears the binding of objects to memory
563 // For the given object it pulls the memory bindings and makes sure that the bindings
564 // no longer refer to the object being cleared. This occurs when objects are destroyed.
565 bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
567 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
569 if (!mem_binding->sparse) {
570 skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
571 } else { // Sparse, clear all bindings
572 for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
573 skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
580 // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
581 bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
582 const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
584 if (VK_NULL_HANDLE == mem) {
586 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
587 "%s: Vk%s object 0x%" PRIx64 " used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
588 api_name, type_name, handle, type_name);
589 } else if (MEMORY_UNBOUND == mem) {
591 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
592 "%s: Vk%s object 0x%" PRIx64
593 " used with no memory bound and previously bound memory was freed. Memory must not be freed prior to this "
595 api_name, type_name, handle);
600 // Check to see if memory was ever bound to this image
601 bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
602 UNIQUE_VALIDATION_ERROR_CODE error_code) {
604 if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
605 result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
611 // Check to see if memory was bound to this buffer
612 bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
613 UNIQUE_VALIDATION_ERROR_CODE error_code) {
615 if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
616 result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
617 "Buffer", error_code);
622 // SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
623 // Corresponding valid usage checks are in ValidateSetMemBinding().
624 static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
625 uint64_t handle, VulkanObjectType type, const char *apiName) {
627 mem_binding->binding.mem = mem;
628 mem_binding->UpdateBoundMemorySet(); // force recreation of cached set
629 mem_binding->binding.offset = memory_offset;
630 mem_binding->binding.size = mem_binding->requirements.size;
632 if (mem != VK_NULL_HANDLE) {
633 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
635 mem_info->obj_bindings.insert({handle, type});
636 // For image objects, make sure default memory state is correctly set
637 // TODO : What's the best/correct way to handle this?
638 if (kVulkanObjectTypeImage == type) {
639 auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
641 VkImageCreateInfo ici = image_state->createInfo;
642 if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
643 // TODO:: More memory state transition stuff.
651 // Valid usage checks for a call to SetMemBinding().
652 // For NULL mem case, output warning
653 // Make sure given object is in global object map
654 // IF a previous binding existed, output validation error
655 // Otherwise, add reference from objectInfo to memoryInfo
656 // Add reference off of objInfo
657 // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
658 static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
659 const char *apiName) {
661 // It's an error to bind an object to NULL memory
662 if (mem != VK_NULL_HANDLE) {
663 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
665 if (mem_binding->sparse) {
666 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
667 const char *handle_type = "IMAGE";
668 if (type == kVulkanObjectTypeBuffer) {
669 error_code = VALIDATION_ERROR_1700080c;
670 handle_type = "BUFFER";
672 assert(type == kVulkanObjectTypeImage);
674 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
675 HandleToUint64(mem), error_code,
676 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
677 ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT).",
678 apiName, HandleToUint64(mem), handle, handle_type);
680 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
682 DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
684 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
685 if (type == kVulkanObjectTypeBuffer) {
686 error_code = VALIDATION_ERROR_1700080a;
688 assert(type == kVulkanObjectTypeImage);
690 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
691 HandleToUint64(mem), error_code,
692 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
693 ") which has already been bound to mem object 0x%" PRIx64 ".",
694 apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem));
695 } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
696 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
697 HandleToUint64(mem), MEMTRACK_REBIND_OBJECT,
698 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
699 ") which was previous bound to memory that has since been freed. Memory bindings are immutable in "
700 "Vulkan so this attempt to bind to new memory is not allowed.",
701 apiName, HandleToUint64(mem), handle);
708 // For NULL mem case, clear any previous binding Else...
709 // Make sure given object is in its object map
710 // IF a previous binding existed, update binding
711 // Add reference from objectInfo to memoryInfo
712 // Add reference off of object's binding info
713 // Return VK_TRUE if addition is successful, VK_FALSE otherwise
714 static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
715 bool skip = VK_FALSE;
716 // Handle NULL case separately, just clear previous binding & decrement reference
717 if (binding.mem == VK_NULL_HANDLE) {
718 // TODO : This should cause the range of the resource to be unbound according to spec
720 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
722 assert(mem_binding->sparse);
723 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
725 mem_info->obj_bindings.insert({handle, type});
726 // Need to set mem binding for this object
727 mem_binding->sparse_bindings.insert(binding);
728 mem_binding->UpdateBoundMemorySet();
734 // Check object status for selected flag state
735 static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
736 const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
737 if (!(pNode->status & status_mask)) {
738 return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
739 HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object 0x%" PRIx64 ": %s..",
740 HandleToUint64(pNode->commandBuffer), fail_msg);
745 // Retrieve pipeline node ptr for given pipeline object
746 static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
747 auto it = dev_data->pipelineMap.find(pipeline);
748 if (it == dev_data->pipelineMap.end()) {
751 return it->second.get();
754 RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
755 auto it = dev_data->renderPassMap.find(renderpass);
756 if (it == dev_data->renderPassMap.end()) {
759 return it->second.get();
762 std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
763 auto it = dev_data->renderPassMap.find(renderpass);
764 if (it == dev_data->renderPassMap.end()) {
770 FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
771 auto it = dev_data->frameBufferMap.find(framebuffer);
772 if (it == dev_data->frameBufferMap.end()) {
775 return it->second.get();
778 std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
779 VkDescriptorSetLayout dsLayout) {
780 auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
781 if (it == dev_data->descriptorSetLayoutMap.end()) {
787 static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
788 auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
789 if (it == dev_data->pipelineLayoutMap.end()) {
795 shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
796 auto it = dev_data->shaderModuleMap.find(module);
797 if (it == dev_data->shaderModuleMap.end()) {
800 return it->second.get();
803 // Return true if for a given PSO, the given state enum is dynamic, else return false
804 static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
805 if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
806 for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
807 if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
813 // Validate state stored as flags at time of draw call
814 static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
815 UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
817 if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
818 pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
819 result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
820 "Dynamic line width state not set for this command buffer", msg_code);
822 if (pPipe->graphicsPipelineCI.pRasterizationState &&
823 (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
824 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
825 "Dynamic depth bias state not set for this command buffer", msg_code);
827 if (pPipe->blendConstantsEnabled) {
828 result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
829 "Dynamic blend constants state not set for this command buffer", msg_code);
831 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
832 (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
833 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
834 "Dynamic depth bounds state not set for this command buffer", msg_code);
836 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
837 (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
838 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
839 "Dynamic stencil read mask state not set for this command buffer", msg_code);
840 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
841 "Dynamic stencil write mask state not set for this command buffer", msg_code);
842 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
843 "Dynamic stencil reference state not set for this command buffer", msg_code);
846 result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
847 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
853 static bool logInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
854 const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
855 uint32_t secondary_attach, const char *msg, const char *caller,
856 UNIQUE_VALIDATION_ERROR_CODE error_code) {
857 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
858 HandleToUint64(rp1_state->renderPass), error_code,
859 "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64 " and %s w/ renderPass 0x%" PRIx64
860 " Attachment %u is not compatible with %u: %s.",
861 caller, type1_string, HandleToUint64(rp1_state->renderPass), type2_string, HandleToUint64(rp2_state->renderPass),
862 primary_attach, secondary_attach, msg);
865 static bool validateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
866 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
867 const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
868 const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
870 const auto &primaryPassCI = rp1_state->createInfo;
871 const auto &secondaryPassCI = rp2_state->createInfo;
872 if (primaryPassCI.attachmentCount <= primary_attach) {
873 primary_attach = VK_ATTACHMENT_UNUSED;
875 if (secondaryPassCI.attachmentCount <= secondary_attach) {
876 secondary_attach = VK_ATTACHMENT_UNUSED;
878 if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
881 if (primary_attach == VK_ATTACHMENT_UNUSED) {
882 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
883 secondary_attach, "The first is unused while the second is not.", caller, error_code);
886 if (secondary_attach == VK_ATTACHMENT_UNUSED) {
887 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
888 secondary_attach, "The second is unused while the first is not.", caller, error_code);
891 if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
892 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
893 secondary_attach, "They have different formats.", caller, error_code);
895 if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
896 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
897 secondary_attach, "They have different samples.", caller, error_code);
899 if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
900 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
901 secondary_attach, "They have different flags.", caller, error_code);
907 static bool validateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
908 const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
909 const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
911 const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
912 const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
913 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
914 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
915 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
916 if (i < primary_desc.inputAttachmentCount) {
917 primary_input_attach = primary_desc.pInputAttachments[i].attachment;
919 if (i < secondary_desc.inputAttachmentCount) {
920 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
922 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
923 secondary_input_attach, caller, error_code);
925 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
926 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
927 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
928 if (i < primary_desc.colorAttachmentCount) {
929 primary_color_attach = primary_desc.pColorAttachments[i].attachment;
931 if (i < secondary_desc.colorAttachmentCount) {
932 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
934 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
935 secondary_color_attach, caller, error_code);
936 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
937 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
938 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
940 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
941 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
943 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
944 secondary_resolve_attach, caller, error_code);
946 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
947 if (primary_desc.pDepthStencilAttachment) {
948 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
950 if (secondary_desc.pDepthStencilAttachment) {
951 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
953 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
954 secondary_depthstencil_attach, caller, error_code);
958 // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
959 // This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
960 // will then feed into this function
961 static bool validateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
962 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
963 const RENDER_PASS_STATE *rp2_state, const char *caller,
964 UNIQUE_VALIDATION_ERROR_CODE error_code) {
967 if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
968 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
969 HandleToUint64(rp1_state->renderPass), error_code,
970 "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64
971 " with a subpassCount of %u and %s w/ renderPass 0x%" PRIx64 " with a subpassCount of %u.",
972 caller, type1_string, HandleToUint64(rp1_state->renderPass), rp1_state->createInfo.subpassCount,
973 type2_string, HandleToUint64(rp2_state->renderPass), rp2_state->createInfo.subpassCount);
975 for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
976 skip |= validateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
982 // Return Set node ptr for specified set or else NULL
983 cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
984 auto set_it = dev_data->setMap.find(set);
985 if (set_it == dev_data->setMap.end()) {
988 return set_it->second;
991 // For given pipeline, return number of MSAA samples, or one if MSAA disabled
992 static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
993 if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
994 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
995 return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
997 return VK_SAMPLE_COUNT_1_BIT;
// Stream a comma-separated list of the indices of all set bits in 'bits' (LSB first),
// e.g. 0b1010 -> "1, 3". Used to report which dynamic viewports/scissors are missing.
static void list_bits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);  // Clear the bit so the loop can stop early when none remain
            if (bits) {
                s << ", ";
            }
        }
    }
}
1012 // Validate draw-time state related to the PSO
1013 static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
1014 CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
1017 // Verify vertex binding
1018 if (pPipeline->vertexBindingDescriptions.size() > 0) {
1019 for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
1020 auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
1021 if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
1022 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
1024 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1025 HandleToUint64(pCB->commandBuffer), DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
1026 "The Pipeline State Object (0x%" PRIx64
1027 ") expects that this Command Buffer's vertex binding Index %u should be set via "
1028 "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
1029 "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
1030 HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
1034 if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
1035 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1036 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
1037 DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
1038 "Vertex buffers are bound to command buffer (0x%" PRIx64
1039 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIx64 ").",
1040 HandleToUint64(pCB->commandBuffer), HandleToUint64(state.pipeline_state->pipeline));
1043 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
1044 // Skip check if rasterization is disabled or there is no viewport.
1045 if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
1046 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
1047 pPipeline->graphicsPipelineCI.pViewportState) {
1048 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
1049 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
1052 auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
1053 auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
1054 if (missingViewportMask) {
1055 std::stringstream ss;
1056 ss << "Dynamic viewport(s) ";
1057 list_bits(ss, missingViewportMask);
1058 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
1059 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1060 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
1065 auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
1066 auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
1067 if (missingScissorMask) {
1068 std::stringstream ss;
1069 ss << "Dynamic scissor(s) ";
1070 list_bits(ss, missingScissorMask);
1071 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
1072 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1073 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
1078 // Verify that any MSAA request in PSO matches sample# in bound FB
1079 // Skip the check if rasterization is disabled.
1080 if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
1081 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
1082 VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
1083 if (pCB->activeRenderPass) {
1084 auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
1085 const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
1087 unsigned subpass_num_samples = 0;
1089 for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
1090 auto attachment = subpass_desc->pColorAttachments[i].attachment;
1091 if (attachment != VK_ATTACHMENT_UNUSED)
1092 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1095 if (subpass_desc->pDepthStencilAttachment &&
1096 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1097 auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
1098 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1101 if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
1102 ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
1103 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1104 HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1105 "Num samples mismatch! At draw-time in Pipeline (0x%" PRIx64
1106 ") with %u samples while current RenderPass (0x%" PRIx64 ") w/ %u samples!",
1107 HandleToUint64(pPipeline->pipeline), pso_num_samples,
1108 HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
1111 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1112 HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1113 "No active render pass found at draw-time in Pipeline (0x%" PRIx64 ")!",
1114 HandleToUint64(pPipeline->pipeline));
1117 // Verify that PSO creation renderPass is compatible with active renderPass
1118 if (pCB->activeRenderPass) {
1119 // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
1120 // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
1121 // Error codes for renderpass and subpass mismatches
1122 auto rp_error = VALIDATION_ERROR_1a200366, sp_error = VALIDATION_ERROR_1a200368;
1124 case CMD_DRAWINDEXED:
1125 rp_error = VALIDATION_ERROR_1a40038c;
1126 sp_error = VALIDATION_ERROR_1a40038e;
1128 case CMD_DRAWINDIRECT:
1129 rp_error = VALIDATION_ERROR_1aa003be;
1130 sp_error = VALIDATION_ERROR_1aa003c0;
1132 case CMD_DRAWINDIRECTCOUNTAMD:
1133 rp_error = VALIDATION_ERROR_1ac003f6;
1134 sp_error = VALIDATION_ERROR_1ac003f8;
1136 case CMD_DRAWINDEXEDINDIRECT:
1137 rp_error = VALIDATION_ERROR_1a600426;
1138 sp_error = VALIDATION_ERROR_1a600428;
1140 case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
1141 rp_error = VALIDATION_ERROR_1a800460;
1142 sp_error = VALIDATION_ERROR_1a800462;
1145 assert(CMD_DRAW == cmd_type);
1148 std::string err_string;
1149 if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
1150 // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
1151 skip |= validateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
1152 pPipeline->rp_state.get(), caller, rp_error);
1154 if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
1156 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1157 HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
1158 pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
1165 // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
1166 // pipelineLayout[layoutIndex]
1167 static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
1168 PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
1170 auto num_sets = pipeline_layout->set_layouts.size();
1171 if (layoutIndex >= num_sets) {
1172 stringstream errorStr;
1173 errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
1174 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
1176 errorMsg = errorStr.str();
1179 if (descriptor_set->IsPushDescriptor()) return true;
1180 auto layout_node = pipeline_layout->set_layouts[layoutIndex];
1181 return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
1184 // Validate overall state at the time of a draw call
1185 static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
1186 const VkPipelineBindPoint bind_point, const char *function,
1187 UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1188 bool result = false;
1189 auto const &state = cb_node->lastBound[bind_point];
1190 PIPELINE_STATE *pPipe = state.pipeline_state;
1191 if (nullptr == pPipe) {
1193 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1194 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_PIPELINE,
1195 "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1196 // Early return as any further checks below will be busted w/o a pipeline
1197 if (result) return true;
1199 // First check flag states
1200 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1201 result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1203 // Now complete other state checks
1205 auto const &pipeline_layout = pPipe->pipeline_layout;
1207 for (const auto &set_binding_pair : pPipe->active_slots) {
1208 uint32_t setIndex = set_binding_pair.first;
1209 // If valid set is not bound throw an error
1210 if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1211 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1212 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND,
1213 "VkPipeline 0x%" PRIx64 " uses set #%u but that set is not bound.", HandleToUint64(pPipe->pipeline),
1215 } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) {
1216 // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1217 VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1218 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1219 HandleToUint64(setHandle), DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE,
1220 "VkDescriptorSet (0x%" PRIx64
1221 ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIx64 " due to: %s",
1222 HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1223 } else { // Valid set is bound and layout compatible, validate that it's updated
1224 // Pull the set node
1225 cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1226 // Validate the draw-time state for this descriptor set
1227 std::string err_str;
1228 if (!descriptor_set->IsPushDescriptor()) {
1229 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
1230 // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
1231 // Here, the currently bound pipeline determines whether an image validation check is redundant...
1232 // for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
1233 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
1235 const auto &binding_req_map = reduced_map.Map();
1237 if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
1239 auto set = descriptor_set->GetSet();
1241 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1242 HandleToUint64(set), DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
1243 "Descriptor set 0x%" PRIx64 " bound as set #%u encountered the following validation error at %s time: %s",
1244 HandleToUint64(set), setIndex, function, err_str.c_str());
1250 // Check general pipeline state that needs to be validated at drawtime
1251 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1252 result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);
1257 static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1258 auto const &state = cb_state->lastBound[bind_point];
1259 PIPELINE_STATE *pPipe = state.pipeline_state;
1260 if (VK_NULL_HANDLE != state.pipeline_layout) {
1261 for (const auto &set_binding_pair : pPipe->active_slots) {
1262 uint32_t setIndex = set_binding_pair.first;
1263 // Pull the set node
1264 cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1265 if (!descriptor_set->IsPushDescriptor()) {
1266 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
1267 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
1268 const auto &binding_req_map = reduced_map.Map();
1270 // Bind this set and its active descriptor resources to the command buffer
1271 descriptor_set->BindCommandBuffer(cb_state, binding_req_map);
1272 // For given active slots record updated images & buffers
1273 descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
1277 if (pPipe->vertexBindingDescriptions.size() > 0) {
1278 cb_state->vertex_buffer_used = true;
1282 static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1283 int pipelineIndex) {
1286 PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1288 // If create derivative bit is set, check that we've specified a base
1289 // pipeline correctly, and that the base pipeline was created to allow
1291 if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1292 PIPELINE_STATE *pBasePipeline = nullptr;
1293 if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1294 (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1295 // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1296 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1297 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1298 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1299 } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1300 if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1301 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1302 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_208005a0,
1303 "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
1305 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
1307 } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1308 pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1311 if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1312 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1313 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1314 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1321 // UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
1322 static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1323 int pipelineIndex) {
1326 PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1328 // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1329 // produces nonsense errors that confuse users. Other layers should already
1330 // emit errors for renderpass being invalid.
1331 auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
1332 if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
1333 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1334 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ee,
1335 "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
1336 pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
1337 subpass_desc = nullptr;
1340 if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1341 const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1342 if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
1344 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1345 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005d4,
1346 "vkCreateGraphicsPipelines(): Render pass (0x%" PRIx64
1347 ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u.",
1348 HandleToUint64(pPipeline->rp_state->renderPass), pPipeline->graphicsPipelineCI.subpass,
1349 subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
1351 if (!dev_data->enabled_features.independentBlend) {
1352 if (pPipeline->attachments.size() > 1) {
1353 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1354 for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1355 // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1356 // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1357 // only attachment state, so memcmp is best suited for the comparison
1358 if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1359 sizeof(pAttachments[0]))) {
1361 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1362 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004ba,
1363 "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
1364 "pAttachments must be identical.");
1370 if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1372 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1373 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004bc,
1374 "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
1378 if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1381 // Each shader's stage must be unique
1382 if (pPipeline->duplicate_shaders) {
1383 for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1384 if (pPipeline->duplicate_shaders & stage) {
1385 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1386 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1387 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1388 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1393 if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1394 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1395 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ae,
1396 "Invalid Pipeline CreateInfo State: Vertex Shader required.");
1398 // Either both or neither TC/TE shaders should be defined
1399 bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1400 bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1401 if (has_control && !has_eval) {
1402 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1403 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b2,
1404 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1406 if (!has_control && has_eval) {
1407 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1408 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b4,
1409 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1411 // Compute shaders should be specified independent of Gfx shaders
1412 if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1413 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1414 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b0,
1415 "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
1417 // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1418 // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1419 if (has_control && has_eval &&
1420 (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1421 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1422 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1423 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c0,
1424 "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
1425 "tessellation pipelines.");
1427 if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1428 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1429 if (!has_control || !has_eval) {
1430 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1431 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c2,
1432 "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
1433 "for tessellation pipelines.");
1437 // If a rasterization state is provided...
1438 if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1439 if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1440 (!dev_data->enabled_features.depthClamp)) {
1441 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1442 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_1020061c,
1443 "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
1444 "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
1447 if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1448 (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1449 (!dev_data->enabled_features.depthBiasClamp)) {
1450 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1451 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_FEATURE,
1452 "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
1453 "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1454 "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1457 // If rasterization is enabled...
1458 if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1459 if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
1460 (!dev_data->enabled_features.alphaToOne)) {
1461 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1462 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_10000622,
1463 "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1464 "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
1467 // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1468 if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1469 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1470 if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1471 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1472 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e0,
1473 "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
1474 "and subpass uses a depth/stencil attachment.");
1476 } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1477 (!dev_data->enabled_features.depthBounds)) {
1478 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1479 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f6004ac,
1480 "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
1481 "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
1482 "set to VK_FALSE.");
1486 // If subpass uses color attachments, pColorBlendState must be valid pointer
1488 uint32_t color_attachment_count = 0;
1489 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1490 if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1491 ++color_attachment_count;
1494 if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1495 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1496 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e2,
1497 "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
1498 "subpass uses color attachments.");
1504 auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
1506 for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
1507 VkFormat format = vi->pVertexAttributeDescriptions[j].format;
1508 // Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
1509 VkFormatProperties properties;
1510 dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
1512 if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
1514 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1515 VALIDATION_ERROR_14a004de,
1516 "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
1517 "(%s) is not a supported vertex buffer format.",
1518 pipelineIndex, j, string_VkFormat(format));
1523 if (dev_data->extensions.vk_amd_mixed_attachment_samples) {
1524 VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
1525 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1526 if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1528 std::max(max_sample_count,
1529 pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
1532 if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1534 std::max(max_sample_count,
1535 pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
1537 if (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count) {
1538 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1539 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_09600bc2,
1540 "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
1541 "attachment samples (%s) used in subpass %u.",
1543 string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
1544 string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
1551 // Block of code at start here specifically for managing/tracking DSs
1553 // Return Pool node ptr for specified pool or else NULL
// Looks up the layer's tracking state for a descriptor pool in descriptorPoolMap.
// A null return means the handle is unknown to the layer (never created, or already destroyed).
1554 DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1555 auto pool_it = dev_data->descriptorPoolMap.find(pool);
1556 if (pool_it == dev_data->descriptorPoolMap.end()) {
1557 return pool_it->second;
1562 // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
1563 // func_str is the name of the calling function
1564 // Return false if no errors occur
1565 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1566 static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
// Honor the app/user request to skip this class of check entirely.
1567 if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1569 auto set_node = dev_data->setMap.find(set);
// Unknown handle: either never allocated or already freed (double-destroy scenario).
1570 if (set_node == dev_data->setMap.end()) {
1571 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1572 HandleToUint64(set), DRAWSTATE_DOUBLE_DESTROY,
1573 "Cannot call %s() on descriptor set 0x%" PRIx64 " that has not been allocated.", func_str.c_str(),
1574 HandleToUint64(set));
1576 // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
// in_use is an atomic refcount-style flag set while a command buffer referencing this set is pending.
1577 if (set_node->second->in_use.load()) {
1578 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1579 HandleToUint64(set), VALIDATION_ERROR_2860026a,
1580 "Cannot call %s() on descriptor set 0x%" PRIx64 " that is in use by a command buffer.",
1581 func_str.c_str(), HandleToUint64(set));
1587 // Validate that given pool does not store any descriptor sets used by an in-flight CmdBuffer
1588 // pool stores the descriptor sets to be validated
1589 // Return false if no errors occur
1590 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1591 static bool validateIdleDescriptorSetForPoolReset(const layer_data *dev_data, const VkDescriptorPool pool) {
// Shares the same disable switch as the per-set idle check above.
1592 if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1594 DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1595 if (pPool != nullptr) {
// Walk every set owned by the pool; any set still referenced by a pending
// command buffer makes the whole vkResetDescriptorPool() call invalid.
1596 for (auto ds : pPool->sets) {
1597 if (ds && ds->in_use.load()) {
1598 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1599 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool), VALIDATION_ERROR_32a00272,
1600 "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer. %s",
1601 validation_error_map[VALIDATION_ERROR_32a00272]);
1609 // Remove set from setMap and delete the set
// Caller owns locking and must ensure the set is no longer referenced; after this
// call the cvdescriptorset::DescriptorSet pointer is dangling.
1610 static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1611 dev_data->setMap.erase(descriptor_set->GetSet());
1612 delete descriptor_set;
1614 // Free all DS Pools including their Sets & related sub-structs
1615 // NOTE : Calls to this function should be wrapped in mutex
1616 static void deletePools(layer_data *dev_data) {
// Erase-while-iterating pattern: erase() returns the next valid iterator,
// so the loop header deliberately has no ++ii.
1617 for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1618 // Remove this pools' sets from setMap and delete them
1619 for (auto ds : ii->second->sets) {
1620 freeDescriptorSet(dev_data, ds);
1622 ii->second->sets.clear();
1624 ii = dev_data->descriptorPoolMap.erase(ii);
// Implements the state-tracking side of vkResetDescriptorPool(): free every set
// the pool owns, then restore the pool's per-type and per-set capacity counters.
1628 static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1629 VkDescriptorPoolResetFlags flags) {
1630 DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1631 // TODO: validate flags
1632 // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1633 for (auto ds : pPool->sets) {
1634 freeDescriptorSet(dev_data, ds);
1636 pPool->sets.clear();
1637 // Reset available count for each type and available sets for this pool
1638 for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1639 pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1641 pPool->availableSets = pPool->maxSets;
1644 // For given CB object, fetch associated CB Node from map
// Returns the layer's tracking node for a command buffer handle; the lookup can
// fail (handle not in commandBufferMap) for invalid/destroyed command buffers.
1645 GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1646 auto it = dev_data->commandBufferMap.find(cb);
1647 if (it == dev_data->commandBufferMap.end()) {
1653 // If a renderpass is active, verify that the given command type is appropriate for current subpass state
// Enforces VkSubpassContents rules: a SECONDARY_COMMAND_BUFFERS subpass only allows
// vkCmdExecuteCommands/NextSubpass/EndRenderPass, and an INLINE subpass forbids
// vkCmdExecuteCommands. Outside a render pass there is nothing to check.
1654 bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1655 if (!pCB->activeRenderPass) return false;
1657 if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1658 (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1659 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1660 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1661 "Commands cannot be called in a subpass using secondary command buffers.");
1662 } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1663 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1664 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1665 "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
// Verify that the queue family backing this command buffer's pool supports at least
// one of the required queue capability flags (e.g. GRAPHICS for draw commands).
// Returns true (skip) when a validation error was logged with the given error_code.
1670 bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
1671 VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
1672 auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1674 VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1675 if (!(required_flags & queue_flags)) {
// Build a human-readable "A or B" list of the required capabilities for the message.
1676 string required_flags_string;
1677 for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1678 if (flag & required_flags) {
1679 if (required_flags_string.size()) {
1680 required_flags_string += " or ";
1682 required_flags_string += string_VkQueueFlagBits(flag);
// NOTE(review): message ends with a double period ("capabilities..") — looks like a typo to fix upstream.
1685 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1686 HandleToUint64(cb_node->commandBuffer), error_code,
1687 "Cannot call %s on a command buffer allocated from a pool without %s capabilities..", caller_name,
1688 required_flags_string.c_str());
// Map an object type to a short phrase describing how that object can invalidate
// a command buffer that bound it; used when reporting broken bindings.
1694 static char const *GetCauseStr(VK_OBJECT obj) {
1695 if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
1696 if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
// Log one error per broken binding explaining why this command buffer is invalid
// (a bound object was destroyed/updated/rerecorded). call_source names the API
// call being recorded when the invalid state was detected.
1700 static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
1702 for (auto obj : cb_state->broken_bindings) {
1703 const char *type_str = object_string[obj.type];
1704 const char *cause_str = GetCauseStr(obj);
1705 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1706 HandleToUint64(cb_state->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1707 "You are adding %s to command buffer 0x%" PRIx64 " that is invalid because bound %s 0x%" PRIx64 " was %s.",
1708 call_source, HandleToUint64(cb_state->commandBuffer), type_str, obj.handle, cause_str);
1713 // 'commandBuffer must be in the recording state' valid usage error code for each command
1714 // Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
1715 // Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
1716 using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
// Lookup table consumed by ValidateCmd(): each vkCmd* entry point maps to the
// spec's per-command "must be recording" VUID. Commented-out entries are for
// extensions deliberately excluded from this map.
1717 static const std::unordered_map<CmdTypeHashType, UNIQUE_VALIDATION_ERROR_CODE> must_be_recording_map = {
1718     {CMD_NONE, VALIDATION_ERROR_UNDEFINED},  // UNMATCHED
1719     {CMD_BEGINQUERY, VALIDATION_ERROR_17802413},
1720     {CMD_BEGINRENDERPASS, VALIDATION_ERROR_17a02413},
1721     {CMD_BINDDESCRIPTORSETS, VALIDATION_ERROR_17c02413},
1722     {CMD_BINDINDEXBUFFER, VALIDATION_ERROR_17e02413},
1723     {CMD_BINDPIPELINE, VALIDATION_ERROR_18002413},
1724     {CMD_BINDVERTEXBUFFERS, VALIDATION_ERROR_18202413},
1725     {CMD_BLITIMAGE, VALIDATION_ERROR_18402413},
1726     {CMD_CLEARATTACHMENTS, VALIDATION_ERROR_18602413},
1727     {CMD_CLEARCOLORIMAGE, VALIDATION_ERROR_18802413},
1728     {CMD_CLEARDEPTHSTENCILIMAGE, VALIDATION_ERROR_18a02413},
1729     {CMD_COPYBUFFER, VALIDATION_ERROR_18c02413},
1730     {CMD_COPYBUFFERTOIMAGE, VALIDATION_ERROR_18e02413},
1731     {CMD_COPYIMAGE, VALIDATION_ERROR_19002413},
1732     {CMD_COPYIMAGETOBUFFER, VALIDATION_ERROR_19202413},
1733     {CMD_COPYQUERYPOOLRESULTS, VALIDATION_ERROR_19402413},
1734     {CMD_DEBUGMARKERBEGINEXT, VALIDATION_ERROR_19602413},
1735     {CMD_DEBUGMARKERENDEXT, VALIDATION_ERROR_19802413},
1736     {CMD_DEBUGMARKERINSERTEXT, VALIDATION_ERROR_19a02413},
1737     {CMD_DISPATCH, VALIDATION_ERROR_19c02413},
1738     // Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, VALIDATION_ERROR_19e02413 },
1739     {CMD_DISPATCHINDIRECT, VALIDATION_ERROR_1a002413},
1740     {CMD_DRAW, VALIDATION_ERROR_1a202413},
1741     {CMD_DRAWINDEXED, VALIDATION_ERROR_1a402413},
1742     {CMD_DRAWINDEXEDINDIRECT, VALIDATION_ERROR_1a602413},
1743     // Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD, VALIDATION_ERROR_1a802413 },
1744     {CMD_DRAWINDIRECT, VALIDATION_ERROR_1aa02413},
1745     // Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD, VALIDATION_ERROR_1ac02413 },
1746     {CMD_ENDCOMMANDBUFFER, VALIDATION_ERROR_27400076},
1747     {CMD_ENDQUERY, VALIDATION_ERROR_1ae02413},
1748     {CMD_ENDRENDERPASS, VALIDATION_ERROR_1b002413},
1749     {CMD_EXECUTECOMMANDS, VALIDATION_ERROR_1b202413},
1750     {CMD_FILLBUFFER, VALIDATION_ERROR_1b402413},
1751     {CMD_NEXTSUBPASS, VALIDATION_ERROR_1b602413},
1752     {CMD_PIPELINEBARRIER, VALIDATION_ERROR_1b802413},
1753     // Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, VALIDATION_ERROR_1ba02413 },
1754     {CMD_PUSHCONSTANTS, VALIDATION_ERROR_1bc02413},
1755     {CMD_PUSHDESCRIPTORSETKHR, VALIDATION_ERROR_1be02413},
1756     {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, VALIDATION_ERROR_1c002413},
1757     // Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX, VALIDATION_ERROR_1c202413 },
1758     {CMD_RESETEVENT, VALIDATION_ERROR_1c402413},
1759     {CMD_RESETQUERYPOOL, VALIDATION_ERROR_1c602413},
1760     {CMD_RESOLVEIMAGE, VALIDATION_ERROR_1c802413},
1761     {CMD_SETBLENDCONSTANTS, VALIDATION_ERROR_1ca02413},
1762     {CMD_SETDEPTHBIAS, VALIDATION_ERROR_1cc02413},
1763     {CMD_SETDEPTHBOUNDS, VALIDATION_ERROR_1ce02413},
1764     // Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, VALIDATION_ERROR_1d002413 },
1765     {CMD_SETDISCARDRECTANGLEEXT, VALIDATION_ERROR_1d202413},
1766     {CMD_SETEVENT, VALIDATION_ERROR_1d402413},
1767     {CMD_SETLINEWIDTH, VALIDATION_ERROR_1d602413},
1768     {CMD_SETSAMPLELOCATIONSEXT, VALIDATION_ERROR_3e202413},
1769     {CMD_SETSCISSOR, VALIDATION_ERROR_1d802413},
1770     {CMD_SETSTENCILCOMPAREMASK, VALIDATION_ERROR_1da02413},
1771     {CMD_SETSTENCILREFERENCE, VALIDATION_ERROR_1dc02413},
1772     {CMD_SETSTENCILWRITEMASK, VALIDATION_ERROR_1de02413},
1773     {CMD_SETVIEWPORT, VALIDATION_ERROR_1e002413},
1774     // Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV, VALIDATION_ERROR_1e202413 },
1775     {CMD_UPDATEBUFFER, VALIDATION_ERROR_1e402413},
1776     {CMD_WAITEVENTS, VALIDATION_ERROR_1e602413},
1777     {CMD_WRITETIMESTAMP, VALIDATION_ERROR_1e802413},
1780 // Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1781 // there's an issue with the Cmd ordering
1782 bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
// Dispatch on the CB lifecycle state: recording -> check subpass rules,
// invalid -> report broken bindings, otherwise -> "must be recording" VUID.
1783 switch (cb_state->state) {
1785 return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1787 case CB_INVALID_COMPLETE:
1788 case CB_INVALID_INCOMPLETE:
1789 return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1792 auto error_it = must_be_recording_map.find(cmd);
1793 // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
1794 assert(error_it != must_be_recording_map.cend());
1795 if (error_it == must_be_recording_map.cend()) {
1796 error_it = must_be_recording_map.find(CMD_NONE);  // But we'll handle the asserting case, in case of a test gap
1798 const auto error = error_it->second;
1799 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1800 HandleToUint64(cb_state->commandBuffer), error,
1801 "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
1805 // For given object struct return a ptr of BASE_NODE type for its wrapping struct
// Dispatches on the VK_OBJECT's type tag and calls the matching Get*State/Node
// accessor. The reinterpret_casts turn the stored raw uint64 handle back into the
// typed Vulkan handle each accessor expects. Unhandled types return nullptr.
1806 BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1807 BASE_NODE *base_ptr = nullptr;
1808 switch (object_struct.type) {
1809 case kVulkanObjectTypeDescriptorSet: {
1810 base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1813 case kVulkanObjectTypeSampler: {
1814 base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1817 case kVulkanObjectTypeQueryPool: {
1818 base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1821 case kVulkanObjectTypePipeline: {
1822 base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1825 case kVulkanObjectTypeBuffer: {
1826 base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1829 case kVulkanObjectTypeBufferView: {
1830 base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1833 case kVulkanObjectTypeImage: {
1834 base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1837 case kVulkanObjectTypeImageView: {
1838 base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1841 case kVulkanObjectTypeEvent: {
1842 base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1845 case kVulkanObjectTypeDescriptorPool: {
1846 base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1849 case kVulkanObjectTypeCommandPool: {
1850 base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1853 case kVulkanObjectTypeFramebuffer: {
1854 base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1857 case kVulkanObjectTypeRenderPass: {
1858 base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1861 case kVulkanObjectTypeDeviceMemory: {
1862 base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1866 // TODO : Any other objects to be handled here?
1873 // Tie the VK_OBJECT to the cmd buffer which includes:
1874 //  Add object_binding to cmd buffer
1875 //  Add cb_binding to object
// Maintains the bidirectional link so that destroying/updating the object can
// later invalidate every command buffer that recorded a reference to it.
1876 static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1877 cb_bindings->insert(cb_node);
1878 cb_node->object_bindings.insert(obj);
1880 // For a given object, if cb_node is in that objects cb_bindings, remove cb_node
// Reverse of addCommandBufferBinding(): resolves the object's tracking struct and
// drops this CB from its cb_bindings. A null base_obj (already-destroyed or
// untracked object) is silently ignored.
1881 static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
1882 BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1883 if (base_obj) base_obj->cb_bindings.erase(cb_node);
1885 // Reset the command buffer state
1886 //  Maintain the createInfo and set state to CB_NEW, but clear all other state
// Called for vkBeginCommandBuffer/vkResetCommandBuffer (and pool reset) to return
// the tracking node to a pristine CB_NEW state. Also unhooks this CB from every
// object, framebuffer, and linked command buffer that referenced it.
1887 static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
1888 GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1890 pCB->in_use.store(0);
1891 // Reset CB state (note that createInfo is not cleared)
1892 pCB->commandBuffer = cb;
1893 memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1894 memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1895 pCB->hasDrawCmd = false;
1896 pCB->state = CB_NEW;
1897 pCB->submitCount = 0;
1898 pCB->image_layout_change_count = 1;  // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
1900 pCB->static_status = 0;
1901 pCB->viewportMask = 0;
1902 pCB->scissorMask = 0;
// Clear bound-pipeline/descriptor state for every bind point (graphics, compute, ...).
1904 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1905 pCB->lastBound[i].reset();
1908 memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1909 pCB->activeRenderPass = nullptr;
1910 pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1911 pCB->activeSubpass = 0;
1912 pCB->broken_bindings.clear();
1913 pCB->waitedEvents.clear();
1914 pCB->events.clear();
1915 pCB->writeEventsBeforeWait.clear();
1916 pCB->waitedEventsBeforeQueryReset.clear();
1917 pCB->queryToStateMap.clear();
1918 pCB->activeQueries.clear();
1919 pCB->startedQueries.clear();
1920 pCB->imageLayoutMap.clear();
1921 pCB->eventToStageMap.clear();
1922 pCB->drawData.clear();
1923 pCB->currentDrawData.buffers.clear();
1924 pCB->vertex_buffer_used = false;
1925 pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1926 // If secondary, invalidate any primary command buffer that may call us.
1927 if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1928 invalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1931 // Remove reverse command buffer links.
1932 for (auto pSubCB : pCB->linkedCommandBuffers) {
1933 pSubCB->linkedCommandBuffers.erase(pCB);
1935 pCB->linkedCommandBuffers.clear();
1936 pCB->updateImages.clear();
1937 pCB->updateBuffers.clear();
1938 clear_cmd_buf_and_mem_references(dev_data, pCB);
1939 pCB->queue_submit_functions.clear();
1940 pCB->cmd_execute_commands_functions.clear();
1941 pCB->eventUpdates.clear();
1942 pCB->queryUpdates.clear();
1944 // Remove object bindings
1945 for (auto obj : pCB->object_bindings) {
1946 removeCommandBufferBinding(dev_data, &obj, pCB);
1948 pCB->object_bindings.clear();
1949 // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1950 for (auto framebuffer : pCB->framebuffers) {
1951 auto fb_state = GetFramebufferState(dev_data, framebuffer);
1952 if (fb_state) fb_state->cb_bindings.erase(pCB);
1954 pCB->framebuffers.clear();
1955 pCB->activeFramebuffer = VK_NULL_HANDLE;
// Build the mask of pipeline state that is STATIC for a pipeline: start from
// "everything set" and clear the bit for each state the pipeline declares dynamic,
// so the caller knows which CBSTATUS_* bits must be set by vkCmdSet* calls instead.
1959 CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
1960 // initially assume everything is static state
1961 CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;
1964 for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
1965 switch (ds->pDynamicStates[i]) {
1966 case VK_DYNAMIC_STATE_LINE_WIDTH:
1967 flags &= ~CBSTATUS_LINE_WIDTH_SET;
1969 case VK_DYNAMIC_STATE_DEPTH_BIAS:
1970 flags &= ~CBSTATUS_DEPTH_BIAS_SET;
1972 case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1973 flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1975 case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1976 flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1978 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1979 flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1981 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1982 flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1984 case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1985 flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1987 case VK_DYNAMIC_STATE_SCISSOR:
1988 flags &= ~CBSTATUS_SCISSOR_SET;
1990 case VK_DYNAMIC_STATE_VIEWPORT:
1991 flags &= ~CBSTATUS_VIEWPORT_SET;
2002 // Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass. Returns true when the call IS inside an active render pass and the
// error callback requested a skip.
2004 bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
2005 UNIQUE_VALIDATION_ERROR_CODE msgCode) {
2006 bool inside = false;
2007 if (pCB->activeRenderPass) {
2008 inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2009 HandleToUint64(pCB->commandBuffer), msgCode,
2010 "%s: It is invalid to issue this call inside an active render pass (0x%" PRIx64 ").", apiName,
2011 HandleToUint64(pCB->activeRenderPass->renderPass));
2016 // Flags validation error if the associated call is made outside a render pass. The apiName
2017 // routine should ONLY be called inside a render pass.
2018 bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
2019 bool outside = false;
// A secondary CB recorded with RENDER_PASS_CONTINUE_BIT counts as "inside" a
// render pass even though it has no activeRenderPass of its own.
2020 if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
2021 ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
2022 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
2023 outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2024 HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
// Wire up the layer's debug-report and debug-messenger logging callbacks from the
// "lunarg_core_validation" layer settings during instance creation.
2030 static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
2031 layer_debug_report_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
2032 layer_debug_messenger_actions(instance_data->report_data, instance_data->logging_messenger, pAllocator,
2033 "lunarg_core_validation");
2036 // For the given ValidationCheck enum, set all relevant instance disabled flags to true
// Consumes a VkValidationFlagsEXT pNext struct supplied by the application at
// vkCreateInstance time to selectively (or wholly) disable validation checks.
2037 void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
2038 for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
2039 switch (val_flags_struct->pDisabledValidationChecks[i]) {
2040 case VK_VALIDATION_CHECK_SHADERS_EXT:
2041 instance_data->disabled.shader_validation = true;
2043 case VK_VALIDATION_CHECK_ALL_EXT:
2044 // Set all disabled flags to true
2045 instance_data->disabled.SetAll(true);
// Layer intercept for vkCreateInstance: advance the layer chain and call down,
// then on success build this layer's per-instance state (dispatch table, debug
// reporting, extension/API-version bookkeeping) and honor VK_EXT_validation_flags.
2053 VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
2054 VkInstance *pInstance) {
2055 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2057 assert(chain_info->u.pLayerInfo);
2058 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2059 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
2060 if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
2062 // Advance the link info for the next element on the chain
2063 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2065 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
2066 if (result != VK_SUCCESS) return result;
2068 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
2069 instance_data->instance = *pInstance;
2070 layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
2071 instance_data->report_data = debug_utils_create_instance(
2072 &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
// Record which instance extensions are enabled; defaults to 1.0 when the app supplied no VkApplicationInfo.
2074 instance_data->api_version = instance_data->extensions.InitFromInstanceCreateInfo(
2075 (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0), pCreateInfo);
2076 init_core_validation(instance_data, pAllocator);
2078 ValidateLayerOrdering(*pCreateInfo);
2079 // Parse any pNext chains
2080 const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
2081 if (validation_flags_ext) {
2082 SetDisabledFlags(instance_data, validation_flags_ext);
2088 // Hook DestroyInstance to remove tableInstanceMap entry
// Layer intercept for vkDestroyInstance: call down the chain first, then (under
// the global lock) tear down any logging callbacks/messengers this layer created
// and free the per-instance layer data keyed by the dispatch key.
2089 VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
2090 // TODOSC : Shouldn't need any customization here
2091 dispatch_key key = get_dispatch_key(instance);
2092 // TBD: Need any locking this early, in case this function is called at the
2093 // same time by more than one thread?
2094 instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
2095 instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
2097 lock_guard_t lock(global_lock);
2098 // Clean up logging callback, if any
2099 while (instance_data->logging_messenger.size() > 0) {
2100 VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
2101 layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
2102 instance_data->logging_messenger.pop_back();
2104 while (instance_data->logging_callback.size() > 0) {
2105 VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
2106 layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
2107 instance_data->logging_callback.pop_back();
2110 layer_debug_utils_destroy_instance(instance_data->report_data);
2111 FreeLayerDataPtr(key, instance_layer_data_map);
// Reports 'err_code' if 'requested_queue_family' is not below the queue family count
// previously cached for this physical device (or if the count was never queried).
// 'cmd_name' and 'queue_family_var_name' only affect the error message text.
// Returns true if a validation error was logged (and error reporting says to skip the call).
2114 static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2115 uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
2116 const char *queue_family_var_name) {
// Mention the KHR_get_physical_device_properties2 query variant only when that
// instance extension is enabled.
2119 const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
2120 ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
// Distinguish "app never queried the count" from "index exceeds the known count".
2123 std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
2124 ? "the pQueueFamilyPropertyCount was never obtained"
2125 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
2127 if (requested_queue_family >= pd_state->queue_family_count) {
2128 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2129 HandleToUint64(pd_state->phys_device), err_code,
2130 "%s: %s (= %" PRIu32
2131 ") is not less than any previously obtained pQueueFamilyPropertyCount from "
2132 "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
2133 cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str())
2138 // Verify VkDeviceQueueCreateInfos
// For each VkDeviceQueueCreateInfo: (1) queueFamilyIndex must be a family index known
// from a previous queue-family-properties query, and (2) queueCount must not exceed
// the queueCount advertised for that family. Returns true if anything was flagged.
2139 static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2140 uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
2143 for (uint32_t i = 0; i < info_count; ++i) {
2144 const auto requested_queue_family = infos[i].queueFamilyIndex;
2146 // Verify that requested queue family is known to be valid at this point in time
2147 std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
2148 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
2149 "vkCreateDevice", queue_family_var_name.c_str());
2151 // Verify that requested queue count of queue family is known to be valid at this point in time
// Only check queueCount when the family index itself is in range.
2152 if (requested_queue_family < pd_state->queue_family_count) {
2153 const auto requested_queue_count = infos[i].queueCount;
// The properties array may be shorter than queue_family_count if the app queried
// the count but not the full property list; guard the indexing below.
2154 const auto queue_family_props_count = pd_state->queue_family_properties.size();
2155 const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
2156 const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
2157 ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
2159 std::string count_note =
2160 !queue_family_has_props
2161 ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
2162 : "i.e. is not less than or equal to " +
2163 std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
// Error both when properties were never fetched and when the count is exceeded.
2165 if (!queue_family_has_props ||
2166 requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
2168 instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2169 HandleToUint64(pd_state->phys_device), VALIDATION_ERROR_06c002fc,
2170 "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
2171 ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
2172 "].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
2173 i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str())
2181 // Verify that features have been queried and that they are available
// Compares the app's requested VkPhysicalDeviceFeatures against the features cached
// for this physical device, treating both structs as flat arrays of VkBool32.
// Logs one error per unavailable feature, plus an advisory error if the app never
// called vkGetPhysicalDeviceFeatures() at all. Returns true if anything was flagged.
2182 static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2183 const VkPhysicalDeviceFeatures *requested_features) {
// Reinterpret both structs as VkBool32 arrays -- valid only while the struct is
// composed solely of VkBool32 members, which VkPhysicalDeviceFeatures is.
2186 const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
2187 const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
2188 // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
2189 // Need to provide the struct member name with the issue. To do that seems like we'll
2190 // have to loop through each struct member which should be done w/ codegen to keep in synch.
2191 uint32_t errors = 0;
2192 uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2193 for (uint32_t i = 0; i < total_bools; i++) {
// requested[i] > actual[i] means "requested VK_TRUE but device has VK_FALSE".
2194 if (requested[i] > actual[i]) {
2195 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2196 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2197 "While calling vkCreateDevice(), requesting feature '%s' in VkPhysicalDeviceFeatures struct, which is "
2198 "not available on this device.",
2199 GetPhysDevFeatureString(i))
// Extra nudge when there were errors AND the app never queried feature support.
2203 if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
2204 // If user didn't request features, notify them that they should
2205 // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2206 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2207 0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2208 "You requested features that are unavailable on this device. You should first query feature availability "
2209 "by calling vkGetPhysicalDeviceFeatures().")
// Layer hook for vkCreateDevice: validates the create info against cached
// physical-device state, forwards the call down the layer chain, then initializes
// the per-device layer_data (dispatch table, debug report data, cached properties,
// queue family properties, enabled features, and extension-specific limits).
2214 VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2215 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2217 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2219 unique_lock_t lock(global_lock);
2220 auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2222 // TODO: object_tracker should perhaps do this instead
2223 // and it does not seem to currently work anyway -- the loader just crashes before this point
2224 if (!GetPhysicalDeviceState(instance_data, gpu)) {
2226 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
2227 DEVLIMITS_MUST_QUERY_COUNT, "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
2230 // Check that any requested features are available
2231 // The enabled features can come from either pEnabledFeatures, or from the pNext chain
2232 const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
2233 if (nullptr == enabled_features_found) {
// Vulkan 1.1 / KHR path: features supplied via VkPhysicalDeviceFeatures2 in pNext.
2234 const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
2236 enabled_features_found = &(features2->features);
2240 if (enabled_features_found) {
2241 skip |= ValidateRequestedFeatures(instance_data, pd_state, enabled_features_found);
2245 ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
// Abort before touching the driver if validation flagged a hard error.
2247 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
// Standard layer-chain plumbing: fetch the next layer's entry points, advance the
// chain info, then call down.
2249 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2251 assert(chain_info->u.pLayerInfo);
2252 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2253 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2254 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2255 if (fpCreateDevice == NULL) {
2256 return VK_ERROR_INITIALIZATION_FAILED;
2259 // Advance the link info for the next element on the chain
2260 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2264 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2265 if (result != VK_SUCCESS) {
// Device created successfully -- set up this layer's per-device bookkeeping.
2270 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2272 device_data->instance_data = instance_data;
2273 // Setup device dispatch table
2274 layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2275 device_data->device = *pDevice;
2276 // Save PhysicalDevice handle
2277 device_data->physical_device = gpu;
2279 device_data->report_data = layer_debug_utils_create_device(instance_data->report_data, *pDevice);
2281 // Get physical device limits for this device
2282 instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
2284 device_data->api_version = device_data->extensions.InitFromDeviceCreateInfo(
2285 &instance_data->extensions, device_data->phys_dev_properties.properties.apiVersion, pCreateInfo);
// Two-call pattern: first fetch the count, then the property array.
2288 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2289 device_data->phys_dev_properties.queue_family_properties.resize(count);
2290 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2291 gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
2292 // TODO: device limits should make sure these are compatible
2293 if (enabled_features_found) {
2294 device_data->enabled_features = *enabled_features_found;
// No features requested: treat every feature as disabled.
2296 memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2298 // Store physical device properties and physical device mem limits into device layer_data structs
2299 instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2300 instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2302 if (device_data->extensions.vk_khr_push_descriptor) {
2303 // Get the needed push_descriptor limits
2304 auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
2305 auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
2306 instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
2307 device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
2309 if (device_data->extensions.vk_ext_descriptor_indexing) {
2310 // Get the needed descriptor_indexing limits
2311 auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>();
2312 auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props);
2313 instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
2314 device_data->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
// Cache the descriptor-indexing feature struct (if supplied) for later checks.
2317 const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
2318 if (descriptor_indexing_features) {
2319 device_data->phys_dev_ext_props.descriptor_indexing_features = *descriptor_indexing_features;
2324 ValidateLayerOrdering(*pCreateInfo);
// Layer hook for vkDestroyDevice: clears all tracked per-device state maps
// (pipelines, render passes, command buffers, descriptor pools/sets, images,
// buffers, queues), reports leaks, calls down the chain, and frees layer data.
2330 VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2331 // TODOSC : Shouldn't need any customization here
2332 dispatch_key key = get_dispatch_key(device);
2333 layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2334 // Free all the memory
2335 unique_lock_t lock(global_lock);
2336 dev_data->pipelineMap.clear();
2337 dev_data->renderPassMap.clear();
// commandBufferMap stores raw GLOBAL_CB_NODE pointers, so delete them explicitly.
2338 for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2339 delete (*ii).second;
2341 dev_data->commandBufferMap.clear();
2342 // This will also delete all sets in the pool & remove them from setMap
2343 deletePools(dev_data);
2344 // All sets should be removed
2345 assert(dev_data->setMap.empty());
2346 dev_data->descriptorSetLayoutMap.clear();
2347 dev_data->imageViewMap.clear();
2348 dev_data->imageMap.clear();
2349 dev_data->imageSubresourceMap.clear();
2350 dev_data->imageLayoutMap.clear();
2351 dev_data->bufferViewMap.clear();
2352 dev_data->bufferMap.clear();
2353 // Queues persist until device is destroyed
2354 dev_data->queueMap.clear();
2355 // Report any memory leaks
2356 layer_debug_utils_destroy_device(device);
2359 #if DISPATCH_MAP_DEBUG
2360 fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
// Call down the chain, then release this layer's per-device data.
2363 dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2364 FreeLayerDataPtr(key, layer_data_map);
// Instance extension implemented by this layer itself (currently only
// VK_EXT_debug_report) -- presumably returned from the layer's extension
// enumeration entry points; confirm against the enumeration functions.
2367 static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2369 // For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
2370 // and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
// 'caller' is the API name used in the message text; returns true if an error was logged.
2371 static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2372 UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
// Geometry stage bit requires the geometryShader device feature.
2374 if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2376 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
2377 "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
2378 "geometryShader feature enabled.",
// Either tessellation stage bit requires the tessellationShader device feature.
2381 if (!dev_data->enabled_features.tessellationShader &&
2382 (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2384 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
2385 "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
2386 "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
2387 "tessellationShader feature enabled.",
2393 // Loop through bound objects and increment their in_use counts.
// Marks every object bound to this command buffer as in-flight by bumping its
// atomic in_use counter; paired with DecrementBoundResources at retirement.
2394 static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2395 for (auto obj : cb_node->object_bindings) {
2396 auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2398 base_obj->in_use.fetch_add(1);
2402 // Track which resources are in-flight by atomically incrementing their "in_use" count
// Called when a command buffer is submitted: bumps the CB's own submit/in-use
// counts, all generically-bound objects, plus the special cases (draw-data
// buffers and events written before a wait).
2403 static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2404 cb_node->submitCount++;
2405 cb_node->in_use.fetch_add(1);
2407 // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2408 IncrementBoundObjects(dev_data, cb_node);
2409 // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2410 // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2411 // should then be flagged prior to calling this function
2412 for (auto drawDataElement : cb_node->drawData) {
2413 for (auto buffer : drawDataElement.buffers) {
2414 auto buffer_state = GetBufferState(dev_data, buffer);
2416 buffer_state->in_use.fetch_add(1);
// Events this CB writes before a wait get a separate write_in_use count.
2420 for (auto event : cb_node->writeEventsBeforeWait) {
2421 auto event_state = GetEventNode(dev_data, event);
2422 if (event_state) event_state->write_in_use++;
2426 // Note: This function assumes that the global lock is held by the calling thread.
2427 // For the given queue, verify the queue state up to the given seq number.
2428 // Currently the only check is to make sure that if there are events to be waited on prior to
2429 // a QueryReset, make sure that all such events have been signalled.
// Implemented as a worklist traversal over queues: cross-queue semaphore waits pull
// the waited-on queue into the worklist with its own target sequence number, so the
// check covers everything this submission transitively depends on. Returns true if
// any unsignaled-event error was logged.
2430 static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2433 // sequence number we want to validate up to, per queue
2434 std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
2435 // sequence number we've completed validation for, per queue
2436 std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2437 std::vector<QUEUE_STATE *> worklist{initial_queue};
2439 while (worklist.size()) {
2440 auto queue = worklist.back();
2441 worklist.pop_back();
// Resume from whichever is later: what we've validated, or what's already retired.
2443 auto target_seq = target_seqs[queue];
2444 auto seq = std::max(done_seqs[queue], queue->seq);
2445 auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
2447 for (; seq < target_seq; ++sub_it, ++seq) {
2448 for (auto &wait : sub_it->waitSemaphores) {
2449 auto other_queue = GetQueueState(dev_data, wait.queue);
2451 if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
2453 auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2454 auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2456 // if this wait is for another queue, and covers new sequence
2457 // numbers beyond what we've already validated, mark the new
2458 // target seq and (possibly-re)add the queue to the worklist.
2459 if (other_done_seq < other_target_seq) {
2460 target_seqs[other_queue] = other_target_seq;
2461 worklist.push_back(other_queue);
// The actual check: any query reset guarded by an event that is still unsignaled
// at this point in the queue's timeline is an error.
2465 for (auto cb : sub_it->cbs) {
2466 auto cb_node = GetCBNode(dev_data, cb);
2468 for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2469 for (auto event : queryEventsPair.second) {
2470 if (dev_data->eventMap[event].needsSignaled) {
2471 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2472 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
2473 "Cannot get query results on queryPool 0x%" PRIx64
2474 " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2475 HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2476 HandleToUint64(event))
2484 // finally mark the point we've now validated this queue to.
2485 done_seqs[queue] = seq;
2491 // When the given fence is retired, verify outstanding queue operations through the point of the fence
// Only fences with internal sync scope and a recorded signaler (queue, seq) are
// checked; the verification is delegated to VerifyQueueStateToSeq.
2492 static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2493 auto fence_state = GetFenceNode(dev_data, fence);
2494 if (fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
2495 return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2500 // Decrement in-use count for objects bound to command buffer
// Inverse of IncrementBoundObjects: releases one in-flight reference from every
// object bound to this command buffer when its submission retires.
2501 static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2502 BASE_NODE *base_obj = nullptr;
2503 for (auto obj : cb_node->object_bindings) {
2504 base_obj = GetStateStructPtrFromObject(dev_data, obj);
2506 base_obj->in_use.fetch_sub(1);
// Retires all submissions on 'pQueue' up to sequence number 'seq': releases
// semaphore/command-buffer/buffer/event in-use counts, copies query and event
// state recorded in each CB back into device-level maps, marks the submission's
// fence retired, and finally rolls forward any other queues whose work this
// queue waited on (via the otherQueueSeqs accumulation).
// NOTE(review): appears to assume the global lock is held, like its callers -- confirm.
2511 static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
// Highest sequence number waited on, per other queue, discovered while retiring.
2512 std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2514 // Roll this queue forward, one submission at a time.
2515 while (pQueue->seq < seq) {
2516 auto &submission = pQueue->submissions.front();
// Waited semaphores: release our reference and remember how far the signaling
// queue must have progressed.
2518 for (auto &wait : submission.waitSemaphores) {
2519 auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2521 pSemaphore->in_use.fetch_sub(1);
2523 auto &lastSeq = otherQueueSeqs[wait.queue];
2524 lastSeq = std::max(lastSeq, wait.seq);
2527 for (auto &semaphore : submission.signalSemaphores) {
2528 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2530 pSemaphore->in_use.fetch_sub(1);
2534 for (auto &semaphore : submission.externalSemaphores) {
2535 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2537 pSemaphore->in_use.fetch_sub(1);
2541 for (auto cb : submission.cbs) {
2542 auto cb_node = GetCBNode(dev_data, cb);
2546 // First perform decrement on general case bound objects
2547 DecrementBoundResources(dev_data, cb_node);
2548 for (auto drawDataElement : cb_node->drawData) {
2549 for (auto buffer : drawDataElement.buffers) {
2550 auto buffer_state = GetBufferState(dev_data, buffer);
2552 buffer_state->in_use.fetch_sub(1);
2556 for (auto event : cb_node->writeEventsBeforeWait) {
2557 auto eventNode = dev_data->eventMap.find(event);
2558 if (eventNode != dev_data->eventMap.end()) {
2559 eventNode->second.write_in_use--;
// Propagate per-CB query and event state into the device-wide maps now that
// the GPU work has retired.
2562 for (auto queryStatePair : cb_node->queryToStateMap) {
2563 dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2565 for (auto eventStagePair : cb_node->eventToStageMap) {
2566 dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2569 cb_node->in_use.fetch_sub(1);
2572 auto pFence = GetFenceNode(dev_data, submission.fence);
2573 if (pFence && pFence->scope == kSyncScopeInternal) {
2574 pFence->state = FENCE_RETIRED;
2577 pQueue->submissions.pop_front();
2581 // Roll other queues forward to the highest seq we saw a wait for
2582 for (auto qs : otherQueueSeqs) {
2583 RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2587 // Submit a fence to a queue, delimiting previous fences and previous untracked
// Marks the fence in-flight and records its signaler as (this queue, the sequence
// number the queue will reach after the current submission batch): base seq plus
// already-queued submissions plus the 'submitCount' submissions being added now.
2589 static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2590 pFence->state = FENCE_INFLIGHT;
2591 pFence->signaler.first = pQueue->queue;
2592 pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
// Flags an error if a command buffer is submitted while already in flight (or more
// than once in this batch) without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.
// Returns true if the error was logged.
2595 static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2597 if ((pCB->in_use.load() || current_submit_count > 1) &&
2598 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2599 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2600 VALIDATION_ERROR_31a0008e,
2601 "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
2602 HandleToUint64(pCB->commandBuffer))
// Validates that a command buffer is in a submittable state: not a ONE_TIME_SUBMIT
// buffer being reused, not invalidated, not empty/unrecorded, and properly ended.
// 'call_source' names the calling API in messages; 'vu_id' is the error code used
// for the unrecorded-buffer case. Returns true if any error was logged.
2607 static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2608 int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
// Honor the instance-level disable switch for command-buffer-state checks.
2610 if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2611 // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2612 if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2613 (cb_state->submitCount + current_submit_count > 1)) {
2614 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2615 DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION,
2616 "Commandbuffer 0x%" PRIx64
2617 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
2619 HandleToUint64(cb_state->commandBuffer), cb_state->submitCount + current_submit_count)
2622 // Validate that cmd buffers have been updated
2623 switch (cb_state->state) {
// Buffer was invalidated (e.g. a bound object was destroyed) -- report why.
2624 case CB_INVALID_INCOMPLETE:
2625 case CB_INVALID_COMPLETE:
2626 skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
// NOTE(review): visible lines omit the case labels here; this arm appears to
// handle a never-recorded buffer, the next an un-ended one -- confirm in full source.
2630 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2631 (uint64_t)(cb_state->commandBuffer), vu_id,
2632 "Command buffer 0x%" PRIx64 " used in the call to %s is unrecorded and contains no commands.",
2633 HandleToUint64(cb_state->commandBuffer), call_source);
2637 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2638 HandleToUint64(cb_state->commandBuffer), DRAWSTATE_NO_END_COMMAND_BUFFER,
2639 "You must call vkEndCommandBuffer() on command buffer 0x%" PRIx64 " before this call to %s!",
2640 HandleToUint64(cb_state->commandBuffer), call_source);
2643 default: /* recorded */
// Checks that every buffer referenced by the command buffer's draw data still
// exists (i.e. was not destroyed after recording). Returns true on any error.
2649 static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2652 // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2653 // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2654 // should then be flagged prior to calling this function
2655 for (auto drawDataElement : cb_node->drawData) {
2656 for (auto buffer : drawDataElement.buffers) {
2657 auto buffer_state = GetBufferState(dev_data, buffer);
// A non-null handle with no tracked state means the buffer has been deleted.
2658 if (buffer != VK_NULL_HANDLE && !buffer_state) {
2659 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2660 HandleToUint64(buffer), DRAWSTATE_INVALID_BUFFER,
2661 "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer))
2668 // Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
// Used for VK_SHARING_MODE_CONCURRENT resources: 'indices'/'count' come from the
// resource's create info; an error is logged if the submitting queue's family was
// not listed at creation time. Returns true if the error was logged.
2669 bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2670 const uint32_t *indices) {
2673 auto queue_state = GetQueueState(dev_data, queue);
// Linear scan; queueFamilyIndexCount is small in practice.
2675 for (uint32_t i = 0; i < count; i++) {
2676 if (indices[i] == queue_state->queueFamilyIndex) {
2683 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
2684 object->handle, DRAWSTATE_INVALID_QUEUE_FAMILY,
2685 "vkQueueSubmit: Command buffer 0x%" PRIx64 " contains %s 0x%" PRIx64
2686 " which was not created allowing concurrent access to this queue family %d.",
2687 HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle,
2688 queue_state->queueFamilyIndex)
2694 // Validate that queueFamilyIndices of primary command buffers match this queue
2695 // Secondary command buffers were previously validated in vkCmdExecuteCommands().
// Two checks: (1) the CB's command pool family must equal the submitting queue's
// family; (2) bound images/buffers created with VK_SHARING_MODE_CONCURRENT must
// list that family in their pQueueFamilyIndices. Returns true on any error.
2696 static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2698 auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2699 auto queue_state = GetQueueState(dev_data, queue);
2701 if (pPool && queue_state) {
2702 if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2703 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2704 HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_31a00094,
2705 "vkQueueSubmit: Primary command buffer 0x%" PRIx64
2706 " created in queue family %d is being submitted on queue 0x%" PRIx64 " from queue family %d.",
2707 HandleToUint64(pCB->commandBuffer), pPool->queueFamilyIndex, HandleToUint64(queue),
2708 queue_state->queueFamilyIndex)
2711 // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2712 for (auto object : pCB->object_bindings) {
2713 if (object.type == kVulkanObjectTypeImage) {
// object.handle is stored as uint64_t; reinterpret back to the typed handle.
2714 auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2715 if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2716 skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2717 image_state->createInfo.pQueueFamilyIndices);
2719 } else if (object.type == kVulkanObjectTypeBuffer) {
2720 auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2721 if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2722 skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2723 buffer_state->createInfo.pQueueFamilyIndices);
// Full pre-submit validation for a primary command buffer: simultaneous-use rules,
// deleted-resource checks on the CB and all linked secondaries, the secondary
// re-bound-to-another-primary case, and the general command-buffer state check.
// Returns true if any error was logged.
2732 static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2733 // Track in-use for resources off of primary and any secondary CBs
2736 // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2738 skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2740 skip |= validateResources(dev_data, pCB);
2742 for (auto pSubCB : pCB->linkedCommandBuffers) {
2743 skip |= validateResources(dev_data, pSubCB);
2744 // TODO: replace with invalidateCommandBuffers() at recording.
// A secondary recorded into this primary but since re-bound to a different
// primary is an error unless it allows simultaneous use.
2745 if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2746 !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
// NOTE(review): this log_msg result is not OR'd into 'skip' -- presumably a
// warning-style report that should not fail the submit; confirm intent.
2747 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2748 VALIDATION_ERROR_31a00092,
2749 "Commandbuffer 0x%" PRIx64 " was submitted with secondary buffer 0x%" PRIx64
2750 " but that buffer has subsequently been bound to primary cmd buffer 0x%" PRIx64
2751 " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
2752 HandleToUint64(pCB->commandBuffer), HandleToUint64(pSubCB->commandBuffer),
2753 HandleToUint64(pSubCB->primaryCommandBuffer));
2757 skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
// Validates a fence passed to a queue-submission call: an internally-scoped fence
// must be neither already in flight nor already signaled (unreset). Returns true
// if an error was logged. External-scope fences are not checked here.
2762 static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2765 if (pFence && pFence->scope == kSyncScopeInternal) {
2766 if (pFence->state == FENCE_INFLIGHT) {
2767 // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2768 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2769 HandleToUint64(pFence->fence), DRAWSTATE_INVALID_FENCE,
2770 "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
// A retired (signaled) fence must be reset before it can be submitted again.
2773 else if (pFence->state == FENCE_RETIRED) {
2774 // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2775 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2776 HandleToUint64(pFence->fence), MEMTRACK_INVALID_FENCE_STATE,
2777 "Fence 0x%" PRIx64 " submitted in SIGNALED state. Fences must be reset before being submitted",
2778 HandleToUint64(pFence->fence))
// State-recording half of vkQueueSubmit. Marks the fence and every waited/signaled
// semaphore as in-use, appends one submission record per VkSubmitInfo to the queue's
// tracking list, and eagerly retires work associated with externally-scoped
// fences/semaphores (this layer will never observe the corresponding wait).
// NOTE(review): this excerpt is line-elided; some else-branches/closing braces are
// not visible here.
2785 static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2787     uint64_t early_retire_seq = 0;
2788     auto pQueue = GetQueueState(dev_data, queue);
2789     auto pFence = GetFenceNode(dev_data, fence);
2792         if (pFence->scope == kSyncScopeInternal) {
2793             // Mark fence in use
             // max(1u, submitCount): a fence-only submit (submitCount == 0) still consumes one sequence slot.
2794             SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2796                 // If no submissions, but just dropping a fence on the end of the queue,
2797                 // record an empty submission with just the fence, so we can determine
2799                 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
2800                                                  std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
             // External-scope fence: signal is consumed outside this layer's view.
2803             // Retire work up until this fence early, we will not see the wait that corresponds to this signal
2804             early_retire_seq = pQueue->seq + pQueue->submissions.size();
             // One-shot warning per device that external sync objects weaken validation.
2805             if (!dev_data->external_sync_warning) {
2806                 dev_data->external_sync_warning = true;
2807                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2808                         HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2809                         "vkQueueSubmit(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
2810                         " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
2812                         HandleToUint64(fence), HandleToUint64(queue));
2817     // Now process each individual submit
2818     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2819         std::vector<VkCommandBuffer> cbs;
2820         const VkSubmitInfo *submit = &pSubmits[submit_idx];
2821         vector<SEMAPHORE_WAIT> semaphore_waits;
2822         vector<VkSemaphore> semaphore_signals;
2823         vector<VkSemaphore> semaphore_externals;
         // Consume each waited semaphore: record the wait against its signaler and un-signal it.
2824         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2825             VkSemaphore semaphore = submit->pWaitSemaphores[i];
2826             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2828                 if (pSemaphore->scope == kSyncScopeInternal) {
2829                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2830                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2831                         pSemaphore->in_use.fetch_add(1);
2833                     pSemaphore->signaler.first = VK_NULL_HANDLE;
2834                     pSemaphore->signaled = false;
2836                     semaphore_externals.push_back(semaphore);
2837                     pSemaphore->in_use.fetch_add(1);
                     // A temporary external import reverts to internal scope once waited on.
2838                     if (pSemaphore->scope == kSyncScopeExternalTemporary) {
2839                         pSemaphore->scope = kSyncScopeInternal;
         // Record each signaled semaphore as pending at this queue/sequence position.
2844         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2845             VkSemaphore semaphore = submit->pSignalSemaphores[i];
2846             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2848                 if (pSemaphore->scope == kSyncScopeInternal) {
2849                     pSemaphore->signaler.first = queue;
2850                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2851                     pSemaphore->signaled = true;
2852                     pSemaphore->in_use.fetch_add(1);
2853                     semaphore_signals.push_back(semaphore);
2855                     // Retire work up until this submit early, we will not see the wait that corresponds to this signal
2856                     early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
2857                     if (!dev_data->external_sync_warning) {
2858                         dev_data->external_sync_warning = true;
2859                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2860                                 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2861                                 "vkQueueSubmit(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
2862                                 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
2863                                 "associated objects.",
2864                                 HandleToUint64(semaphore), HandleToUint64(queue));
         // Track primary command buffers plus any linked secondaries; bump their in-use counts
         // and fold their recorded image-layout transitions into the global map.
2869         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2870             auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2872                 cbs.push_back(submit->pCommandBuffers[i]);
2873                 for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2874                     cbs.push_back(secondaryCmdBuffer->commandBuffer);
2875                     UpdateCmdBufImageLayouts(dev_data, secondaryCmdBuffer);
2876                     incrementResources(dev_data, secondaryCmdBuffer);
2878                 UpdateCmdBufImageLayouts(dev_data, cb_node);
2879                 incrementResources(dev_data, cb_node);
         // The fence is attached only to the last submission in the batch.
2882         pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
2883                                          submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2886     if (early_retire_seq) {
2887         RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
// Validation half of vkQueueSubmit. Checks the fence's submit eligibility, tracks
// semaphore signal/unsignal state across the whole batch to flag waits that can
// never complete and double-signals, and validates each command buffer's image
// layouts, primary-CB state, queue family, and recorded submit-time checks.
// Returns true (skip) on any validation error.
// NOTE(review): this excerpt is line-elided; some braces/statements are not visible.
2891 static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2893     auto pFence = GetFenceNode(dev_data, fence);
2894     bool skip = ValidateFenceForSubmit(dev_data, pFence);
     // Semaphore state simulated across submits of this batch only.
2899     unordered_set<VkSemaphore> signaled_semaphores;
2900     unordered_set<VkSemaphore> unsignaled_semaphores;
2901     unordered_set<VkSemaphore> internal_semaphores;
2902     vector<VkCommandBuffer> current_cmds;
2903     unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
2904     // Now verify each individual submit
2905     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2906         const VkSubmitInfo *submit = &pSubmits[submit_idx];
2907         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2908             skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2909                                                  VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2910             VkSemaphore semaphore = submit->pWaitSemaphores[i];
2911             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2912             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                 // Waiting on a semaphore that is neither signaled nor pending a signal in this batch.
2913                 if (unsignaled_semaphores.count(semaphore) ||
2914                     (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2915                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2916                                     HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2917                                     "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
2918                                     HandleToUint64(queue), HandleToUint64(semaphore));
                     // The wait consumes the signal.
2920                     signaled_semaphores.erase(semaphore);
2921                     unsignaled_semaphores.insert(semaphore);
             // Temporarily-external semaphores behave as internal after the first wait.
2924             if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
2925                 internal_semaphores.insert(semaphore);
2928         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2929             VkSemaphore semaphore = submit->pSignalSemaphores[i];
2930             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2931             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                 // Signaling an already-signaled, not-yet-waited semaphore is an error.
2932                 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2933                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2934                                     HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2935                                     "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
2936                                     " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2937                                     HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2939                     unsignaled_semaphores.erase(semaphore);
2940                     signaled_semaphores.insert(semaphore);
2944         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2945             auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2947                 skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
2948                 current_cmds.push_back(submit->pCommandBuffers[i]);
                 // std::count: number of times this CB appears in the batch so far (simultaneous-use check).
2949                 skip |= validatePrimaryCommandBufferState(
2950                     dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2951                 skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2953                 // Potential early exit here as bad object state may crash in delayed function calls
2958                 // Call submit-time functions to validate/update state
2959                 for (auto &function : cb_node->queue_submit_functions) {
2962                 for (auto &function : cb_node->eventUpdates) {
2963                     skip |= function(queue);
2965                 for (auto &function : cb_node->queryUpdates) {
2966                     skip |= function(queue);
// Layer entry point for vkQueueSubmit: validate under the global lock, abort with
// VK_ERROR_VALIDATION_FAILED_EXT on error, otherwise forward to the driver and
// record the resulting state.
2974 VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2975     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2976     unique_lock_t lock(global_lock);
2978     bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2981     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
     // Down-chain call; lock handling around it is elided in this excerpt.
2983     VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2986     PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
// Validation half of vkAllocateMemory: flag an error once the number of live
// allocations reaches the device's maxMemoryAllocationCount limit.
2991 static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2993     if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2994         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2995                         HandleToUint64(dev_data->device), VALIDATION_ERROR_UNDEFINED,
2996                         "Number of currently valid memory objects is not less than the maximum allowed (%u).",
2997                         dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount);
// State-recording half of vkAllocateMemory: begin tracking the new memory object.
3002 static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
3003     add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
// Layer entry point for vkAllocateMemory: validate, forward to the driver, then
// record the allocation only when the driver reports VK_SUCCESS.
3007 VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
3008                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
3009     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3010     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3011     unique_lock_t lock(global_lock);
3012     bool skip = PreCallValidateAllocateMemory(dev_data);
3015         result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
3017         if (VK_SUCCESS == result) {
3018             PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
3024 // For given obj node, if it is in use, flag a validation error and return callback result, else return false.
// Shared helper used by the Destroy* pre-call validators below; obj_node->in_use is
// the command-buffer reference count maintained by incrementResources/Retire paths.
3025 bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
3026                             UNIQUE_VALIDATION_ERROR_CODE error_code) {
3027     if (dev_data->instance_data->disabled.object_in_use) return false;
3029     if (obj_node->in_use.load()) {
3031             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
3032                     error_code, "Cannot call %s on %s 0x%" PRIx64 " that is currently in use by a command buffer.", caller_name,
3033                     object_string[obj_struct.type], obj_struct.handle);
// Validation half of vkFreeMemory: look up the memory object (out-params reused by
// the post-call) and error if it is still referenced by a command buffer.
3038 static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
3039     *mem_info = GetMemObjInfo(dev_data, mem);
3040     *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
3041     if (dev_data->instance_data->disabled.free_memory) return false;
3044         skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, "vkFreeMemory", VALIDATION_ERROR_2880054a);
// State-recording half of vkFreeMemory: unbind every image/buffer still bound to
// this memory (with an informational message), invalidate command buffers that
// reference it, and drop the tracking entry.
3049 static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
3050     // Clear mem binding for any bound objects
3051     for (auto obj : mem_info->obj_bindings) {
3052         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
3053                 MEMTRACK_FREED_MEM_REF, "VK Object 0x%" PRIx64 " still has a reference to mem obj 0x%" PRIx64,
3054                 HandleToUint64(obj.handle), HandleToUint64(mem_info->mem));
3055         BINDABLE *bindable_state = nullptr;
         // switch on obj.type (switch statement elided in this excerpt).
3057             case kVulkanObjectTypeImage:
3058                 bindable_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
3060             case kVulkanObjectTypeBuffer:
3061                 bindable_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
3064                 // Should only have buffer or image objects bound to memory
3068         assert(bindable_state);
3069         bindable_state->binding.mem = MEMORY_UNBOUND;
3070         bindable_state->UpdateBoundMemorySet();
3072     // Any bound cmd buffers are now invalid
3073     invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
3074     dev_data->memObjMap.erase(mem);
// Layer entry point for vkFreeMemory: validate, forward to the driver, record.
// Freeing VK_NULL_HANDLE is a valid no-op, hence the handle check before recording.
3077 VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
3078     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3079     DEVICE_MEM_INFO *mem_info = nullptr;
3080     VK_OBJECT obj_struct;
3081     unique_lock_t lock(global_lock);
3082     bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
3085         dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
3087         if (mem != VK_NULL_HANDLE) {
3088             PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
3093 // Validate that given Map memory range is valid. This means that the memory should not already be mapped,
3094 //  and that the size of the map range should be:
3096 //  2. Within the size of the memory allocation
// Returns true (skip) on any violation: zero size, double-map, or a range that
// oversteps allocationSize (both the VK_WHOLE_SIZE and explicit-size cases).
3097 static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
3101         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3102                        HandleToUint64(mem), MEMTRACK_INVALID_MAP, "VkMapMemory: Attempting to map memory range of size zero");
3105     auto mem_element = dev_data->memObjMap.find(mem);
3106     if (mem_element != dev_data->memObjMap.end()) {
3107         auto mem_info = mem_element->second.get();
3108         // It is an application error to call VkMapMemory on an object that is already mapped
         // mem_range.size != 0 is this layer's "currently mapped" flag (see deleteMemRanges).
3109         if (mem_info->mem_range.size != 0) {
3110             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3111                            HandleToUint64(mem), MEMTRACK_INVALID_MAP,
3112                            "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIx64, HandleToUint64(mem));
3115         // Validate that offset + size is within object's allocationSize
3116         if (size == VK_WHOLE_SIZE) {
3117             if (offset >= mem_info->alloc_info.allocationSize) {
3118                 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3119                                HandleToUint64(mem), MEMTRACK_INVALID_MAP,
3120                                "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
3121                                " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
3122                                offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
3125             if ((offset + size) > mem_info->alloc_info.allocationSize) {
3126                 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3127                                HandleToUint64(mem), VALIDATION_ERROR_31200552,
3128                                "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
3129                                offset, size + offset, mem_info->alloc_info.allocationSize);
// Record the mapped range on the memory object (marks it "mapped").
3136 static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
3137     auto mem_info = GetMemObjInfo(dev_data, mem);
3139         mem_info->mem_range.offset = offset;
3140         mem_info->mem_range.size = size;
// Clear the mapped range at vkUnmapMemory time. Errors if the memory was not
// mapped, and releases any non-coherent shadow copy allocated at map time.
3144 static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
3146     auto mem_info = GetMemObjInfo(dev_data, mem);
3148         if (!mem_info->mem_range.size) {
3149             // Valid Usage: memory must currently be mapped
3150             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3151                            HandleToUint64(mem), VALIDATION_ERROR_33600562,
3152                            "Unmapping Memory without memory being mapped: mem obj 0x%" PRIx64 ".", HandleToUint64(mem));
3154         mem_info->mem_range.size = 0;
3155         if (mem_info->shadow_copy) {
             // shadow_copy_base is the raw malloc'd block; shadow_copy points inside it.
3156             free(mem_info->shadow_copy_base);
3157             mem_info->shadow_copy_base = 0;
3158             mem_info->shadow_copy = 0;
// Guard value for pad data.
// Byte pattern written into the shadow-copy guard bands wrapping non-coherent
// mapped memory (see initializeAndTrackMemory); a later mismatch indicates the
// application wrote outside its mapped range. Declared const: the pattern is a
// fixed sentinel and must never be modified.
static const char NoncoherentMemoryFillValue = 0xb;
// Post-map bookkeeping for vkMapMemory. Saves the driver's mapped pointer; for
// HOST_COHERENT memory no shadowing is done, otherwise allocates an aligned shadow
// copy with guard bands on both sides, fills it with NoncoherentMemoryFillValue,
// and hands the application a pointer into the shadow instead of the driver memory
// so over-/under-writes can be detected at flush/unmap.
3167 static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
3169     auto mem_info = GetMemObjInfo(dev_data, mem);
3171         mem_info->p_driver_data = *ppData;
3172         uint32_t index = mem_info->alloc_info.memoryTypeIndex;
3173         if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
             // Coherent memory needs no shadow tracking.
3174             mem_info->shadow_copy = 0;
3176             if (size == VK_WHOLE_SIZE) {
3177                 size = mem_info->alloc_info.allocationSize - offset;
3179             mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3180             assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) ==
3182             // Ensure start of mapped region reflects hardware alignment constraints
3183             uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3185             // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
3186             uint64_t start_offset = offset % map_alignment;
3187             // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
             // Over-allocate so the shadow pointer can be rounded up to map_alignment.
3188             mem_info->shadow_copy_base =
3189                 malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
             // Round the base up to the next map_alignment boundary.
3191             mem_info->shadow_copy =
3192                 reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
3193                                          ~(map_alignment - 1)) +
3195             assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
3196                               map_alignment) == 0);
             // Pre-fill shadow (including both guard bands) with the sentinel byte.
3198             memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
             // Application receives a pointer past the leading guard band.
3199             *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
3204 // Verify that state for fence being waited on is appropriate. That is,
3205 //  a fence being waited on should not already be signaled and
3206 //  it should have been submitted on a queue or during acquire next image
// Emits only a warning (not an error); external-scope fences are not checked.
3207 static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
3210     auto pFence = GetFenceNode(dev_data, fence);
3211     if (pFence && pFence->scope == kSyncScopeInternal) {
3212         if (pFence->state == FENCE_UNSIGNALED) {
3214                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3215                         HandleToUint64(fence), MEMTRACK_INVALID_FENCE_STATE,
3216                         "%s called for fence 0x%" PRIx64 " which has not been submitted on a Queue or during acquire next image.",
3217                         apiCall, HandleToUint64(fence));
// Mark a fence as having completed: retire the signaling queue's work up to the
// fence's sequence number, or (for WSI-signaled fences) just mark it retired.
3223 static void RetireFence(layer_data *dev_data, VkFence fence) {
3224     auto pFence = GetFenceNode(dev_data, fence);
3225     if (pFence->scope == kSyncScopeInternal) {
3226         if (pFence->signaler.first != VK_NULL_HANDLE) {
3227             // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
3228             RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
3230             // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
3231             // the fence as retired.
3232             pFence->state = FENCE_RETIRED;
// Validation half of vkWaitForFences: warn on unsubmitted fences and check queue
// progress toward each fence.
3237 static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
3238     if (dev_data->instance_data->disabled.wait_for_fences) return false;
3240     for (uint32_t i = 0; i < fence_count; i++) {
3241         skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
3242         skip |= VerifyQueueStateToFence(dev_data, fences[i]);
// State-recording half of vkWaitForFences: retire fences only when the wait proves
// they all completed (waitAll, or a single fence).
3247 static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
3248     // When we know that all fences are complete we can clean/remove their CBs
3249     if ((VK_TRUE == wait_all) || (1 == fence_count)) {
3250         for (uint32_t i = 0; i < fence_count; i++) {
3251             RetireFence(dev_data, fences[i]);
3254     // NOTE : Alternate case not handled here is when some fences have completed. In
3255     //  this case for app to guarantee which fences completed it will have to call
3256     //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
// Layer entry point for vkWaitForFences: validate, forward, record on VK_SUCCESS
// (a timeout result must not retire anything).
3259 VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
3261     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3262     // Verify fence status of submitted fences
3263     unique_lock_t lock(global_lock);
3264     bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
3266     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3268     VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
3270     if (result == VK_SUCCESS) {
3272         PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
// Validation half of vkGetFenceStatus: unless disabled, warn if the fence was
// never submitted (shares verifyWaitFenceState with vkWaitForFences).
3278 static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
3279     if (dev_data->instance_data->disabled.get_fence_state) return false;
3280     return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
3283 static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
// Layer entry point for vkGetFenceStatus: validate, forward to the driver, and
// retire the fence's work only when the driver reports it signaled (VK_SUCCESS).
3285 VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
3286     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3287     unique_lock_t lock(global_lock);
3288     bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
3290     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3292     VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
3293     if (result == VK_SUCCESS) {
3295         PostCallRecordGetFenceStatus(dev_data, fence);
// Begin tracking a queue handle the first time the app retrieves it; repeated
// retrieval of the same handle is a no-op (queues are not created/destroyed by
// the app, only obtained).
3301 static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3302     // Add queue to tracking set only if it is new
3303     auto result = dev_data->queues.emplace(queue);
3304     if (result.second == true) {
3305         QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3306         queue_state->queue = queue;
3307         queue_state->queueFamilyIndex = q_family_index;
         // seq counts retired submissions; starts at zero for a fresh queue.
3308         queue_state->seq = 0;
// Layer entry point for vkGetDeviceQueue: forward first, then record under lock.
3312 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3313     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3314     dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3315     lock_guard_t lock(global_lock);
3317     PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
// Layer entry point for vkGetDeviceQueue2 (Vulkan 1.1): same recording, but the
// driver may legally return VK_NULL_HANDLE here, so guard before recording.
3320 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
3321     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3322     dev_data->dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
3323     lock_guard_t lock(global_lock);
3325     if (*pQueue != VK_NULL_HANDLE) {
3326         PostCallRecordGetDeviceQueue(dev_data, pQueueInfo->queueFamilyIndex, *pQueue);
// Validation half of vkQueueWaitIdle: look up the queue state (reused by the
// post-call) and verify progress up to the end of its submission list.
3330 static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3331     *queue_state = GetQueueState(dev_data, queue);
3332     if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3333     return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
// State-recording half of vkQueueWaitIdle: the queue is now idle, so retire every
// pending submission on it.
3336 static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3337     RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
// Layer entry point for vkQueueWaitIdle.
3340 VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3341     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3342     QUEUE_STATE *queue_state = nullptr;
3343     unique_lock_t lock(global_lock);
3344     bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3346     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3347     VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3348     if (VK_SUCCESS == result) {
3350         PostCallRecordQueueWaitIdle(dev_data, queue_state);
// Validation half of vkDeviceWaitIdle: verify progress on every tracked queue.
3356 static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3357     if (dev_data->instance_data->disabled.device_wait_idle) return false;
3359     for (auto &queue : dev_data->queueMap) {
3360         skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
// State-recording half of vkDeviceWaitIdle: all queues are idle; retire everything.
3365 static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3366     for (auto &queue : dev_data->queueMap) {
3367         RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
// Layer entry point for vkDeviceWaitIdle.
3371 VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3372     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3373     unique_lock_t lock(global_lock);
3374     bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3376     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3377     VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3378     if (VK_SUCCESS == result) {
3380         PostCallRecordDeviceWaitIdle(dev_data);
// Validation half of vkDestroyFence: look up the fence node (out-params reused by
// the post-call) and error if an internally-scoped fence is still in flight.
3386 static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3387     *fence_node = GetFenceNode(dev_data, fence);
3388     *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3389     if (dev_data->instance_data->disabled.destroy_fence) return false;
3392         if ((*fence_node)->scope == kSyncScopeInternal && (*fence_node)->state == FENCE_INFLIGHT) {
3394                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3395                         HandleToUint64(fence), VALIDATION_ERROR_24e008c0, "Fence 0x%" PRIx64 " is in use.", HandleToUint64(fence));
3401 static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
// Layer entry point for vkDestroyFence: validate, forward to the driver, then drop
// the tracking entry.
3403 VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3404     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3405     // Common data objects used pre & post call
3406     FENCE_NODE *fence_node = nullptr;
3407     VK_OBJECT obj_struct;
3408     unique_lock_t lock(global_lock);
3409     bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3413         dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
3415         PostCallRecordDestroyFence(dev_data, fence);
// Validation half of vkDestroySemaphore: look up the semaphore node (out-params
// reused by the post-call) and error if it is still referenced by a command buffer.
3419 static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3420                                             VK_OBJECT *obj_struct) {
3421     *sema_node = GetSemaphoreNode(dev_data, semaphore);
3422     *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3423     if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3426         skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, "vkDestroySemaphore", VALIDATION_ERROR_268008e2);
3431 static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
// Layer entry point for vkDestroySemaphore: validate, forward, drop tracking entry.
3433 VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3434     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3435     SEMAPHORE_NODE *sema_node;
3436     VK_OBJECT obj_struct;
3437     unique_lock_t lock(global_lock);
3438     bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3441         dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
3443         PostCallRecordDestroySemaphore(dev_data, semaphore);
// Validation half of vkDestroyEvent: look up the event state (out-params reused by
// the post-call) and error if it is still referenced by a command buffer.
3447 static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3448     *event_state = GetEventNode(dev_data, event);
3449     *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3450     if (dev_data->instance_data->disabled.destroy_event) return false;
3453         skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, "vkDestroyEvent", VALIDATION_ERROR_24c008f2);
// State-recording half of vkDestroyEvent: invalidate command buffers that recorded
// this event, then drop the tracking entry.
3458 static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3459     invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3460     dev_data->eventMap.erase(event);
// Layer entry point for vkDestroyEvent; destroying VK_NULL_HANDLE is a valid no-op,
// hence the handle check before recording.
3463 VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3464     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3465     EVENT_STATE *event_state = nullptr;
3466     VK_OBJECT obj_struct;
3467     unique_lock_t lock(global_lock);
3468     bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3471         dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
3473         if (event != VK_NULL_HANDLE) {
3474             PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
// Validation half of vkDestroyQueryPool: look up the pool node (out-params reused
// by the post-call) and error if it is still referenced by a command buffer.
3479 static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3480                                             VK_OBJECT *obj_struct) {
3481     *qp_state = GetQueryPoolNode(dev_data, query_pool);
3482     *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3483     if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3486         skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, "vkDestroyQueryPool", VALIDATION_ERROR_26200632);
// State-recording half of vkDestroyQueryPool: invalidate command buffers that
// reference the pool, then drop the tracking entry.
3491 static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3492                                            VK_OBJECT obj_struct) {
3493     invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3494     dev_data->queryPoolMap.erase(query_pool);
// Layer entry point for vkDestroyQueryPool; destroying VK_NULL_HANDLE is a valid
// no-op, hence the handle check before recording.
3497 VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3498     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3499     QUERY_POOL_NODE *qp_state = nullptr;
3500     VK_OBJECT obj_struct;
3501     unique_lock_t lock(global_lock);
3502     bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3505         dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
3507         if (queryPool != VK_NULL_HANDLE) {
3508             PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3512 static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3513 uint32_t query_count, VkQueryResultFlags flags,
3514 unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3516 auto query_pool_state = dev_data->queryPoolMap.find(query_pool);
3517 if (query_pool_state != dev_data->queryPoolMap.end()) {
3518 if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
3520 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3521 VALIDATION_ERROR_2fa00664,
3522 "QueryPool 0x%" PRIx64
3523 " was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
3524 HandleToUint64(query_pool));
3528 // TODO: clean this up, it's insanely wasteful.
3529 for (auto cmd_buffer : dev_data->commandBufferMap) {
3530 if (cmd_buffer.second->in_use.load()) {
3531 for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
3532 (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
3537 if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3538 for (uint32_t i = 0; i < query_count; ++i) {
3539 QueryObject query = {query_pool, first_query + i};
3540 auto qif_pair = queries_in_flight->find(query);
3541 auto query_state_pair = dev_data->queryToStateMap.find(query);
3542 if (query_state_pair != dev_data->queryToStateMap.end()) {
3543 // Available and in flight
3544 if (qif_pair != queries_in_flight->end()) {
3545 if (query_state_pair->second) {
3546 for (auto cmd_buffer : qif_pair->second) {
3547 auto cb = GetCBNode(dev_data, cmd_buffer);
3548 auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3549 if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
3550 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3551 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
3552 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
3553 HandleToUint64(query_pool), first_query + i);
3557 } else if (!query_state_pair->second) { // Unavailable and Not in flight
3558 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3559 DRAWSTATE_INVALID_QUERY,
3560 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3561 HandleToUint64(query_pool), first_query + i);
3563 } else { // Uninitialized
3564 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3565 DRAWSTATE_INVALID_QUERY,
3566 "Cannot get query results on queryPool 0x%" PRIx64
3567 " with index %d as data has not been collected for this index.",
3568 HandleToUint64(query_pool), first_query + i);
// Post-call state recording for vkGetQueryPoolResults.
// For each query in [first_query, first_query + query_count): if the query is
// tracked as available (queryToStateMap value is true) AND still has command
// buffers in flight, re-arm every event that was waited on before the query was
// reset by setting needsSignaled = true in the device-level eventMap.
3574 static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3575                                               uint32_t query_count,
3576                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3577     for (uint32_t i = 0; i < query_count; ++i) {
3578         QueryObject query = {query_pool, first_query + i};
3579         auto qif_pair = queries_in_flight->find(query);
3580         auto query_state_pair = dev_data->queryToStateMap.find(query);
3581         if (query_state_pair != dev_data->queryToStateMap.end()) {
3582             // Available and in flight
             // NOTE(review): the queryToStateMap end() re-check below is redundant with the
             // enclosing if on line 3581; it is kept as-is here (no code change in this pass).
3583             if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
3584                 query_state_pair->second) {
3585                 for (auto cmd_buffer : qif_pair->second) {
3586                     auto cb = GetCBNode(dev_data, cmd_buffer);
3587                     auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3588                     if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3589                         for (auto event : query_event_pair->second) {
                             // Event must be signaled again before the reset query can be re-used.
3590                             dev_data->eventMap[event].needsSignaled = true;
// Layer intercept for vkGetQueryPoolResults.
// Validates under the global lock (collecting the set of queries still in flight),
// aborts with VK_ERROR_VALIDATION_FAILED_EXT if validation fails, otherwise calls
// down the dispatch chain and then records post-call state.
3599 VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3600                                                    size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3601     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3602     unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3603     unique_lock_t lock(global_lock);
3604     bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3606     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3608     dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3610     PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3615 // Return true if given ranges intersect, else false
3616 // Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
3617 // in an error so not checking that here
3618 // pad_ranges bool indicates a linear and non-linear comparison which requires padding
3619 // In the case where padding is required, if an alias is encountered then a validation error is reported and skip
3620 // may be set by the callback function so caller should merge in skip value if padding case is possible.
3621 // This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
3622 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3625     auto r1_start = range1->start;
3626     auto r1_end = range1->end;
3627     auto r2_start = range2->start;
3628     auto r2_end = range2->end;
     // Padding alignment of 1 means no padding; only a linear vs. non-linear pairing
     // must be padded out to the device's bufferImageGranularity.
3629     VkDeviceSize pad_align = 1;
3630     if (range1->linear != range2->linear) {
3631         pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
     // Compare granularity-aligned "pages": mask off the low bits of each endpoint
     // (assumes pad_align is a power of two — TODO confirm, spec requires it for
     // bufferImageGranularity) and report no intersection if the padded ranges are disjoint.
3633     if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3634     if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
     // Ranges overlap. In the mixed linear/non-linear case, warn about the aliasing
     // (unless the caller asked to skip checks).
3636     if (!skip_checks && (range1->linear != range2->linear)) {
3637         // In linear vs. non-linear case, warn of aliasing
3638         const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3639         const char *r1_type_str = range1->image ? "image" : "buffer";
3640         const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3641         const char *r2_type_str = range2->image ? "image" : "buffer";
3642         auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3644             dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, MEMTRACK_INVALID_ALIASING,
3645             "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3646             " which may indicate a bug. For further info refer to the Buffer-Image Granularity section of the Vulkan "
3648             "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
3649             r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3654 // Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
// Wraps [offset, end] in a temporary MEMORY_RANGE and delegates to the padded
// rangesIntersect with skip_checks=true, so no validation warning is emitted.
3655 bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3656     // Create a local MEMORY_RANGE struct to wrap offset/size
3657     MEMORY_RANGE range_wrap;
3658     // Synch linear with range1 to avoid padding and potential validation error case
3659     range_wrap.linear = range1->linear;
3660     range_wrap.start = offset;
3661     range_wrap.end = end;
     // tmp_bool receives any skip flag from the callee but is deliberately ignored here.
3663     return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3665 // For given mem_info, set all ranges valid that intersect [offset-end] range
3666 // TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
// Marks every bound range of mem_info that overlaps the (linear) [offset, end]
// span as having valid data — used after the application writes through a mapping.
3667 static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
3668     bool tmp_bool = false;
3669     MEMORY_RANGE map_range = {};
     // A mapped range is treated as linear for the intersection test.
3670     map_range.linear = true;
3671     map_range.start = offset;
3672     map_range.end = end;
3673     for (auto &handle_range_pair : mem_info->bound_ranges) {
3674         if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
3675             // TODO : WARN here if tmp_bool true?
3676             handle_range_pair.second.valid = true;
// Validation-only counterpart of InsertMemoryRange: builds the candidate MEMORY_RANGE
// for binding `handle` at memoryOffset, checks it against all already-bound ranges for
// illegal linear/non-linear aliasing, and verifies memoryOffset lies inside the
// allocation. Returns true if any validation error was logged; mutates no tracked state.
3681 static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3682                                       VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3683                                       bool is_linear, const char *api_name) {
3687     range.image = is_image;
3688     range.handle = handle;
3689     range.linear = is_linear;
     // New range inherits the memory object's global "has valid data" state.
3690     range.valid = mem_info->global_valid;
3691     range.memory = mem_info->mem;
3692     range.start = memoryOffset;
3693     range.size = memRequirements.size;
     // end is inclusive, hence the -1.
3694     range.end = memoryOffset + memRequirements.size - 1;
3695     range.aliases.clear();
3697     // Check for aliasing problems.
3698     for (auto &obj_range_pair : mem_info->bound_ranges) {
3699         auto check_range = &obj_range_pair.second;
3700         bool intersection_error = false;
         // skip_checks=false so rangesIntersect may log an aliasing warning into intersection_error.
3701         if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3702             skip |= intersection_error;
3703             range.aliases.insert(check_range);
     // VU check: memoryOffset must be strictly less than the allocation size
     // (image vs. buffer binds use distinct validation error codes).
3707     if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3708         UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
3709         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3710                        HandleToUint64(mem_info->mem), error_code,
3711                        "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64 "), memoryOffset=0x%" PRIxLEAST64
3712                        " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
3713                        api_name, HandleToUint64(mem_info->mem), HandleToUint64(handle), memoryOffset,
3714                        mem_info->alloc_info.allocationSize);
3720 // Object with given handle is being bound to memory w/ given mem_info struct.
3721 // Track the newly bound memory range with given memoryOffset
3722 // Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
3723 // and non-linear range incorrectly overlap.
3724 // Return true if an error is flagged and the user callback returns "true", otherwise false
3725 // is_image indicates an image object, otherwise handle is for a buffer
3726 // is_linear indicates a buffer or linear image
3727 static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3728                               VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3731     range.image = is_image;
3732     range.handle = handle;
3733     range.linear = is_linear;
3734     range.valid = mem_info->global_valid;
3735     range.memory = mem_info->mem;
3736     range.start = memoryOffset;
3737     range.size = memRequirements.size;
     // end is inclusive, hence the -1.
3738     range.end = memoryOffset + memRequirements.size - 1;
3739     range.aliases.clear();
3740     // Update Memory aliasing
3741     // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
3742     // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
3743     std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
3744     for (auto &obj_range_pair : mem_info->bound_ranges) {
3745         auto check_range = &obj_range_pair.second;
3746         bool intersection_error = false;
         // skip_checks=true: validation already ran in ValidateInsertMemoryRange, so no logging here.
3747         if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3748             range.aliases.insert(check_range);
3749             tmp_alias_ranges.insert(check_range);
     // Move the finished range into the map, then back-link all aliased ranges to its final address.
3752     mem_info->bound_ranges[handle] = std::move(range);
3753     for (auto tmp_range : tmp_alias_ranges) {
3754         tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
     // Record the handle in the image or buffer bound-object set (branch lines elided in this listing).
3757         mem_info->bound_images.insert(handle);
3759         mem_info->bound_buffers.insert(handle);
// Thin type-safe wrappers over Validate/InsertMemoryRange: convert the image or buffer
// handle to uint64_t and supply the is_image / is_linear flags (buffers are always linear).
3762 static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3763                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3764                                            const char *api_name) {
3765     return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
// Record (non-validating) variant for images.
3767 static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3768                                    VkMemoryRequirements mem_reqs, bool is_linear) {
3769     InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
// Validation wrapper for buffers: is_image=false, is_linear=true.
3772 static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3773                                             VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3774     return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
// Record (non-validating) variant for buffers.
3776 static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3777                                     VkMemoryRequirements mem_reqs) {
3778     InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3781 // Remove MEMORY_RANGE struct for give handle from bound_ranges of mem_info
3782 // is_image indicates if handle is for image or buffer
3783 // This function will also remove the handle-to-index mapping from the appropriate
3784 // map and clean up any aliases for range being removed.
3785 static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
3786     auto erase_range = &mem_info->bound_ranges[handle];
     // Unlink this range from every range that recorded it as an alias, so no
     // dangling MEMORY_RANGE* survives the erase below.
3787     for (auto alias_range : erase_range->aliases) {
3788         alias_range->aliases.erase(erase_range);
3790     erase_range->aliases.clear();
3791     mem_info->bound_ranges.erase(handle);
     // Drop the handle from the image or buffer bound-object set (branch lines elided in this listing).
3793         mem_info->bound_images.erase(handle);
3795         mem_info->bound_buffers.erase(handle);
// Public wrappers: remove the bound memory range tracking for a buffer / an image handle.
3799 void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3801 void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
// Layer intercept for vkDestroyBuffer: validate under the global lock, call down the
// chain, then (for non-null handles) clean up tracked buffer state.
3803 VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3804     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3805     BUFFER_STATE *buffer_state = nullptr;
3806     VK_OBJECT obj_struct;
3807     unique_lock_t lock(global_lock);
3808     bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3811         dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
     // Destroying VK_NULL_HANDLE is a valid no-op; only record state for real handles.
3813         if (buffer != VK_NULL_HANDLE) {
3814             PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
// Layer intercept for vkDestroyBufferView: validate, call down the chain, then clean
// up tracked buffer-view state for non-null handles.
3819 VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3820     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3821     // Common data objects used pre & post call
3822     BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3823     VK_OBJECT obj_struct;
3824     unique_lock_t lock(global_lock);
3825     // Validate state before calling down chain, update common data if we'll be calling down chain
3826     bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3829         dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
     // Destroying VK_NULL_HANDLE is a valid no-op; only record state for real handles.
3831         if (bufferView != VK_NULL_HANDLE) {
3832             PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
// Layer intercept for vkDestroyImage: validate, call down the chain, then clean up
// tracked image state for non-null handles.
3837 VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3838     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3839     IMAGE_STATE *image_state = nullptr;
3840     VK_OBJECT obj_struct;
3841     unique_lock_t lock(global_lock);
3842     bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3845         dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
     // Destroying VK_NULL_HANDLE is a valid no-op; only record state for real handles.
3847         if (image != VK_NULL_HANDLE) {
3848             PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
// Checks that the memory object's allocated memoryTypeIndex is among the types allowed
// by the object's VkMemoryRequirements::memoryTypeBits mask; logs msgCode on mismatch.
// Returns true if the error was logged.
3853 static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3854                                 const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
     // Bit i of memory_type_bits set means memory type index i is compatible.
3856     if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3857         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3858                        HandleToUint64(mem_info->mem), msgCode,
3859                        "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3860                        "type (0x%X) of this memory object 0x%" PRIx64 ".",
3861                        funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem));
// Validation for vkBindBufferMemory / vkBindBufferMemory2 (per bind-info entry).
// Checks, in order: legal (re)binding of the memory object; that memory requirements
// were queried; bound-range aliasing and memory-type compatibility; alignment and size
// against VkMemoryRequirements; dedicated-allocation constraints; and the device-limit
// offset alignments for texel/uniform/storage buffer usage. Returns true if any check
// logged an error and the call should be skipped.
3866 static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3867                                             VkDeviceSize memoryOffset, const char *api_name) {
3870     unique_lock_t lock(global_lock);
3871     // Track objects tied to memory
3872     uint64_t buffer_handle = HandleToUint64(buffer);
3873     skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
3874     if (!buffer_state->memory_requirements_checked) {
3875         // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
3876         // BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
3877         // vkGetBufferMemoryRequirements()
3878         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3879                         buffer_handle, DRAWSTATE_INVALID_BUFFER,
3880                         "%s: Binding memory to buffer 0x%" PRIx64
3881                         " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
3882                         api_name, HandleToUint64(buffer_handle));
3883         // Make the call for them so we can verify the state
3885         dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3889     // Validate bound memory range information
3890     const auto mem_info = GetMemObjInfo(dev_data, mem);
3892         skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
3893         skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
3894                                     VALIDATION_ERROR_17000816);
3897         // Validate memory requirements alignment
3898         if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3899             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3900                             buffer_handle, VALIDATION_ERROR_17000818,
3901                             "%s: memoryOffset is 0x%" PRIxLEAST64
3902                             " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3903                             ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3904                             api_name, memoryOffset, buffer_state->requirements.alignment);
3908             // Validate memory requirements size
3909             if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
3910                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3911                                 buffer_handle, VALIDATION_ERROR_1700081a,
3912                                 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
3913                                 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
3914                                 ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3915                                 api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
3918         // Validate dedicated allocation
         // A dedicated allocation must be bound to exactly the buffer it was allocated for, at offset 0.
3919         if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
3920             // TODO: Add vkBindBufferMemory2KHR error message when added to spec.
3921             auto validation_error = VALIDATION_ERROR_UNDEFINED;
3922             if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
3923                 validation_error = VALIDATION_ERROR_17000bc8;
3926                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3927                         buffer_handle, validation_error,
3928                         "%s: for dedicated memory allocation 0x%" PRIxLEAST64
3929                         ", VkMemoryDedicatedAllocateInfoKHR::buffer 0x%" PRIXLEAST64 " must be equal to buffer 0x%" PRIx64
3930                         " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
3931                         api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_buffer), buffer_handle, memoryOffset);
3935     // Validate device limits alignments
     // Three usage classes are checked in parallel arrays: index 0 = texel buffers
     // (uniform or storage texel usage combined), 1 = uniform, 2 = storage.
3936     static const VkBufferUsageFlagBits usage_list[3] = {
3937         static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3938         VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3939     static const char *memory_type[3] = {"texel", "uniform", "storage"};
3940     static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3941                                          "minStorageBufferOffsetAlignment"};
3943     // TODO: vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3945     static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3946                                                              VALIDATION_ERROR_17000814 };
3949     // Keep this one fresh!
3950     const VkDeviceSize offset_requirement[3] = {
3951         dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3952         dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3953         dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
3954     VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
3956     for (int i = 0; i < 3; i++) {
3957         if (usage & usage_list[i]) {
3958             if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3959                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3960                                 buffer_handle, msgCode[i],
3961                                 "%s: %s memoryOffset is 0x%" PRIxLEAST64
3962                                 " but must be a multiple of device limit %s 0x%" PRIxLEAST64 ".",
3963                                 api_name, memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
// Post-call state recording for a successful buffer-memory bind: insert the bound
// memory range into the DEVICE_MEM_INFO tracking and record the memory binding on
// the buffer state.
3971 static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3972                                            VkDeviceSize memoryOffset, const char *api_name) {
3974     unique_lock_t lock(global_lock);
3975     // Track bound memory range information
3976     auto mem_info = GetMemObjInfo(dev_data, mem);
3978         InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3981     // Track objects tied to memory
3982     uint64_t buffer_handle = HandleToUint64(buffer);
3983     SetMemBinding(dev_data, mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer, api_name);
// Layer intercept for vkBindBufferMemory: look up buffer state, validate, then (when
// validation passes) call down the chain and record the binding on success.
3987 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3988     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
     // Default result returned when validation fails and the call is skipped.
3989     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3990     BUFFER_STATE *buffer_state;
3992     unique_lock_t lock(global_lock);
3993     buffer_state = GetBufferState(dev_data, buffer);
3995     bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
3997         result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3998         if (result == VK_SUCCESS) {
3999             PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
// Batched validation for vkBindBufferMemory2(KHR): resolves each bind entry's
// BUFFER_STATE (output through buffer_state for later post-call recording) and
// delegates each entry to PreCallValidateBindBufferMemory with an api_name that
// identifies the pBindInfos index.
4005 static bool PreCallValidateBindBufferMemory2(layer_data *dev_data, std::vector<BUFFER_STATE *> *buffer_state,
4006                                              uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
4008     unique_lock_t lock(global_lock);
4009     for (uint32_t i = 0; i < bindInfoCount; i++) {
4010         (*buffer_state)[i] = GetBufferState(dev_data, pBindInfos[i].buffer);
4015     for (uint32_t i = 0; i < bindInfoCount; i++) {
         // api_name embeds the array index so log messages pinpoint the failing entry.
4016         sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i);
4017         skip |= PreCallValidateBindBufferMemory(dev_data, pBindInfos[i].buffer, (*buffer_state)[i], pBindInfos[i].memory,
4018                                                 pBindInfos[i].memoryOffset, api_name);
// Batched post-call recording for vkBindBufferMemory2(KHR): records each successful
// bind via the single-bind PostCallRecordBindBufferMemory.
4023 static void PostCallRecordBindBufferMemory2(layer_data *dev_data, const std::vector<BUFFER_STATE *> &buffer_state,
4024                                             uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
4025     for (uint32_t i = 0; i < bindInfoCount; i++) {
4026         PostCallRecordBindBufferMemory(dev_data, pBindInfos[i].buffer, buffer_state[i], pBindInfos[i].memory,
4027                                        pBindInfos[i].memoryOffset, "vkBindBufferMemory2()");
// Layer intercepts for vkBindBufferMemory2 (core promoted) and vkBindBufferMemory2KHR.
// Identical flow: validate all bind infos, and only if validation passes, dispatch
// down the chain (to the respective entry point) and record state on VK_SUCCESS.
4031 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
4032                                                  const VkBindBufferMemoryInfoKHR *pBindInfos) {
4033     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4034     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4035     std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4036     if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4037         result = dev_data->dispatch_table.BindBufferMemory2(device, bindInfoCount, pBindInfos);
4038         if (result == VK_SUCCESS) {
4039             PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
// KHR-suffixed alias of the entry point above; dispatches to BindBufferMemory2KHR.
4045 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
4046                                                     const VkBindBufferMemoryInfoKHR *pBindInfos) {
4047     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4048     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4049     std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4050     if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4051         result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
4052         if (result == VK_SUCCESS) {
4053             PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
// Caches the queried VkMemoryRequirements on the buffer's BUFFER_STATE and marks the
// requirements as checked (consumed later by the BindBufferMemory validation).
4059 static void PostCallRecordGetBufferMemoryRequirements(layer_data *dev_data, VkBuffer buffer,
4060                                                       VkMemoryRequirements *pMemoryRequirements) {
4061     BUFFER_STATE *buffer_state;
4063     unique_lock_t lock(global_lock);
4064     buffer_state = GetBufferState(dev_data, buffer);
4067         buffer_state->requirements = *pMemoryRequirements;
4068         buffer_state->memory_requirements_checked = true;
// vkGetBufferMemoryRequirements intercept: pass through, then cache the result.
4072 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
4073                                                        VkMemoryRequirements *pMemoryRequirements) {
4074     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4075     dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
4076     PostCallRecordGetBufferMemoryRequirements(dev_data, buffer, pMemoryRequirements);
// "2" variant: same caching, reading the buffer from pInfo and the core requirements
// out of the VkMemoryRequirements2KHR wrapper.
4079 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4080                                                         VkMemoryRequirements2KHR *pMemoryRequirements) {
4081     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4082     dev_data->dispatch_table.GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
4083     PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
// KHR-suffixed alias of the "2" variant; dispatches to GetBufferMemoryRequirements2KHR.
4086 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4087                                                            VkMemoryRequirements2KHR *pMemoryRequirements) {
4088     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4089     dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4090     PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
// Caches the queried VkMemoryRequirements on the image's IMAGE_STATE and marks the
// requirements as checked — image-side mirror of the buffer variant above.
4093 static void PostCallRecordGetImageMemoryRequirements(layer_data *dev_data, VkImage image,
4094                                                      VkMemoryRequirements *pMemoryRequirements) {
4095     IMAGE_STATE *image_state;
4097     unique_lock_t lock(global_lock);
4098     image_state = GetImageState(dev_data, image);
4101         image_state->requirements = *pMemoryRequirements;
4102         image_state->memory_requirements_checked = true;
// vkGetImageMemoryRequirements intercept: pass through, then cache the result.
4106 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
4107     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4108     dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
4109     PostCallRecordGetImageMemoryRequirements(dev_data, image, pMemoryRequirements);
// "2" variant: same caching, reading the image from pInfo and the core requirements
// out of the VkMemoryRequirements2KHR wrapper.
4112 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4113                                                        VkMemoryRequirements2KHR *pMemoryRequirements) {
4114     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4115     dev_data->dispatch_table.GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
4116     PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
// KHR-suffixed alias of the "2" variant; dispatches to GetImageMemoryRequirements2KHR.
4119 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4120                                                           VkMemoryRequirements2KHR *pMemoryRequirements) {
4121     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4122     dev_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4123     PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
// Copies the driver-reported sparse requirements into the IMAGE_STATE and notes
// whether any aspect requires sparse metadata binding.
4126 static void PostCallRecordGetImageSparseMemoryRequirements(IMAGE_STATE *image_state, uint32_t req_count,
4127                                                            VkSparseImageMemoryRequirements *reqs) {
4128     image_state->get_sparse_reqs_called = true;
4129     image_state->sparse_requirements.resize(req_count);
4131         std::copy(reqs, reqs + req_count, image_state->sparse_requirements.begin());
4133     for (const auto &req : image_state->sparse_requirements) {
         // Metadata aspect present => the app must bind the sparse metadata mip tail.
4134         if (req.formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
4135             image_state->sparse_metadata_required = true;
// vkGetImageSparseMemoryRequirements intercept: pass through, then record the results.
4140 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
4141                                                             VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
4142     // TODO : Implement tracking here, just passthrough initially
4143     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4144     dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
4145                                                               pSparseMemoryRequirements);
4146     unique_lock_t lock(global_lock);
4147     auto image_state = GetImageState(dev_data, image);
4148     PostCallRecordGetImageSparseMemoryRequirements(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
// "2" variant of the sparse-requirements recorder: unwraps each
// VkSparseImageMemoryRequirements2KHR into the legacy struct and reuses the common
// recording path. A null reqs pointer (count-query phase) records nothing.
4151 static void PostCallRecordGetImageSparseMemoryRequirements2(IMAGE_STATE *image_state, uint32_t req_count,
4152                                                             VkSparseImageMemoryRequirements2KHR *reqs) {
4153     // reqs is empty, so there is nothing to loop over and read.
4154     if (reqs == nullptr) {
4157     std::vector<VkSparseImageMemoryRequirements> sparse_reqs(req_count);
4158     // Migrate to old struct type for common handling with GetImageSparseMemoryRequirements()
4159     for (uint32_t i = 0; i < req_count; ++i) {
4160         assert(!reqs[i].pNext);  // TODO: If an extension is ever added here we need to handle it
4161         sparse_reqs[i] = reqs[i].memoryRequirements;
4163     PostCallRecordGetImageSparseMemoryRequirements(image_state, req_count, sparse_reqs.data());
// vkGetImageSparseMemoryRequirements2 intercept: pass through, then record under the lock.
4166 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4167                                                              uint32_t *pSparseMemoryRequirementCount,
4168                                                              VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4169     // TODO : Implement tracking here, just passthrough initially
4170     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4171     dev_data->dispatch_table.GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount,
4172                                                                pSparseMemoryRequirements);
4173     unique_lock_t lock(global_lock);
4174     auto image_state = GetImageState(dev_data, pInfo->image);
4175     PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
// KHR-suffixed alias; dispatches to GetImageSparseMemoryRequirements2KHR.
4178 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
4179                                                                 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4180                                                                 uint32_t *pSparseMemoryRequirementCount,
4181                                                                 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4182     // TODO : Implement tracking here, just passthrough initially
4183     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4184     dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
4185                                                                   pSparseMemoryRequirements);
4186     unique_lock_t lock(global_lock);
4187     auto image_state = GetImageState(dev_data, pInfo->image);
4188     PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
// Pure pass-through intercepts for the sparse image format property queries
// (core, "2", and "2KHR" variants); no layer state is tracked yet (see TODOs).
4191 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
4192                                                                         VkImageType type, VkSampleCountFlagBits samples,
4193                                                                         VkImageUsageFlags usage, VkImageTiling tiling,
4194                                                                         uint32_t *pPropertyCount,
4195                                                                         VkSparseImageFormatProperties *pProperties) {
4196     // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4197     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4198     instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling,
4199                                                                                pPropertyCount, pProperties);
4202 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(
4203     VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4204     VkSparseImageFormatProperties2KHR *pProperties) {
4205     // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4206     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4207     instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount,
4211 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
4212     VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4213     VkSparseImageFormatProperties2KHR *pProperties) {
4214     // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4215     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4216     instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount,
// Layer intercept for vkDestroyImageView: validate, call down the chain, then clean up
// tracked image-view state for non-null handles.
4220 VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
4221     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4222     // Common data objects used pre & post call
4223     IMAGE_VIEW_STATE *image_view_state = nullptr;
4224     VK_OBJECT obj_struct;
4225     unique_lock_t lock(global_lock);
4226     bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
4229         dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
     // Destroying VK_NULL_HANDLE is a valid no-op; only record state for real handles.
4231         if (imageView != VK_NULL_HANDLE) {
4232             PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
// Layer intercept for vkDestroyShaderModule: drop the tracked shader module state
// under the global lock, then call down the chain (no pre-call validation is done).
4237 VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
4238                                                const VkAllocationCallbacks *pAllocator) {
4239     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4241     unique_lock_t lock(global_lock);
4242     dev_data->shaderModuleMap.erase(shaderModule);
4245     dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
// Pre-call validation for vkDestroyPipeline. Out-params: *pipeline_state receives the
// tracked PIPELINE_STATE (may be null), *obj_struct the typed handle wrapper.
// Returns true if the call should be skipped (pipeline still in use).
4248 static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
4249 VK_OBJECT *obj_struct) {
4250 *pipeline_state = getPipelineState(dev_data, pipeline);
4251 *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
// Early-out when this validation check is disabled via layer settings.
4252 if (dev_data->instance_data->disabled.destroy_pipeline) return false;
// NOTE(review): declaration of 'skip' is in a listing gap here (line 4253); presumably bool skip = false.
4254 if (*pipeline_state) {
4255 skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, "vkDestroyPipeline", VALIDATION_ERROR_25c005fa);
// Post-call record for vkDestroyPipeline: invalidate command buffers bound to the pipeline
// and remove its tracking entry.
// NOTE(review): pipeline_state is dereferenced without a null check, unlike the sampler
// equivalent below — confirm callers only pass non-null state here.
4260 static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
4261 VK_OBJECT obj_struct) {
4262 // Any bound cmd buffers are now invalid
4263 invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
4264 dev_data->pipelineMap.erase(pipeline);
// Intercept for vkDestroyPipeline: validate (in-use check) under lock, call down the chain,
// then record destruction for non-null handles.
4267 VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
4268 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4269 PIPELINE_STATE *pipeline_state = nullptr;
4270 VK_OBJECT obj_struct;
4271 unique_lock_t lock(global_lock);
4272 bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
// NOTE(review): the skip-check / unlock lines fall in a gap of this excerpt (4273-4274).
4275 dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
4277 if (pipeline != VK_NULL_HANDLE) {
4278 PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
// Intercept for vkDestroyPipelineLayout: erase the layout's tracking entry under the
// global lock, then forward the call down the dispatch chain.
4283 VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
4284 const VkAllocationCallbacks *pAllocator) {
4285 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4286 unique_lock_t lock(global_lock);
4287 dev_data->pipelineLayoutMap.erase(pipelineLayout);
4290 dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
// Pre-call validation for vkDestroySampler: fetch tracking state into the out-params and
// flag an error if the sampler is still in use by in-flight command buffers.
4293 static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
4294 VK_OBJECT *obj_struct) {
4295 *sampler_state = GetSamplerState(dev_data, sampler);
4296 *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
// Early-out when the destroy_sampler check is disabled via layer settings.
4297 if (dev_data->instance_data->disabled.destroy_sampler) return false;
// NOTE(review): 'skip' declaration is in a listing gap (line 4298); presumably bool skip = false.
4299 if (*sampler_state) {
4300 skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, "vkDestroySampler", VALIDATION_ERROR_26600874);
// Post-call record for vkDestroySampler: invalidate command buffers that reference the
// sampler (null-safe) and erase its tracking entry.
4305 static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
4306 VK_OBJECT obj_struct) {
4307 // Any bound cmd buffers are now invalid
4308 if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
4309 dev_data->samplerMap.erase(sampler);
// Intercept for vkDestroySampler: validate under lock, call down the chain, then record
// destruction for non-null handles (same pattern as DestroyImageView/DestroyPipeline).
4312 VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
4313 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4314 SAMPLER_STATE *sampler_state = nullptr;
4315 VK_OBJECT obj_struct;
4316 unique_lock_t lock(global_lock);
4317 bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
4320 dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
4322 if (sampler != VK_NULL_HANDLE) {
4323 PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
// Post-call record for vkDestroyDescriptorSetLayout: mark the shared layout object
// destroyed (descriptor sets may still hold references to it), then drop the map entry.
4328 static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
4329 auto layout_it = dev_data->descriptorSetLayoutMap.find(ds_layout);
4330 if (layout_it != dev_data->descriptorSetLayoutMap.end()) {
4331 layout_it->second.get()->MarkDestroyed();
4332 dev_data->descriptorSetLayoutMap.erase(layout_it);
// Intercept for vkDestroyDescriptorSetLayout. Note: unlike most destroy intercepts, this
// one calls down the chain FIRST, then takes the lock and records the destruction.
4336 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
4337 const VkAllocationCallbacks *pAllocator) {
4338 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4339 dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
4340 unique_lock_t lock(global_lock);
4341 PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
// Pre-call validation for vkDestroyDescriptorPool: fetch pool state into the out-params
// and verify the pool is not in use by in-flight command buffers.
4344 static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
4345 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
4346 *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
4347 *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
// Early-out when the destroy_descriptor_pool check is disabled via layer settings.
4348 if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
// NOTE(review): 'skip' declaration and 'skip |=' prefix are in listing gaps around here.
4350 if (*desc_pool_state) {
4352 ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, "vkDestroyDescriptorPool", VALIDATION_ERROR_2440025e);
// Pre-call record for vkDestroyDescriptorPool: invalidate bound command buffers, free the
// tracking state of every descriptor set allocated from the pool, then delete the pool's
// own tracking state. Runs before the driver call since the sets die with the pool.
4357 static void PreCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
4358 DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
4359 if (desc_pool_state) {
4360 // Any bound cmd buffers are now invalid
4361 invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
4362 // Free sets that were in this pool
4363 for (auto ds : desc_pool_state->sets) {
4364 freeDescriptorSet(dev_data, ds);
4366 dev_data->descriptorPoolMap.erase(descriptorPool);
4367 delete desc_pool_state;
// Intercept for vkDestroyDescriptorPool: validate, record (pre-call, freeing all sets in
// the pool), then forward the call down the dispatch chain.
4371 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
4372 const VkAllocationCallbacks *pAllocator) {
4373 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4374 DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
4375 VK_OBJECT obj_struct;
4376 unique_lock_t lock(global_lock);
4377 bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
4379 PreCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
4381 dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
4385 // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
4386 // If this is a secondary command buffer, then make sure its primary is also in-flight
4387 // If primary is not in-flight, then remove secondary from global in-flight set
4388 // This function is only valid at a point when cmdBuffer is being reset or freed
// 'action' is interpolated into the error message ("Attempt to <action> command buffer");
// 'error_code' selects the VU reported (differs for reset vs free vs destroy-pool).
4389 static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
4390 UNIQUE_VALIDATION_ERROR_CODE error_code) {
// NOTE(review): 'skip' declaration is in a listing gap (line 4391); presumably bool skip = false.
4392 if (cb_node->in_use.load()) {
4393 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4394 HandleToUint64(cb_node->commandBuffer), error_code,
4395 "Attempt to %s command buffer (0x%" PRIx64 ") which is in use.", action,
4396 HandleToUint64(cb_node->commandBuffer));
4401 // Iterate over all cmdBuffers in given commandPool and verify that each is not in use
// Aggregates the per-command-buffer in-flight checks; 'action'/'error_code' are forwarded.
4402 static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
4403 UNIQUE_VALIDATION_ERROR_CODE error_code) {
4405 for (auto cmd_buffer : pPool->commandBuffers) {
4406 skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
4411 // Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
// Used both by vkFreeCommandBuffers and by command-pool destruction (which frees all CBs).
4412 static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
4413 const VkCommandBuffer *command_buffers) {
4414 for (uint32_t i = 0; i < command_buffer_count; i++) {
4415 auto cb_state = GetCBNode(dev_data, command_buffers[i]);
4416 // Remove references to command buffer's state and delete
4418 // reset prior to delete, removing various references to it.
4419 // TODO: fix this, it's insane.
4420 ResetCommandBufferState(dev_data, cb_state->commandBuffer);
4421 // Remove the cb_state's references from layer_data and COMMAND_POOL_NODE
4422 dev_data->commandBufferMap.erase(cb_state->commandBuffer);
4423 pool_state->commandBuffers.erase(command_buffers[i]);
// Intercept for vkFreeCommandBuffers: verify none of the command buffers are in flight,
// free their tracking state, then forward the call down the dispatch chain.
4429 VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
4430 const VkCommandBuffer *pCommandBuffers) {
4431 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4433 unique_lock_t lock(global_lock);
4435 for (uint32_t i = 0; i < commandBufferCount; i++) {
4436 auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
4437 // Delete CB information structure, and remove from commandBufferMap
// VALIDATION_ERROR_2840005e: freeing a command buffer that is still pending execution.
4439 skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
4445 auto pPool = GetCommandPoolNode(dev_data, commandPool);
4446 FreeCommandBufferStates(dev_data, pPool, commandBufferCount, pCommandBuffers);
4449 dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
// Intercept for vkCreateCommandPool: call down first, then on success record the new
// pool's create flags and queue family index for later validation.
4452 VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
4453 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
4454 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4456 VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
4458 if (VK_SUCCESS == result) {
4459 lock_guard_t lock(global_lock);
4460 dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
4461 dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
// Intercept for vkCreateQueryPool: reject pipeline-statistics pools when the
// pipelineStatisticsQuery device feature was not enabled; on success, shadow the
// create info in queryPoolMap for later query validation.
4466 VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
4467 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
4468 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4470 if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
4471 if (!dev_data->enabled_features.pipelineStatisticsQuery) {
4472 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
4473 VALIDATION_ERROR_11c0062e,
4474 "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
4475 "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
// Default to the validation-failed sentinel; only overwritten if we call down (skip == false).
4479 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4481 result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
4483 if (result == VK_SUCCESS) {
4484 lock_guard_t lock(global_lock);
4485 QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
4486 qp_node->createInfo = *pCreateInfo;
// Pre-call validation for vkDestroyCommandPool: verify no command buffer allocated from
// the pool is still in flight. Returns true if the destroy should be skipped.
4491 static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4492 COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4493 if (dev_data->instance_data->disabled.destroy_command_pool) return false;
4496 // Verify that command buffers in pool are complete (not in-flight)
4497 skip |= checkCommandBuffersInFlight(dev_data, cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
// Pre-call record for vkDestroyCommandPool: free tracking state for every command buffer
// in the pool, then erase the pool itself.
4502 static void PreCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4503 COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4504 // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
4505 // "When a pool is destroyed, all command buffers allocated from the pool are freed."
4507 // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
4508 std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
4509 FreeCommandBufferStates(dev_data, cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
4510 dev_data->commandPoolMap.erase(pool);
4514 // Destroy commandPool along with all of the commandBuffers allocated from that pool
// Intercept for vkDestroyCommandPool: validate (no in-flight CBs), record the pool and
// command-buffer teardown, then forward the call down the dispatch chain.
4515 VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4516 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4517 unique_lock_t lock(global_lock);
4518 bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool);
4520 PreCallRecordDestroyCommandPool(dev_data, commandPool);
4522 dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
// Intercept for vkResetCommandPool: fail with VK_ERROR_VALIDATION_FAILED_EXT if any
// command buffer in the pool is in flight; on driver success, reset the layer's
// per-command-buffer tracking state for everything allocated from the pool.
4526 VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
4527 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4530 unique_lock_t lock(global_lock);
4531 auto pPool = GetCommandPoolNode(dev_data, commandPool);
4532 skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
4535 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4537 VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
4539 // Reset all of the CBs allocated from this pool
4540 if (VK_SUCCESS == result) {
4542 for (auto cmdBuffer : pPool->commandBuffers) {
4543 ResetCommandBufferState(dev_data, cmdBuffer);
// Intercept for vkResetFences: error (VALIDATION_ERROR_32e008c6) if any fence with
// internal sync scope is in flight; on driver success, move internally-scoped fences to
// FENCE_UNSIGNALED and fold temporarily-external fences back to internal scope.
4550 VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
4551 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4553 unique_lock_t lock(global_lock);
4554 for (uint32_t i = 0; i < fenceCount; ++i) {
4555 auto pFence = GetFenceNode(dev_data, pFences[i]);
4556 if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
4557 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4558 HandleToUint64(pFences[i]), VALIDATION_ERROR_32e008c6, "Fence 0x%" PRIx64 " is in use.",
4559 HandleToUint64(pFences[i]));
4564 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4566 VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
4568 if (result == VK_SUCCESS) {
4570 for (uint32_t i = 0; i < fenceCount; ++i) {
4571 auto pFence = GetFenceNode(dev_data, pFences[i]);
// NOTE(review): a null check on pFence (line 4572) is not visible in this excerpt — confirm.
4573 if (pFence->scope == kSyncScopeInternal) {
4574 pFence->state = FENCE_UNSIGNALED;
4575 } else if (pFence->scope == kSyncScopeExternalTemporary) {
4576 pFence->scope = kSyncScopeInternal;
4586 // For given cb_nodes, invalidate them and track object causing invalidation
// Recording CBs get a warning and become CB_INVALID_INCOMPLETE; recorded CBs become
// CB_INVALID_COMPLETE. The invalidating object is appended to broken_bindings, and
// invalidation recurses from secondary CBs into the primaries that execute them.
4587 void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
4588 for (auto cb_node : cb_nodes) {
4589 if (cb_node->state == CB_RECORDING) {
4590 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4591 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
4592 "Invalidating a command buffer that's currently being recorded: 0x%" PRIx64 ".",
4593 HandleToUint64(cb_node->commandBuffer));
4594 cb_node->state = CB_INVALID_INCOMPLETE;
4595 } else if (cb_node->state == CB_RECORDED) {
4596 cb_node->state = CB_INVALID_COMPLETE;
4598 cb_node->broken_bindings.push_back(obj);
4600 // if secondary, then propagate the invalidation to the primaries that will call us.
4601 if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
4602 invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
// Pre-call validation for vkDestroyFramebuffer: fetch state into the out-params and verify
// the framebuffer is not in use by in-flight command buffers.
4607 static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
4608 FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
4609 *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
4610 *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
4611 if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
// NOTE(review): 'skip' declaration and 'skip |=' prefix are in listing gaps around here.
4613 if (*framebuffer_state) {
4615 ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, "vkDestroyFramebuffer", VALIDATION_ERROR_250006f8);
// Post-call record for vkDestroyFramebuffer: invalidate bound command buffers and erase
// the tracking entry. NOTE(review): framebuffer_state is dereferenced without a null
// check — confirm callers only pass non-null state.
4620 static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4621 VK_OBJECT obj_struct) {
4622 invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4623 dev_data->frameBufferMap.erase(framebuffer);
// Intercept for vkDestroyFramebuffer: validate under lock, call down the chain, then
// record destruction for non-null handles.
4626 VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4627 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4628 FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4629 VK_OBJECT obj_struct;
4630 unique_lock_t lock(global_lock);
4631 bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4634 dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4636 if (framebuffer != VK_NULL_HANDLE) {
4637 PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
// Pre-call validation for vkDestroyRenderPass: fetch state into the out-params and verify
// the render pass is not in use by in-flight command buffers.
4642 static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
4643 VK_OBJECT *obj_struct) {
4644 *rp_state = GetRenderPassState(dev_data, render_pass);
4645 *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
4646 if (dev_data->instance_data->disabled.destroy_renderpass) return false;
// NOTE(review): the null guard on *rp_state (if any) sits in a listing gap before this line.
4649 skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, "vkDestroyRenderPass", VALIDATION_ERROR_264006d2);
// Post-call record for vkDestroyRenderPass: invalidate bound command buffers and erase
// the tracking entry. NOTE(review): rp_state is dereferenced without a null check —
// confirm callers only pass non-null state.
4654 static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
4655 VK_OBJECT obj_struct) {
4656 invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
4657 dev_data->renderPassMap.erase(render_pass);
// Intercept for vkDestroyRenderPass: validate under lock, call down the chain, then
// record destruction for non-null handles.
4660 VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
4661 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4662 RENDER_PASS_STATE *rp_state = nullptr;
4663 VK_OBJECT obj_struct;
4664 unique_lock_t lock(global_lock);
4665 bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
4668 dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
4670 if (renderPass != VK_NULL_HANDLE) {
4671 PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
// Intercept for vkCreateBuffer: validate the create info under lock (fails the call with
// VK_ERROR_VALIDATION_FAILED_EXT on error), call down, then record the new buffer's state.
4676 VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
4677 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
4678 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4679 unique_lock_t lock(global_lock);
4680 bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
4683 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4684 VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
4686 if (VK_SUCCESS == result) {
4688 PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
// Intercept for vkCreateBufferView: same validate / call-down / record pattern as
// CreateBuffer, delegating to the buffer_validation helpers.
4694 VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
4695 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
4696 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4697 unique_lock_t lock(global_lock);
4698 bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
4700 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4701 VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
4702 if (VK_SUCCESS == result) {
4704 PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
4710 // Access helper functions for external modules
// Queries format properties from the physical device via the instance dispatch table;
// returned by value for use by buffer/image validation modules.
4711 VkFormatProperties GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
4712 VkFormatProperties format_properties;
4713 instance_layer_data *instance_data =
4714 GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4715 instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &format_properties);
4716 return format_properties;
// Queries image-format properties for the given create info's format/type/tiling/usage/
// flags via the instance dispatch table; forwards the driver's VkResult to the caller.
4719 VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
4720 VkImageFormatProperties *pImageFormatProperties) {
4721 instance_layer_data *instance_data =
4722 GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4723 return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(
4724 device_data->physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags,
4725 pImageFormatProperties);
// Accessor: the device's debug-report data, for use by external validation modules.
4728 const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
// Accessor: cached VkPhysicalDeviceProperties for this device.
4730 const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
4731 return &device_data->phys_dev_props;
// Accessor: the instance-level table of disabled validation checks.
4734 const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
// Accessor: mutable map from VkImage to its tracked IMAGE_STATE.
4736 std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
4737 return &device_data->imageMap;
// Accessor: per-image list of tracked subresources.
4740 std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
4741 return &device_data->imageSubresourceMap;
// Accessor (mutable overload): per-subresource image layout tracking map.
4744 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
4745 return &device_data->imageLayoutMap;
// Accessor (const overload): per-subresource image layout tracking map.
4748 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
4749 return &device_data->imageLayoutMap;
// Accessor: mutable map from VkBuffer to its tracked BUFFER_STATE.
4752 std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
4753 return &device_data->bufferMap;
// Accessor: mutable map from VkBufferView to its tracked BUFFER_VIEW_STATE.
4756 std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
4757 return &device_data->bufferViewMap;
// Accessor: mutable map from VkImageView to its tracked IMAGE_VIEW_STATE.
4760 std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
4761 return &device_data->imageViewMap;
// Accessor: the layer's cached physical-device properties node (queue families etc.).
4764 const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) { return &device_data->phys_dev_properties; }
// Accessor: the device features enabled at vkCreateDevice time.
4766 const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
// Accessor: enabled VK_EXT_descriptor_indexing features, from the extension-properties cache.
4768 const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *GetEnabledDescriptorIndexingFeatures(const layer_data *device_data) {
4769 return &device_data->phys_dev_ext_props.descriptor_indexing_features;
// Accessor: the set of device extensions enabled for this device.
4772 const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
// Intercept for vkCreateImage: validate the create info (result stays
// VK_ERROR_VALIDATION_FAILED_EXT if validation skips the call), call down, then record
// the new image's state under the global lock.
4774 VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
4775 const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
4776 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4777 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4778 bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
4780 result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
4782 if (VK_SUCCESS == result) {
4783 lock_guard_t lock(global_lock);
4784 PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
// Intercept for vkCreateImageView: validate under lock, call down, then record the new
// view's state (note the record helper takes *pView by value, unlike the buffer variants).
4789 VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
4790 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
4791 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4792 unique_lock_t lock(global_lock);
4793 bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
4795 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4796 VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
4797 if (VK_SUCCESS == result) {
4799 PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
// Intercept for vkCreateFence: call down first, then on success record a FENCE_NODE.
// A fence created signaled starts in FENCE_RETIRED; otherwise FENCE_UNSIGNALED.
4806 VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
4807 const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
4808 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4809 VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
4810 if (VK_SUCCESS == result) {
4811 lock_guard_t lock(global_lock);
4812 auto &fence_node = dev_data->fenceMap[*pFence];
4813 fence_node.fence = *pFence;
4814 fence_node.createInfo = *pCreateInfo;
4815 fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
4820 // TODO handle pipeline caches
// Intercept for vkCreatePipelineCache: pure pass-through; no cache state is tracked.
4821 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
4822 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
4823 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4824 VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
// Intercept for vkDestroyPipelineCache: pure pass-through; no cache state is tracked.
4828 VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
4829 const VkAllocationCallbacks *pAllocator) {
4830 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4831 dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
// Intercept for vkGetPipelineCacheData: pure pass-through; result forwarded unchanged.
4834 VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
4836 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4837 VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
// Intercept for vkMergePipelineCaches: pure pass-through; result forwarded unchanged.
4841 VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
4842 const VkPipelineCache *pSrcCaches) {
4843 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4844 VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
4848 // Validation cache:
4849 // CV is the bottommost implementor of this extension. Don't pass calls down.
// Implements vkCreateValidationCacheEXT in the layer itself: the handle IS the
// heap-allocated ValidationCache object.
4850 VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
4851 const VkAllocationCallbacks *pAllocator,
4852 VkValidationCacheEXT *pValidationCache) {
4853 *pValidationCache = ValidationCache::Create(pCreateInfo);
4854 return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
// Implements vkDestroyValidationCacheEXT in the layer: the handle is a ValidationCache*,
// so destruction is a plain delete (null-safe). Not passed down the chain.
4857 VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
4858 const VkAllocationCallbacks *pAllocator) {
4859 delete (ValidationCache *)validationCache;
// Implements vkGetValidationCacheDataEXT in the layer: Write() fills pData and updates
// *pDataSize; if the caller's buffer was too small the size shrinks and we report
// VK_INCOMPLETE per the extension's contract.
4862 VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
4864 size_t inSize = *pDataSize;
4865 ((ValidationCache *)validationCache)->Write(pDataSize, pData);
4866 return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
// Implements vkMergeValidationCachesEXT in the layer: rejects (VALIDATION_ERROR_3e600c00)
// any source cache that aliases the destination, merging the rest.
4869 VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
4870 const VkValidationCacheEXT *pSrcCaches) {
4871 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4873 auto dst = (ValidationCache *)dstCache;
4874 auto src = (ValidationCache const *const *)pSrcCaches;
4875 VkResult result = VK_SUCCESS;
4876 for (uint32_t i = 0; i < srcCacheCount; i++) {
4877 if (src[i] == dst) {
4878 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
4879 0, VALIDATION_ERROR_3e600c00,
4880 "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
4881 HandleToUint64(dstCache));
4882 result = VK_ERROR_VALIDATION_FAILED_EXT;
4892 // utility function to set collective state for pipeline
// Sets pPipe->blendConstantsEnabled when any enabled color-blend attachment uses a
// constant-color/alpha blend factor (the contiguous enum range CONSTANT_COLOR ..
// ONE_MINUS_CONSTANT_ALPHA) in any of its four factor slots.
4893 void set_pipeline_state(PIPELINE_STATE *pPipe) {
4894 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
4895 if (pPipe->graphicsPipelineCI.pColorBlendState) {
4896 for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
4897 if (VK_TRUE == pPipe->attachments[i].blendEnable) {
4898 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4899 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4900 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4901 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4902 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4903 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4904 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4905 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
4906 pPipe->blendConstantsEnabled = true;
// Flags DRAWSTATE_INVALID_FEATURE if any attachment uses a SRC1 (dual-source) blend
// factor while the dualSrcBlend device feature is disabled.
// NOTE(review): only the dst/src *Alpha* blend factors are checked in this excerpt — the
// color-factor checks (if any) fall in listing gaps; confirm against the full source.
4913 bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
4915 if (pipe_state->graphicsPipelineCI.pColorBlendState) {
4916 for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
4917 if (!device_data->enabled_features.dualSrcBlend) {
4918 if ((pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4919 (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4920 (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4921 (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
4922 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4923 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4924 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4925 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
4927 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
4928 HandleToUint64(pipe_state->pipeline), DRAWSTATE_INVALID_FEATURE,
4929 "CmdBindPipeline: vkPipeline (0x%" PRIx64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
4930 "] has a dual-source blend factor but this device feature is not enabled.",
4931 HandleToUint64(pipe_state->pipeline), i);
// Intercept for vkCreateGraphicsPipelines: shadow each create info into a PIPELINE_STATE,
// validate (locked then unlocked phases), fail all pipelines on any error, otherwise call
// down and adopt the successfully created handles into pipelineMap.
4939 VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4940 const VkGraphicsPipelineCreateInfo *pCreateInfos,
4941 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4942 // The order of operations here is a little convoluted but gets the job done
4943 // 1. Pipeline create state is first shadowed into PIPELINE_STATE struct
4944 // 2. Create state is then validated (which uses flags setup during shadowing)
4945 // 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
4947 vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
4948 pipe_state.reserve(count);
4949 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4952 unique_lock_t lock(global_lock);
// Phase 1: shadow create info and pipeline layout into tracking structs.
4954 for (i = 0; i < count; i++) {
4955 pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
4956 pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i], GetRenderPassStateSharedPtr(dev_data, pCreateInfos[i].renderPass));
4957 pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
// Phase 2a: checks that need the global lock held.
4960 for (i = 0; i < count; i++) {
4961 skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
// Phase 2b: checks that run with the lock released (gap in excerpt hides the unlock).
4966 for (i = 0; i < count; i++) {
4967 skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
// On any validation error, null out every output handle and fail the whole call.
4971 for (i = 0; i < count; i++) {
4972 pPipelines[i] = VK_NULL_HANDLE;
4974 return VK_ERROR_VALIDATION_FAILED_EXT;
4978 dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
// Phase 3: adopt the handles the driver actually created.
4980 for (i = 0; i < count; i++) {
4981 if (pPipelines[i] != VK_NULL_HANDLE) {
4982 pipe_state[i]->pipeline = pPipelines[i];
4983 dev_data->pipelineMap[pPipelines[i]] = std::move(pipe_state[i]);
// Layer intercept for vkCreateComputePipelines: mirrors CreateGraphicsPipelines above —
// shadow state, validate, then dispatch down the chain and record the results.
// NOTE(review): some lines (declarations of `skip`/`i`, closing braces, final return) are
// elided in this excerpt.
4990 VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4991 const VkComputePipelineCreateInfo *pCreateInfos,
4992 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
// One shadow PIPELINE_STATE per requested pipeline.
4995 vector<std::unique_ptr<PIPELINE_STATE>> pPipeState;
4996 pPipeState.reserve(count);
4997 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Global lock guards the layer's shared state maps during shadowing/validation.
5000 unique_lock_t lock(global_lock);
5001 for (i = 0; i < count; i++) {
5002 // Create and initialize internal tracking data structure
5003 pPipeState.push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
5004 pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
5005 pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
5007 // TODO: Add Compute Pipeline Verification
5008 skip |= validate_compute_pipeline(dev_data, pPipeState[i].get());
// On validation failure, null the output handles and fail without calling the driver.
5012 for (i = 0; i < count; i++) {
5013 pPipelines[i] = VK_NULL_HANDLE;
5015 return VK_ERROR_VALIDATION_FAILED_EXT;
// Forward to the next layer / ICD.
5020 dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
// Record successfully-created pipelines; shadow-state ownership moves into pipelineMap.
5022 for (i = 0; i < count; i++) {
5023 if (pPipelines[i] != VK_NULL_HANDLE) {
5024 pPipeState[i]->pipeline = pPipelines[i];
5025 dev_data->pipelineMap[pPipelines[i]] = std::move(pPipeState[i]);
// Layer intercept for vkCreateSampler: forward the call first, then on success record a
// SAMPLER_STATE shadow object keyed by the new handle (map access under the global lock).
// NOTE(review): the function's closing lines (return of `result`, braces) are elided here.
5032 VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
5033 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
5034 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5035 VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
5036 if (VK_SUCCESS == result) {
5037 lock_guard_t lock(global_lock);
5038 dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
// Pre-call validation for vkCreateDescriptorSetLayout. Returns true if the call should be
// skipped. Honors the instance-level disable flag, then delegates to the descriptor-set
// layout validator with the push-descriptor / descriptor-indexing capabilities it needs.
5043 static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
5044 if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
5045 return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
5046 dev_data->report_data, create_info, dev_data->extensions.vk_khr_push_descriptor,
5047 dev_data->phys_dev_ext_props.max_push_descriptors, dev_data->extensions.vk_ext_descriptor_indexing,
5048 &dev_data->phys_dev_ext_props.descriptor_indexing_features);
// Post-call record for vkCreateDescriptorSetLayout: cache a shared DescriptorSetLayout
// shadow object keyed by the newly created handle.
5051 static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
5052 VkDescriptorSetLayout set_layout) {
5053 dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
// Layer intercept for vkCreateDescriptorSetLayout: validate under the global lock, forward
// down the dispatch table if validation passes, and record the shadow layout on success.
// `result` starts as VK_ERROR_VALIDATION_FAILED_EXT so a skipped call reports that failure.
// NOTE(review): the `if (!skip)` guard and the closing `return result;` are elided here.
5056 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5057 const VkAllocationCallbacks *pAllocator,
5058 VkDescriptorSetLayout *pSetLayout) {
5059 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5060 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5061 unique_lock_t lock(global_lock);
5062 bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
5065 result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5066 if (VK_SUCCESS == result) {
5068 PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
5074 // Used by CreatePipelineLayout and CmdPushConstants.
5075 // Note that the index argument is optional and only used by CreatePipelineLayout.
// Validates one push-constant (offset, size) range against device limits and alignment rules.
// Returns true if a violation was logged. Three checks are performed:
//   1. offset/size must fit within maxPushConstantsSize (overflow-safe comparison),
//   2. size must be non-zero and a multiple of 4,
//   3. offset must be a multiple of 4.
// Each check picks its VALIDATION_ERROR_* code by strcmp on `caller_name`, so the same
// helper reports the correct VU for vkCreatePipelineLayout() vs vkCmdPushConstants();
// any other caller name is reported as an internal error.
// NOTE(review): several `skip |= log_msg(` openers, closing braces, and the final
// `return skip;` are elided in this excerpt.
5076 static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5077 const char *caller_name, uint32_t index = 0) {
5078 if (dev_data->instance_data->disabled.push_constant_range) return false;
5079 uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5081 // Check that offset + size don't exceed the max.
5082 // Prevent arithetic overflow here by avoiding addition and testing in this order.
5083 if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5084 // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5085 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5086 if (offset >= maxPushConstantsSize) {
5088 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5089 VALIDATION_ERROR_11a0024c,
5090 "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
5091 caller_name, index, offset, maxPushConstantsSize);
5093 if (size > maxPushConstantsSize - offset) {
5094 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5095 VALIDATION_ERROR_11a00254,
5096 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
5097 "maxPushConstantSize of %u.",
5098 caller_name, index, offset, size, maxPushConstantsSize);
// Same pair of range checks, reported with the vkCmdPushConstants-specific VU codes.
5100 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5101 if (offset >= maxPushConstantsSize) {
5103 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5104 VALIDATION_ERROR_1bc002e4,
5105 "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
5106 caller_name, index, offset, maxPushConstantsSize);
5108 if (size > maxPushConstantsSize - offset) {
5109 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5110 VALIDATION_ERROR_1bc002e6,
5111 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
5112 "maxPushConstantSize of %u.",
5113 caller_name, index, offset, size, maxPushConstantsSize);
// Unknown caller name: internal layer error.
5116 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5117 DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
5120 // size needs to be non-zero and a multiple of 4.
5121 if ((size == 0) || ((size & 0x3) != 0)) {
5122 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5124 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5125 VALIDATION_ERROR_11a00250,
5126 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
5130 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5131 VALIDATION_ERROR_11a00252,
5132 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
5135 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5137 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5138 VALIDATION_ERROR_1bc2c21b,
5139 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
5143 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5144 VALIDATION_ERROR_1bc002e2,
5145 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
5149 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5150 DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
5153 // offset needs to be a multiple of 4.
5154 if ((offset & 0x3) != 0) {
5155 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5156 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5157 VALIDATION_ERROR_11a0024e,
5158 "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
5160 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5161 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5162 VALIDATION_ERROR_1bc002e0, "%s call has push constants with offset %u. Offset must be a multiple of 4.",
5163 caller_name, offset);
5165 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5166 DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
// Descriptor-type groupings used as indices into the per-stage/per-set count valarrays
// below (GetDescriptorCountMaxPerStage / limit checks). DSL_NUM_DESCRIPTOR_GROUPS is the
// sentinel count used to size those arrays.
5172 enum DSL_DESCRIPTOR_GROUPS {
5173 DSL_TYPE_SAMPLERS = 0,
5174 DSL_TYPE_UNIFORM_BUFFERS,
5175 DSL_TYPE_STORAGE_BUFFERS,
5176 DSL_TYPE_SAMPLED_IMAGES,
5177 DSL_TYPE_STORAGE_IMAGES,
5178 DSL_TYPE_INPUT_ATTACHMENTS,
5179 DSL_NUM_DESCRIPTOR_GROUPS
5182 // Used by PreCallValiateCreatePipelineLayout.
5183 // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
// For each active shader stage, sums the descriptor counts of every binding visible to that
// stage across all given set layouts, then takes the per-group maximum over stages.
// When `skip_update_after_bind` is true, layouts created with
// VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT are excluded (elided
// `continue` presumed in the gap after the flag check).
5184 std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
5185 const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts,
5186 bool skip_update_after_bind) {
5187 // Identify active pipeline stages
5188 std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
5189 VK_SHADER_STAGE_COMPUTE_BIT};
// Geometry/tessellation stages only count when the corresponding device feature is enabled.
5190 if (dev_data->enabled_features.geometryShader) {
5191 stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
5193 if (dev_data->enabled_features.tessellationShader) {
5194 stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
5195 stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
5198 // Allow iteration over enum values
5199 std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS,
5200 DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS};
5202 // Sum by layouts per stage, then pick max of stages per type
5203 std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
5204 for (auto stage : stage_flags) {
5205 std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
5206 for (auto dsl : set_layouts) {
5207 if (skip_update_after_bind &&
5208 (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
// Only bindings whose stageFlags include the current stage contribute to its sums.
5212 for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5213 const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5214 if (0 != (stage & binding->stageFlags)) {
5215 switch (binding->descriptorType) {
5216 case VK_DESCRIPTOR_TYPE_SAMPLER:
5217 stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5219 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
5220 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
5221 stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
5223 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
5224 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
5225 stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
5227 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
5228 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
5229 stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5231 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
5232 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
5233 stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
// A combined image sampler counts against BOTH the sampled-image and sampler groups.
5235 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
5236 stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5237 stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5239 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
5240 stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
// Fold this stage's sums into the running per-group maxima.
5248 for (auto type : dsl_groups) {
5249 max_sum[type] = std::max(stage_sum[type], max_sum[type]);
5255 // Used by PreCallValidateCreatePipelineLayout.
5256 // Returns an array of size VK_DESCRIPTOR_TYPE_RANGE_SIZE of the summed descriptors by type.
5257 // Note: descriptors only count against the limit once even if used by multiple stages.
// Sums descriptorCount per descriptor type across all bindings of all given set layouts,
// optionally excluding UPDATE_AFTER_BIND_POOL layouts (elided `continue` presumed in the
// gap after the flag check). Return value is indexed directly by VkDescriptorType.
5258 std::valarray<uint32_t> GetDescriptorSum(
5259 const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
5260 bool skip_update_after_bind) {
5261 std::valarray<uint32_t> sum_by_type(0U, VK_DESCRIPTOR_TYPE_RANGE_SIZE);
5262 for (auto dsl : set_layouts) {
5263 if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
5267 for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5268 const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5269 sum_by_type[binding->descriptorType] += binding->descriptorCount;
5275 static bool PreCallValiateCreatePipelineLayout(const layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo) {
5278 // Validate layout count against device physical limit
5279 if (pCreateInfo->setLayoutCount > dev_data->phys_dev_props.limits.maxBoundDescriptorSets) {
5280 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5281 VALIDATION_ERROR_0fe0023c,
5282 "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
5283 pCreateInfo->setLayoutCount, dev_data->phys_dev_props.limits.maxBoundDescriptorSets);
5286 // Validate Push Constant ranges
5288 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5289 skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5290 pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5291 if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5292 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5293 VALIDATION_ERROR_11a2dc03, "vkCreatePipelineLayout() call has no stageFlags set.");
5297 // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
5298 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5299 for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5300 if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
5301 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5302 VALIDATION_ERROR_0fe00248,
5303 "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
5309 if (skip) return skip;
5311 std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
5312 unsigned int push_descriptor_set_count = 0;
5314 unique_lock_t lock(global_lock); // Lock while accessing global state
5315 for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5316 set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5317 if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
5321 if (push_descriptor_set_count > 1) {
5322 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5323 VALIDATION_ERROR_0fe0024a, "vkCreatePipelineLayout() Multiple push descriptor sets found.");
5326 // Max descriptors by type, within a single pipeline stage
5327 std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(dev_data, set_layouts, true);
5329 if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
5331 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5332 VALIDATION_ERROR_0fe0023e,
5333 "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
5334 "maxPerStageDescriptorSamplers limit (%d).",
5335 max_descriptors_per_stage[DSL_TYPE_SAMPLERS], dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers);
5339 if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
5340 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5341 VALIDATION_ERROR_0fe00240,
5342 "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
5343 "maxPerStageDescriptorUniformBuffers limit (%d).",
5344 max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
5345 dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
5349 if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
5350 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5351 VALIDATION_ERROR_0fe00242,
5352 "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
5353 "maxPerStageDescriptorStorageBuffers limit (%d).",
5354 max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
5355 dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
5359 if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
5360 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5361 VALIDATION_ERROR_0fe00244,
5362 "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
5363 "maxPerStageDescriptorSampledImages limit (%d).",
5364 max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
5365 dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages);
5369 if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
5370 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5371 VALIDATION_ERROR_0fe00246,
5372 "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
5373 "maxPerStageDescriptorStorageImages limit (%d).",
5374 max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
5375 dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages);
5378 // Input attachments
5379 if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
5380 dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
5381 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5382 VALIDATION_ERROR_0fe00d18,
5383 "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
5384 "maxPerStageDescriptorInputAttachments limit (%d).",
5385 max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
5386 dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
5389 // Total descriptors by type
5391 std::valarray<uint32_t> sum_all_stages = GetDescriptorSum(dev_data, set_layouts, true);
5393 uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
5394 if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
5395 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5396 VALIDATION_ERROR_0fe00d1a,
5397 "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
5398 "maxDescriptorSetSamplers limit (%d).",
5399 sum, dev_data->phys_dev_props.limits.maxDescriptorSetSamplers);
5403 if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
5404 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5405 VALIDATION_ERROR_0fe00d1c,
5406 "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
5407 "maxDescriptorSetUniformBuffers limit (%d).",
5408 sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
5409 dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers);
5412 // Dynamic uniform buffers
5413 if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
5414 dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
5415 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5416 VALIDATION_ERROR_0fe00d1e,
5417 "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
5418 "maxDescriptorSetUniformBuffersDynamic limit (%d).",
5419 sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
5420 dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
5424 if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
5425 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5426 VALIDATION_ERROR_0fe00d20,
5427 "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
5428 "maxDescriptorSetStorageBuffers limit (%d).",
5429 sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
5430 dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers);
5433 // Dynamic storage buffers
5434 if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
5435 dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
5436 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5437 VALIDATION_ERROR_0fe00d22,
5438 "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
5439 "maxDescriptorSetStorageBuffersDynamic limit (%d).",
5440 sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
5441 dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
5445 sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
5446 sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
5447 if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
5448 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5449 VALIDATION_ERROR_0fe00d24,
5450 "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
5451 "maxDescriptorSetSampledImages limit (%d).",
5452 sum, dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages);
5456 sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
5457 if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
5458 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5459 VALIDATION_ERROR_0fe00d26,
5460 "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
5461 "maxDescriptorSetStorageImages limit (%d).",
5462 sum, dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages);
5465 // Input attachments
5466 if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
5467 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5468 VALIDATION_ERROR_0fe00d28,
5469 "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
5470 "maxDescriptorSetInputAttachments limit (%d).",
5471 sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
5472 dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments);
5475 if (dev_data->extensions.vk_ext_descriptor_indexing) {
5476 // XXX TODO: replace with correct VU messages
5478 // Max descriptors by type, within a single pipeline stage
5479 std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
5480 GetDescriptorCountMaxPerStage(dev_data, set_layouts, false);
5482 if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
5483 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
5484 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5485 VALIDATION_ERROR_0fe0179c,
5486 "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
5487 "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
5488 max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
5489 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
5493 if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
5494 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
5496 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5497 VALIDATION_ERROR_0fe0179e,
5498 "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
5499 "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
5500 max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
5501 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
5505 if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
5506 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
5508 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5509 VALIDATION_ERROR_0fe017a0,
5510 "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
5511 "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
5512 max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
5513 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
5517 if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
5518 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
5520 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5521 VALIDATION_ERROR_0fe017a2,
5522 "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
5523 "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
5524 max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
5525 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
5529 if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
5530 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
5532 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5533 VALIDATION_ERROR_0fe017a4,
5534 "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
5535 "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
5536 max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
5537 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
5540 // Input attachments
5541 if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
5542 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
5544 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5545 VALIDATION_ERROR_0fe017a6,
5546 "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
5547 "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
5548 max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
5549 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
5552 // Total descriptors by type, summed across all pipeline stages
5554 std::valarray<uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(dev_data, set_layouts, false);
5556 sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
5557 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
5558 if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
5559 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5560 VALIDATION_ERROR_0fe017b8,
5561 "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
5562 "maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
5563 sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
5567 if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
5568 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
5569 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5570 VALIDATION_ERROR_0fe017ba,
5571 "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
5572 "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
5573 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
5574 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
5577 // Dynamic uniform buffers
5578 if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
5579 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
5581 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5582 VALIDATION_ERROR_0fe017bc,
5583 "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
5584 "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
5585 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
5586 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
5590 if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
5591 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
5592 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5593 VALIDATION_ERROR_0fe017be,
5594 "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
5595 "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
5596 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
5597 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
5600 // Dynamic storage buffers
5601 if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
5602 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
5604 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5605 VALIDATION_ERROR_0fe017c0,
5606 "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
5607 "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
5608 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
5609 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
5613 sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
5614 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
5615 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
5616 if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
5618 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5619 VALIDATION_ERROR_0fe017c2,
5620 "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
5621 "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
5622 sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
5626 sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
5627 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
5628 if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
5630 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5631 VALIDATION_ERROR_0fe017c4,
5632 "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
5633 "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
5634 sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
5637 // Input attachments
5638 if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
5639 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
5640 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5641 VALIDATION_ERROR_0fe017c6,
5642 "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
5643 "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
5644 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
5645 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
5651 // For repeatable sorting, not very useful for "memory in range" search
5652 struct PushConstantRangeCompare {
5653 bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
5654 if (lhs->offset == rhs->offset) {
5655 if (lhs->size == rhs->size) {
5656 // The comparison is arbitrary, but avoids false aliasing by comparing all fields.
5657 return lhs->stageFlags < rhs->stageFlags;
5659 // If the offsets are the same then sorting by the end of range is useful for validation
5660 return lhs->size < rhs->size;
5662 return lhs->offset < rhs->offset;
// Dictionary of canonical-form push constant range lists, shared by all pipeline layouts
// (populated/queried via get_canonical_id() below).
static PushConstantRangesDict push_constant_ranges_dict;
5668 PushConstantRangesId get_canonical_id(const VkPipelineLayoutCreateInfo *info) {
5669 if (!info->pPushConstantRanges) {
5670 // Hand back the empty entry (creating as needed)...
5671 return push_constant_ranges_dict.look_up(PushConstantRanges());
5674 // Sort the input ranges to ensure equivalent ranges map to the same id
5675 std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
5676 for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
5677 sorted.insert(info->pPushConstantRanges + i);
5680 PushConstantRanges ranges(sorted.size());
5681 for (const auto range : sorted) {
5682 ranges.emplace_back(*range);
5684 return push_constant_ranges_dict.look_up(std::move(ranges));
// Dictionary of canonical form of the pipeline set layout of descriptor set layouts
// (Keyed by the ordered list of descriptor set layout ids recorded for a pipeline layout.)
static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;

// Dictionary of canonical form of the "compatible for set" records
static PipelineLayoutCompatDict pipeline_layout_compat_dict;
5693 static PipelineLayoutCompatId get_canonical_id(const uint32_t set_index, const PushConstantRangesId pcr_id,
5694 const PipelineLayoutSetLayoutsId set_layouts_id) {
5695 return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
5698 static void PostCallRecordCreatePipelineLayout(layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
5699 const VkPipelineLayout *pPipelineLayout) {
5700 unique_lock_t lock(global_lock); // Lock while accessing state
5702 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5703 plNode.layout = *pPipelineLayout;
5704 plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
5705 PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
5706 for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5707 plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5708 set_layouts[i] = plNode.set_layouts[i]->get_layout_id();
5711 // Get canonical form IDs for the "compatible for set" contents
5712 plNode.push_constant_ranges = get_canonical_id(pCreateInfo);
5713 auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
5714 plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount);
5716 // Create table of "compatible for set N" cannonical forms for trivial accept validation
5717 for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5718 plNode.compat_for_set.emplace_back(get_canonical_id(i, plNode.push_constant_ranges, set_layouts_id));
5724 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5725 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5726 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5728 bool skip = PreCallValiateCreatePipelineLayout(dev_data, pCreateInfo);
5729 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5731 VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5733 if (VK_SUCCESS == result) {
5734 PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
5739 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
5740 const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
5741 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5742 VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
5743 if (VK_SUCCESS == result) {
5744 DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
5745 if (NULL == pNewNode) {
5746 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5747 HandleToUint64(*pDescriptorPool), DRAWSTATE_OUT_OF_MEMORY,
5748 "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
5749 return VK_ERROR_VALIDATION_FAILED_EXT;
5751 lock_guard_t lock(global_lock);
5752 dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
5755 // Need to do anything if pool create fails?
5760 VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
5761 VkDescriptorPoolResetFlags flags) {
5762 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5764 unique_lock_t lock(global_lock);
5765 // Make sure sets being destroyed are not currently in-use
5766 bool skip = validateIdleDescriptorSetForPoolReset(dev_data, descriptorPool);
5769 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5771 VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
5772 if (VK_SUCCESS == result) {
5774 clearDescriptorPool(dev_data, device, descriptorPool, flags);
5779 // Ensure the pool contains enough descriptors and descriptor sets to satisfy
5780 // an allocation request. Fills common_data with the total number of descriptors of each type required,
5781 // as well as DescriptorSetLayout ptrs used for later update.
5782 static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5783 cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5784 // Always update common data
5785 cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
5786 if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
5787 // All state checks for AllocateDescriptorSets is done in single function
5788 return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
5790 // Allocation state was good and call down chain was made so update state based on allocating descriptor sets
5791 static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5792 VkDescriptorSet *pDescriptorSets,
5793 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5794 // All the updates are contained in a single cvdescriptorset function
5795 cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
5796 &dev_data->setMap, dev_data);
5799 // TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
5800 VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5801 VkDescriptorSet *pDescriptorSets) {
5802 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5803 unique_lock_t lock(global_lock);
5804 cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
5805 bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
5808 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5810 VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
5812 if (VK_SUCCESS == result) {
5814 PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
5819 // Verify state before freeing DescriptorSets
5820 static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
5821 const VkDescriptorSet *descriptor_sets) {
5822 if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
5824 // First make sure sets being destroyed are not currently in-use
5825 for (uint32_t i = 0; i < count; ++i) {
5826 if (descriptor_sets[i] != VK_NULL_HANDLE) {
5827 skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
5831 DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
5832 if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
5833 // Can't Free from a NON_FREE pool
5834 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5835 HandleToUint64(pool), VALIDATION_ERROR_28600270,
5836 "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
5837 "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
5841 // Sets have been removed from the pool so update underlying state
5842 static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
5843 const VkDescriptorSet *descriptor_sets) {
5844 DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
5845 // Update available descriptor sets in pool
5846 pool_state->availableSets += count;
5848 // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
5849 for (uint32_t i = 0; i < count; ++i) {
5850 if (descriptor_sets[i] != VK_NULL_HANDLE) {
5851 auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
5852 uint32_t type_index = 0, descriptor_count = 0;
5853 for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
5854 type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
5855 descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
5856 pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
5858 freeDescriptorSet(dev_data, descriptor_set);
5859 pool_state->sets.erase(descriptor_set);
5864 VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
5865 const VkDescriptorSet *pDescriptorSets) {
5866 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5867 // Make sure that no sets being destroyed are in-flight
5868 unique_lock_t lock(global_lock);
5869 bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
5872 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5873 VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
5874 if (VK_SUCCESS == result) {
5876 PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
5881 // TODO : This is a Proof-of-concept for core validation architecture
5882 // Really we'll want to break out these functions to separate files but
5883 // keeping it all together here to prove out design
5884 // PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
5885 static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5886 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5887 const VkCopyDescriptorSet *pDescriptorCopies) {
5888 if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
5889 // First thing to do is perform map look-ups.
5890 // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
5891 // so we can't just do a single map look-up up-front, but do them individually in functions below
5893 // Now make call(s) that validate state, but don't perform state updates in this function
5894 // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
5895 // namespace which will parse params and make calls into specific class instances
5896 return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
5897 descriptorCopyCount, pDescriptorCopies);
// PreCallRecord* handles recording state updates prior to the call down chain to UpdateDescriptorSets()
5900 static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5901 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5902 const VkCopyDescriptorSet *pDescriptorCopies) {
5903 cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5907 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
5908 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5909 const VkCopyDescriptorSet *pDescriptorCopies) {
5910 // Only map look-up at top level is for device-level layer_data
5911 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5912 unique_lock_t lock(global_lock);
5913 bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5916 // Since UpdateDescriptorSets() is void, nothing to check prior to updating state & we can update before call down chain
5917 PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5920 dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5925 VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
5926 VkCommandBuffer *pCommandBuffer) {
5927 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5928 VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
5929 if (VK_SUCCESS == result) {
5930 unique_lock_t lock(global_lock);
5931 auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
5934 for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
5935 // Add command buffer to its commandPool map
5936 pPool->commandBuffers.insert(pCommandBuffer[i]);
5937 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
5938 // Add command buffer to map
5939 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
5940 ResetCommandBufferState(dev_data, pCommandBuffer[i]);
5941 pCB->createInfo = *pCreateInfo;
5942 pCB->device = device;
5950 // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
5951 static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
5952 addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
5954 for (auto attachment : fb_state->attachments) {
5955 auto view_state = attachment.view_state;
5957 AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
// Validate and record state for vkBeginCommandBuffer(): checks in-use/implicit-reset
// rules, secondary-command-buffer inheritance info, and query restrictions, then marks
// the command buffer CB_RECORDING and caches its begin/inheritance info.
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
    // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
    if (cb_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
                        "Calling vkBeginCommandBuffer() on active command buffer %" PRIx64
                        " before it has completed. You must check command buffer fence before this call.",
                        HandleToUint64(commandBuffer));
    // Drop any memory/object references held from a prior recording before re-recording.
    clear_cmd_buf_and_mem_references(dev_data, cb_node);
    if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        // Secondary Command Buffer
        const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            // Secondary command buffers are required to supply inheritance info.
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00066,
                    "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64 ") must have inheritance info.",
                    HandleToUint64(commandBuffer));
        if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
            assert(pInfo->renderPass);
            string errorString = "";
            auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
                if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
                    // renderPass that framebuffer was created with must be compatible with local renderPass
                        validateRenderPassCompatibility(dev_data, "framebuffer", framebuffer->rp_state.get(),
                                                        "command buffer", GetRenderPassState(dev_data, pInfo->renderPass),
                                                        "vkBeginCommandBuffer()", VALIDATION_ERROR_0280006e);
                // Connect this framebuffer and its children to this cmdBuffer
                AddFramebufferBinding(dev_data, cb_node, framebuffer);
            // PRECISE occlusion query control requires both occlusionQueryEnable and the
            // occlusionQueryPrecise device feature.
            // NOTE(review): "occulusionQuery" in the message below is a misspelling in this
            // user-facing string.
            if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                VALIDATION_ERROR_16e00068,
                                "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
                                ") must not have VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device "
                                "does not support precise occlusion queries.",
                                HandleToUint64(commandBuffer));
        if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
            auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
                // The inherited subpass index must exist in the inherited render pass.
                if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                    VALIDATION_ERROR_0280006c,
                                    "vkBeginCommandBuffer(): Secondary Command Buffers (0x%" PRIx64
                                    ") must have a subpass index (%d) that is less than the number of subpasses (%d).",
                                    HandleToUint64(commandBuffer), pInfo->subpass, renderPass->createInfo.subpassCount);
    if (CB_RECORDING == cb_node->state) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIx64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        HandleToUint64(commandBuffer));
    } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
        // Re-beginning a completed command buffer is an implicit reset, which is only legal
        // for pools created with RESET_COMMAND_BUFFER_BIT.
        VkCommandPool cmdPool = cb_node->createInfo.commandPool;
        auto pPool = GetCommandPoolNode(dev_data, cmdPool);
        if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00064,
                        "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIx64
                        ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIx64
                        ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                        HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
        ResetCommandBufferState(dev_data, commandBuffer);
    // Set updated state here in case implicit reset occurs above
    cb_node->state = CB_RECORDING;
    cb_node->beginInfo = *pBeginInfo;
    if (cb_node->beginInfo.pInheritanceInfo) {
        // Deep-copy the inheritance info so the stored beginInfo never points at caller memory.
        cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
        cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
        // If we are a secondary command-buffer and inheriting. Update the items we should inherit.
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
            (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
            cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
            cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
            cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
    return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
6073 static void PostCallRecordEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
6074 // Cached validation is specific to a specific recording of a specific command buffer.
6075 for (auto descriptor_set : cb_state->validated_descriptor_sets) {
6076 descriptor_set->ClearCachedValidation(cb_state);
6078 cb_state->validated_descriptor_sets.clear();
// Validate vkEndCommandBuffer() (no open render pass for primary/non-continue buffers,
// no still-active queries), then record the CB_RECORDED transition on success.
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        // Only secondary buffers begun with RENDER_PASS_CONTINUE_BIT may legally end
        // while "inside" a render pass.
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
        skip |= ValidateCmd(dev_data, pCB, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
        // Any query still active at end-of-recording is an error; report each one.
        for (auto query : pCB->activeQueries) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_2740007a,
                            "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d.",
                            HandleToUint64(query.pool), query.index);
        auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        // Drop per-recording cached descriptor-set validation results.
        PostCallRecordEndCommandBuffer(dev_data, pCB);
        if (VK_SUCCESS == result) {
            pCB->state = CB_RECORDED;
        return VK_ERROR_VALIDATION_FAILED_EXT;
6115 VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6117 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6118 unique_lock_t lock(global_lock);
6119 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6120 VkCommandPool cmdPool = pCB->createInfo.commandPool;
6121 auto pPool = GetCommandPoolNode(dev_data, cmdPool);
6122 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6123 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6124 HandleToUint64(commandBuffer), VALIDATION_ERROR_3260005c,
6125 "Attempt to reset command buffer (0x%" PRIx64 ") created from command pool (0x%" PRIx64
6126 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6127 HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
6129 skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
6131 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
6132 VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
6133 if (VK_SUCCESS == result) {
6135 ResetCommandBufferState(dev_data, commandBuffer);
// Validate queue capabilities for vkCmdBindPipeline() and record the binding: refresh the
// static/dynamic status masks (graphics only), track the last-bound pipeline, and bind the
// pipeline object to this command buffer.
VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                           VkPipeline pipeline) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                      VALIDATION_ERROR_18002415);
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616 -- using ValidatePipelineBindPoint
        auto pipe_state = getPipelineState(dev_data, pipeline);
        if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
            // Recompute which state is statically baked into this pipeline: clear the old
            // static bits, derive the new mask from pDynamicState, then mark those as set.
            cb_state->status &= ~cb_state->static_status;
            cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
            cb_state->status |= cb_state->static_status;
        cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
        set_pipeline_state(pipe_state);
        skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
        addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
    if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6168 VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
6169 const VkViewport *pViewports) {
6171 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6172 unique_lock_t lock(global_lock);
6173 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6175 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
6176 skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORT, "vkCmdSetViewport()");
6177 if (pCB->static_status & CBSTATUS_VIEWPORT_SET) {
6178 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6179 HandleToUint64(commandBuffer), VALIDATION_ERROR_1e00098a,
6180 "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag..");
6183 pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
6184 pCB->status |= CBSTATUS_VIEWPORT_SET;
6188 if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6191 VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
6192 const VkRect2D *pScissors) {
6194 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6195 unique_lock_t lock(global_lock);
6196 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6198 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
6199 skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSOR, "vkCmdSetScissor()");
6200 if (pCB->static_status & CBSTATUS_SCISSOR_SET) {
6201 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6202 HandleToUint64(commandBuffer), VALIDATION_ERROR_1d80049c,
6203 "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag..");
6206 pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
6207 pCB->status |= CBSTATUS_SCISSOR_SET;
6211 if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6214 VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6216 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6217 unique_lock_t lock(global_lock);
6218 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6220 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
6221 skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
6223 if (pCB->static_status & CBSTATUS_LINE_WIDTH_SET) {
6224 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6225 HandleToUint64(commandBuffer), VALIDATION_ERROR_1d600626,
6226 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
6229 pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6233 if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
// Validate vkCmdSetDepthBias(): depth bias must be a dynamic state of the bound pipeline,
// and a nonzero depthBiasClamp requires the depthBiasClamp device feature. Records
// CBSTATUS_DEPTH_BIAS_SET on success.
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
                                           float depthBiasSlopeFactor) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
        skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
        skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
        if (pCB->static_status & CBSTATUS_DEPTH_BIAS_SET) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062a,
                            "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
        // Nonzero clamp is only legal when the depthBiasClamp device feature is enabled.
        if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062c,
                            "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
            pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
// vkCmdSetBlendConstants layer intercept: same pattern as the other dynamic-state setters —
// validate queue flags + command state, error if the pipeline baked blend constants in as
// static state, mark the state set, forward when clean. (Numbering gaps mark elided lines.)
6265 VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6267 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6268 unique_lock_t lock(global_lock);
6269 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6271 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
6272 skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
6273 if (pCB->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
6274 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6275 HandleToUint64(commandBuffer), VALIDATION_ERROR_1ca004c8,
6276 "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
6279 pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6283 if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
// vkCmdSetDepthBounds layer intercept: validates graphics-queue support, recording state, and
// that VK_DYNAMIC_STATE_DEPTH_BOUNDS was enabled on the bound pipeline before forwarding.
// (Numbering gaps mark elided lines in this listing.)
6286 VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6288 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6289 unique_lock_t lock(global_lock);
6290 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6292 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
6293 skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
6294 if (pCB->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
6295 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6296 HandleToUint64(commandBuffer), VALIDATION_ERROR_1ce004ae,
6297 "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
6300 pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6304 if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
// vkCmdSetStencilCompareMask layer intercept. NOTE: the bare ValidateCmdQueueFlags/log_msg
// calls below look unaccumulated, but the numbering gaps (6314, 6318) indicate the "skip |="
// continuation lines were elided from this listing — presumably these are multi-line
// statements; verify against the full file before treating as a bug.
6307 VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6308 uint32_t compareMask) {
6310 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6311 unique_lock_t lock(global_lock);
6312 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6315 ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
6316 skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
6317 if (pCB->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
6319 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6320 HandleToUint64(commandBuffer), VALIDATION_ERROR_1da004b4,
6321 "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
6324 pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6328 if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
// vkCmdSetStencilWriteMask layer intercept: validates queue/recording state and dynamic-state
// enablement, then records CBSTATUS_STENCIL_WRITE_MASK_SET and forwards when validation passed.
// (Numbering gaps mark elided lines in this listing.)
6331 VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6333 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6334 unique_lock_t lock(global_lock);
6335 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6338 ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
6339 skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
6340 if (pCB->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
6341 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6342 HandleToUint64(commandBuffer), VALIDATION_ERROR_1de004b6,
6343 "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
6346 pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6350 if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
// vkCmdSetStencilReference layer intercept: same dynamic-state-setter pattern as the stencil
// mask entry points above. (Numbering gaps mark elided lines in this listing.)
6353 VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6355 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6356 unique_lock_t lock(global_lock);
6357 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6360 ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
6361 skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
6362 if (pCB->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
6363 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6364 HandleToUint64(commandBuffer), VALIDATION_ERROR_1dc004b8,
6365 "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
6368 pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6372 if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
6375 // Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
// Shared state-update helper used by both CmdBindDescriptorSets and CmdPushDescriptorSetKHR.
// Resizes the per-bind-point bound-set arrays, invalidates previously bound sets whose layout
// compat ids no longer match ("disturbed" sets), then records the incoming sets and slices the
// flat p_dynamic_offsets array into per-set vectors.
// NOTE(review): `descriptor_sets` is declared as `const std::vector<...>` *by value* — each call
// copies the vector; this was presumably meant to be a const reference. Confirm and fix upstream.
// (Numbering gaps mark elided lines in this listing, e.g. lambda return statements and braces.)
6376 static void UpdateLastBoundDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6377 VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout,
6378 uint32_t first_set, uint32_t set_count,
6379 const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
6380 uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
// Defend both ways: assert in debug builds, early-return in release.
6383 if (0 == set_count) return;
6384 assert(pipeline_layout);
6385 if (!pipeline_layout) return;
6387 uint32_t required_size = first_set + set_count;
6388 const uint32_t last_binding_index = required_size - 1;
6389 assert(last_binding_index < pipeline_layout->compat_for_set.size());
6391 // Some useful shorthand
6392 auto &last_bound = cb_state->lastBound[pipeline_bind_point];
6394 auto &bound_sets = last_bound.boundDescriptorSets;
6395 auto &dynamic_offsets = last_bound.dynamicOffsets;
6396 auto &bound_compat_ids = last_bound.compat_id_for_set;
6397 auto &pipe_compat_ids = pipeline_layout->compat_for_set;
// These three vectors are maintained in lockstep; the asserts document that invariant.
6399 const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
6400 assert(current_size == dynamic_offsets.size());
6401 assert(current_size == bound_compat_ids.size());
6403 // We need this three times in this function, but nowhere else
// Releases the CB-owned push descriptor set when it is about to be unbound/overwritten.
// (The lambda's return statements fall in elided lines; it appears to return whether `ds`
// was the push descriptor set — TODO confirm against the full file.)
6404 auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
6405 if (ds && ds->IsPushDescriptor()) {
6406 assert(ds == last_bound.push_descriptor_set.get());
6407 last_bound.push_descriptor_set = nullptr;
6413 // Clean up the "disturbed" before and after the range to be set
6414 if (required_size < current_size) {
6415 if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
6416 // We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor
6417 for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
6418 if (push_descriptor_cleanup(bound_sets[set_idx])) break;
6421 // We're not disturbing past last, so leave the upper binding data alone.
6422 required_size = current_size;
6426 // We resize if we need more set entries or if those past "last" are disturbed
6427 if (required_size != current_size) {
6428 // TODO: put these size tied things in a struct (touches many lines)
6429 bound_sets.resize(required_size);
6430 dynamic_offsets.resize(required_size);
6431 bound_compat_ids.resize(required_size);
6434 // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6435 for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
6436 if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) {
6437 push_descriptor_cleanup(bound_sets[set_idx]);
6438 bound_sets[set_idx] = nullptr;
6439 dynamic_offsets[set_idx].clear();
6440 bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];
6444 // Now update the bound sets with the input sets
6445 const uint32_t *input_dynamic_offsets = p_dynamic_offsets; // "read" pointer for dynamic offset data
6446 for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
6447 auto set_idx = input_idx + first_set; // set_idx is index within layout, input_idx is index within input descriptor sets
6448 cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
6450 // Record binding (or push)
6451 push_descriptor_cleanup(bound_sets[set_idx]);
6452 bound_sets[set_idx] = descriptor_set;
6453 bound_compat_ids[set_idx] = pipe_compat_ids[set_idx]; // compat ids are canonical *per* set index
6455 if (descriptor_set) {
6456 auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
6457 // TODO: Add logic for tracking push_descriptor offsets (here or in caller)
// Consume this set's share of the flat dynamic-offset array; the assert guards
// against reading past what the caller supplied.
6458 if (set_dynamic_descriptor_count && input_dynamic_offsets) {
6459 const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
6460 dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
6461 input_dynamic_offsets = end_offset;
6462 assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
6464 dynamic_offsets[set_idx].clear();
6466 if (!descriptor_set->IsPushDescriptor()) {
6467 // Can't cache validation of push_descriptors
6468 cb_state->validated_descriptor_sets.insert(descriptor_set);
6474 // Update the bound state for the bind point, including the effects of incompatible pipeline layouts
// Record-side half of vkCmdBindDescriptorSets: resolves the incoming handles to DescriptorSet
// state objects and delegates to UpdateLastBoundDescriptorSets. Skips the update entirely when
// every handle is unknown (all-null), which also implies nothing to record for setCount == 0.
6475 static void PreCallRecordCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6476 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
6477 uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
6478 uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
6479 auto pipeline_layout = getPipelineLayout(device_data, layout);
6480 std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets;
6481 descriptor_sets.reserve(setCount);
6483 // Construct a list of the descriptors
6484 bool found_non_null = false;
6485 for (uint32_t i = 0; i < setCount; i++) {
6486 cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[i]);
6487 descriptor_sets.emplace_back(descriptor_set);
6488 found_non_null |= descriptor_set != nullptr;
6490 if (found_non_null) { // which implies setCount > 0
6491 UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount,
6492 descriptor_sets, dynamicOffsetCount, pDynamicOffsets);
6493 cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
// Validate-side half of vkCmdBindDescriptorSets. Checks queue support, recording state,
// set/layout compatibility, and that dynamic offsets are (a) supplied one-per-dynamic-descriptor
// and (b) aligned to the device's min uniform/storage buffer offset alignment limits.
// NOTE(review): despite being a *Validate* function this resizes cb_state's boundDescriptorSets /
// dynamicOffsets / compat_id_for_set arrays — a state mutation; looks intentional (pre-sizing for
// the record step) but worth confirming.
// NOTE(review): `firstSet + setCount - 1` underflows if setCount == 0 — presumably ruled out by
// parameter validation in another layer; verify.
// (Numbering gaps mark elided lines in this listing, e.g. "bool skip = false;" and the return.)
6497 static bool PreCallValidateCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6498 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
6499 uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
6500 uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
6502 skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6503 VALIDATION_ERROR_17c02415);
6504 skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6505 // Track total count of dynamic descriptor types to make sure we have an offset for each one
6506 uint32_t total_dynamic_descriptors = 0;
6507 string error_string = "";
6508 uint32_t last_set_index = firstSet + setCount - 1;
// Pre-size the tracking arrays so per-set indexing below stays in bounds.
6510 if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6511 cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
6512 cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
6513 cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1);
6515 auto pipeline_layout = getPipelineLayout(device_data, layout);
6516 for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
6517 cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
6518 if (descriptor_set) {
// Warning (not error): binding a never-updated, non-empty set is suspicious but legal.
6519 if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
6521 device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6522 HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
6523 "Descriptor Set 0x%" PRIx64 " bound but it was never updated. You may want to either update it or not bind it.",
6524 HandleToUint64(pDescriptorSets[set_idx]));
6526 // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6527 if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
6529 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6530 HandleToUint64(pDescriptorSets[set_idx]), VALIDATION_ERROR_17c002cc,
6531 "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
6532 "pipelineLayout 0x%" PRIx64 " due to: %s.",
6533 set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str());
6536 auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
6538 if (set_dynamic_descriptor_count) {
6539 // First make sure we won't overstep bounds of pDynamicOffsets array
6540 if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
6541 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6542 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
6543 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT,
6544 "descriptorSet #%u (0x%" PRIx64
6545 ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. "
6546 "There must be one dynamic offset for each dynamic descriptor being bound.",
6547 set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
6548 (dynamicOffsetCount - total_dynamic_descriptors));
6549 } else { // Validate dynamic offsets and Dynamic Offset Minimums
// Walk every descriptor in the set; cur_dyn_offset advances only on dynamic
// descriptor types (the increments fall in elided lines — see numbering gaps).
6550 uint32_t cur_dyn_offset = total_dynamic_descriptors;
6551 for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
6552 if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6553 if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
6554 device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
6556 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6557 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
6558 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6559 "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
6560 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6561 device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6564 } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6565 if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
6566 device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
6568 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6569 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
6570 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6571 "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
6572 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6573 device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6578 // Keep running total of dynamic descriptor count to verify at the end
6579 total_dynamic_descriptors += set_dynamic_descriptor_count;
6583 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6584 HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_INVALID_SET,
6585 "Attempt to bind descriptor set 0x%" PRIx64 " that doesn't exist!",
6586 HandleToUint64(pDescriptorSets[set_idx]));
6589 // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6590 if (total_dynamic_descriptors != dynamicOffsetCount) {
6591 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6592 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_17c002ce,
6593 "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
6594 "exactly match the number of dynamic descriptors.",
6595 setCount, total_dynamic_descriptors, dynamicOffsetCount);
// vkCmdBindDescriptorSets layer entry point: validate, record state, then forward down the
// dispatch chain. The guard conditions around the record/dispatch calls fall in elided lines
// (numbering gaps) — presumably "if (!skip)"; confirm against the full file.
6600 VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
6601 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
6602 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6603 const uint32_t *pDynamicOffsets) {
6605 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6606 unique_lock_t lock(global_lock);
6607 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
6609 skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
6610 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6612 PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
6613 dynamicOffsetCount, pDynamicOffsets);
6615 device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6616 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6622 // Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
6623 // Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
6624 // TODO add vkCmdBindPipeline bind_point validation using this call.
// Maps the bind point to a required queue-flag (graphics or compute) and checks the command
// pool's queue family supports it; `bind_errors` supplies the per-bindpoint VUID to report.
// (Numbering gaps mark elided lines, e.g. "bool skip = false;" and the return.)
6625 bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6626 const char *func_name,
6627 const std::array<UNIQUE_VALIDATION_ERROR_CODE, VK_PIPELINE_BIND_POINT_RANGE_SIZE> &bind_errors) {
6629 auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
6630 if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
// flag_mask is indexed by bind point: GRAPHICS -> graphics queue bit, COMPUTE -> compute bit.
6631 static const VkQueueFlags flag_mask[VK_PIPELINE_BIND_POINT_RANGE_SIZE] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT};
6632 const auto bind_point_index = bind_point - VK_PIPELINE_BIND_POINT_BEGIN_RANGE; // typeof enum is not defined, use auto
6633 const auto &qfp = GetPhysDevProperties(device_data)->queue_family_properties[pool->queueFamilyIndex];
6634 if (0 == (qfp.queueFlags & flag_mask[bind_point_index])) {
6635 const UNIQUE_VALIDATION_ERROR_CODE error = bind_errors[bind_point_index];
6636 auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
6637 auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
6638 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6640 "%s: CommandBuffer 0x%" PRIxLEAST64 " was allocated from VkCommandPool 0x%" PRIxLEAST64
6641 " that does not support bindpoint %s.",
6642 func_name, cb_u64, cp_u64, string_VkPipelineBindPoint(bind_point));
// Validate-side of vkCmdPushDescriptorSetKHR: command/queue/bind-point checks, then verifies
// `set` indexes a layout entry created with VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR.
// (Numbering gaps mark elided lines, e.g. "bool skip = false;" and the return.)
6648 static bool PreCallValidateCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6649 const VkPipelineBindPoint bind_point, const VkPipelineLayout layout,
6650 const uint32_t set, const uint32_t descriptor_write_count,
6651 const VkWriteDescriptorSet *descriptor_writes, const char *func_name) {
6653 skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
6654 skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
6655 VALIDATION_ERROR_1be02415);
// Same VUID for both bind points here, hence the duplicated array entry.
6656 skip |= ValidatePipelineBindPoint(device_data, cb_state, bind_point, func_name,
6657 {{VALIDATION_ERROR_1be002d6, VALIDATION_ERROR_1be002d6}});
6658 auto layout_data = getPipelineLayout(device_data, layout);
6660 // Validate the set index points to a push descriptor set and is in range
6662 const auto &set_layouts = layout_data->set_layouts;
6663 const auto layout_u64 = HandleToUint64(layout);
6664 if (set < set_layouts.size()) {
6665 const auto *dsl = set_layouts[set].get();
// NOTE(review): "skip =" below (and at 6674) *overwrites* any previously accumulated
// skip result instead of OR-ing into it — "skip |=" is almost certainly intended.
6666 if (dsl && (0 == (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))) {
6667 skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6668 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, VALIDATION_ERROR_1be002da,
6669 "%s: Set index %" PRIu32
6670 " does not match push descriptor set layout index for VkPipelineLayout 0x%" PRIxLEAST64 ".",
6671 func_name, set, layout_u64);
6674 skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
6675 layout_u64, VALIDATION_ERROR_1be002d8,
6676 "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout 0x%" PRIxLEAST64 " (set < %" PRIu32
6678 func_name, set, layout_u64, static_cast<uint32_t>(set_layouts.size()));
// Record-side of vkCmdPushDescriptorSetKHR: creates a CB-owned proxy DescriptorSet for the
// pushed writes and binds it at index `set` via UpdateLastBoundDescriptorSets.
// NOTE(review): `pipeline_layout->set_layouts[set]` is indexed without a range check here —
// presumably safe because PreCallValidate rejects out-of-range `set` first, but the record step
// may still run when validation only *reports* (callers gate on skip); verify.
6684 static void PreCallRecordCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6685 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
6686 uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
6687 const auto &pipeline_layout = getPipelineLayout(device_data, layout);
6688 if (!pipeline_layout) return;
6689 std::unique_ptr<cvdescriptorset::DescriptorSet> new_desc{
6690 new cvdescriptorset::DescriptorSet(0, 0, pipeline_layout->set_layouts[set], 0, device_data)};
6692 std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {new_desc.get()};
6693 UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
// Ownership of the proxy set transfers to the command buffer's lastBound state.
6694 cb_state->lastBound[pipelineBindPoint].push_descriptor_set = std::move(new_desc);
6695 cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
// vkCmdPushDescriptorSetKHR layer entry point: validate, record, forward. The guards and
// trailing arguments of the record/dispatch calls fall in elided lines (numbering gaps).
6698 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
6699 VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
6700 const VkWriteDescriptorSet *pDescriptorWrites) {
6701 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6702 unique_lock_t lock(global_lock);
6703 auto cb_state = GetCBNode(device_data, commandBuffer);
6704 bool skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
6705 pDescriptorWrites, "vkCmdPushDescriptorSetKHR()");
6707 PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
6710 device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
// Returns the required offset alignment (in bytes) for an index buffer of the given index type.
// The per-case return statements fall in elided lines (numbering gaps) — presumably 2 for
// UINT16 and 4 for UINT32; confirm against the full file.
6715 static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
6716 switch (indexType) {
6717 case VK_INDEX_TYPE_UINT16:
6719 case VK_INDEX_TYPE_UINT32:
6722 // Not a real index type. Express no alignment requirement here; we expect upper layer
6723 // to have already picked up on the enum being nonsense.
// vkCmdBindIndexBuffer layer intercept: checks the buffer was created with INDEX_BUFFER usage,
// has memory bound, and that `offset` honors the index-type alignment; defers a memory-validity
// check to queue-submit time, then forwards. (Numbering gaps mark elided lines.)
6728 VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
6729 VkIndexType indexType) {
6731 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6732 unique_lock_t lock(global_lock);
6734 auto buffer_state = GetBufferState(dev_data, buffer);
6735 auto cb_node = GetCBNode(dev_data, commandBuffer);
6737 assert(buffer_state);
6739 skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, VALIDATION_ERROR_17e00362,
6740 "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
6741 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
6742 skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6743 skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
// NOTE(review): if GetIndexAlignment's default case returns 0 for a nonsense enum, this
// modulo would divide by zero — presumably upstream parameter validation rejects bad
// indexType values first; verify.
6744 auto offset_align = GetIndexAlignment(indexType);
6745 if (offset % offset_align) {
6746 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6747 HandleToUint64(commandBuffer), VALIDATION_ERROR_17e00360,
6748 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
6749 string_VkIndexType(indexType));
// Deferred check: buffer memory contents are only known to be valid at submit time.
6754 std::function<bool()> function = [=]() {
6755 return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
6757 cb_node->queue_submit_functions.push_back(function);
6758 cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6761 dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
// Record the vertex buffers bound by a vkCmdBindVertexBuffers call into the command buffer's
// currentDrawData snapshot, growing the array as needed so bindings at [firstBinding,
// firstBinding + bindingCount) land at their absolute binding index.
6764 void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6765 uint32_t end = firstBinding + bindingCount;
6766 if (pCB->currentDrawData.buffers.size() < end) {
6767 pCB->currentDrawData.buffers.resize(end);
6769 for (uint32_t i = 0; i < bindingCount; ++i) {
6770 pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
// Snapshot the current vertex-buffer bindings into the per-CB draw history, so each recorded
// draw remembers exactly which buffers were bound when it was issued.
6774 static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
// vkCmdBindVertexBuffers layer intercept: per-buffer checks (VERTEX_BUFFER usage, memory bound,
// offset within buffer size), deferred memory-validity checks queued for submit time, and the
// currentDrawData binding snapshot update. (Numbering gaps mark elided lines.)
6776 VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
6777 const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
6779 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6780 unique_lock_t lock(global_lock);
6782 auto cb_node = GetCBNode(dev_data, commandBuffer);
6785 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
6786 skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
6787 for (uint32_t i = 0; i < bindingCount; ++i) {
6788 auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
6789 assert(buffer_state);
6790 skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, VALIDATION_ERROR_182004e6,
6791 "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
6792 skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
6793 if (pOffsets[i] >= buffer_state->createInfo.size) {
6794 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
6795 HandleToUint64(buffer_state->buffer), VALIDATION_ERROR_182004e4,
6796 "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
// Second pass (record side): queue per-buffer memory-validity checks for submit time.
6802 for (uint32_t i = 0; i < bindingCount; ++i) {
6803 auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
6804 assert(buffer_state);
6805 std::function<bool()> function = [=]() {
6806 return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
6808 cb_node->queue_submit_functions.push_back(function);
6811 updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
6814 dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6817 // Expects global_lock to be held by caller
// For every image view / buffer this command buffer writes (updateImages / updateBuffers),
// queue a submit-time callback that marks the underlying memory contents as valid — consumers
// submitted afterwards will then pass the "memory is valid" checks.
6818 static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6819 for (auto imageView : pCB->updateImages) {
6820 auto view_state = GetImageViewState(dev_data, imageView);
// View may have been destroyed since the write was recorded; skip silently.
6821 if (!view_state) continue;
6823 auto image_state = GetImageState(dev_data, view_state->create_info.image);
6824 assert(image_state);
6825 std::function<bool()> function = [=]() {
6826 SetImageMemoryValid(dev_data, image_state, true);
6829 pCB->queue_submit_functions.push_back(function);
6831 for (auto buffer : pCB->updateBuffers) {
6832 auto buffer_state = GetBufferState(dev_data, buffer);
6833 assert(buffer_state);
6834 std::function<bool()> function = [=]() {
6835 SetBufferMemoryValid(dev_data, buffer_state, true);
6838 pCB->queue_submit_functions.push_back(function);
6842 // Generic function to handle validation for all CmdDraw* type functions
// Shared validation core: resolves and returns the CB node via the out-param `cb_state`, then
// checks queue flags, command-state, draw-state, and renderpass scope. Graphics draws must be
// *inside* a render pass; compute dispatch must be *outside* — hence the ternary below.
6843 static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
6844 CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
6845 UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
6846 UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
6848 *cb_state = GetCBNode(dev_data, cmd_buffer);
6850 skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
6851 skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
6852 skip |= ValidateDrawState(dev_data, *cb_state, cmd_type, indexed, bind_point, caller, dynamic_state_msg_code);
6853 skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
6854 : insideRenderPass(dev_data, *cb_state, caller, msg_code);
6859 // Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
// Post-call state update shared by draws and dispatches: refresh bound-state tracking and
// queue the submit-time "written resources become valid" callbacks.
6860 static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6861 UpdateDrawState(dev_data, cb_state, bind_point);
6862 MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
6865 // Generic function to handle state update for all CmdDraw* type functions
// Draw-only additions on top of the draw/dispatch update: snapshot vertex-buffer bindings for
// this draw and flag that the CB now contains at least one draw command.
6866 static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6867 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6868 updateResourceTrackingOnDraw(cb_state);
6869 cb_state->hasDrawCmd = true;
// Thin wrapper: vkCmdDraw validation is the generic draw-type validation with the
// CmdDraw-specific CMD enum and VUID codes.
6872 static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
6873 GLOBAL_CB_NODE **cb_state, const char *caller) {
6874 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6875 VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
// Post-call record for vkCmdDraw — delegates entirely to the shared draw-state update.
6878 static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6879 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
// vkCmdDraw layer entry point: validate under the global lock, forward to the driver, then
// record post-call state. The lock release/reacquire around the dispatch call falls in elided
// lines (numbering gaps).
6882 VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6883 uint32_t firstVertex, uint32_t firstInstance) {
6884 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6885 GLOBAL_CB_NODE *cb_state = nullptr;
6886 unique_lock_t lock(global_lock);
6887 bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
6890 dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6892 PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
// Validate vkCmdDrawIndexed: delegate to the shared draw-type validator with CmdDrawIndexed-specific VU codes.
6897 static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
6898 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
6899 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6900 VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
// Record layer state updates after vkCmdDrawIndexed has been forwarded to the driver.
6903 static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6904 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
// Layer intercept for vkCmdDrawIndexed: validate (indexed == true) and record under global_lock, then forward.
6907 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
6908 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
6909 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6910 GLOBAL_CB_NODE *cb_state = nullptr;
6911 unique_lock_t lock(global_lock);
6912 bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
6913 "vkCmdDrawIndexed()");
6916 dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
6918 PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
// Validate vkCmdDrawIndirect: common draw-type checks plus the indirect parameter buffer must have memory bound.
// Outputs the resolved cb_state and buffer_state for the caller's record phase.
6923 static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6924 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
6925 const char *caller) {
6927 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6928 VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
6929 *buffer_state = GetBufferState(dev_data, buffer);
6930 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
6931 // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6932 // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
// Record after vkCmdDrawIndirect: normal draw-state update plus binding the indirect buffer to the command buffer.
6936 static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6937 BUFFER_STATE *buffer_state) {
6938 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6939 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// Layer intercept for vkCmdDrawIndirect: validate buffer + draw state under global_lock, forward, then record.
6942 VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
6944 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6945 GLOBAL_CB_NODE *cb_state = nullptr;
6946 BUFFER_STATE *buffer_state = nullptr;
6947 unique_lock_t lock(global_lock);
6948 bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
6949 &buffer_state, "vkCmdDrawIndirect()");
6952 dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6954 PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
// Validate vkCmdDrawIndexedIndirect: common draw-type checks plus memory-bound check on the indirect buffer.
6959 static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6960 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
6961 BUFFER_STATE **buffer_state, const char *caller) {
6963 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
6964 VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
6965 *buffer_state = GetBufferState(dev_data, buffer);
6966 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
6967 // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6968 // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
// Record after vkCmdDrawIndexedIndirect: draw-state update plus binding the indirect buffer to the command buffer.
6973 static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6974 BUFFER_STATE *buffer_state) {
6975 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6976 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// Layer intercept for vkCmdDrawIndexedIndirect: validate (indexed == true) under global_lock, forward, then record.
6979 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
6980 uint32_t count, uint32_t stride) {
6981 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6982 GLOBAL_CB_NODE *cb_state = nullptr;
6983 BUFFER_STATE *buffer_state = nullptr;
6984 unique_lock_t lock(global_lock);
6985 bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
6986 &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
6989 dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6991 PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
// Validate vkCmdDispatch: shared draw/dispatch validator with compute-queue requirement and CmdDispatch VU codes.
// Note the third (renderpass) code is VALIDATION_ERROR_UNDEFINED — dispatch has no draw-style renderpass VU here.
6996 static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
6997 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
6998 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
6999 VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
// Record after vkCmdDispatch: dispatch-type state update only (no draw-specific tracking).
7002 static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
7003 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
// Layer intercept for vkCmdDispatch: validate at the compute bind point under global_lock, forward, then record.
7006 VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7007 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7008 GLOBAL_CB_NODE *cb_state = nullptr;
7009 unique_lock_t lock(global_lock);
7011 PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
7014 dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
7016 PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
// Validate vkCmdDispatchIndirect: common dispatch checks plus memory-bound check on the indirect parameter buffer.
7021 static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
7022 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
7023 BUFFER_STATE **buffer_state, const char *caller) {
7025 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
7026 VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
7027 *buffer_state = GetBufferState(dev_data, buffer);
7028 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
// Record after vkCmdDispatchIndirect: dispatch-state update plus binding the indirect buffer to the command buffer.
7032 static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
7033 BUFFER_STATE *buffer_state) {
7034 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
7035 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// Layer intercept for vkCmdDispatchIndirect: validate buffer + dispatch state under global_lock, forward, then record.
7038 VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7039 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7040 GLOBAL_CB_NODE *cb_state = nullptr;
7041 BUFFER_STATE *buffer_state = nullptr;
7042 unique_lock_t lock(global_lock);
7043 bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
7044 &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
7047 dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
7049 PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
// Layer intercept for vkCmdCopyBuffer: validation/record/forward only runs when the command buffer and
// both buffers resolve to known layer state objects.
7054 VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7055 uint32_t regionCount, const VkBufferCopy *pRegions) {
7056 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7057 unique_lock_t lock(global_lock);
7059 auto cb_node = GetCBNode(device_data, commandBuffer);
7060 auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7061 auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7063 if (cb_node && src_buffer_state && dst_buffer_state) {
7064 bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
// Record state before forwarding down the dispatch chain.
7066 PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
7068 device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
// Layer intercept for vkCmdCopyImage: validate regions/layouts, record, and forward when both image states resolve.
7076 VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7077 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7078 const VkImageCopy *pRegions) {
7080 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7081 unique_lock_t lock(global_lock);
7083 auto cb_node = GetCBNode(device_data, commandBuffer);
7084 auto src_image_state = GetImageState(device_data, srcImage);
7085 auto dst_image_state = GetImageState(device_data, dstImage);
7086 if (cb_node && src_image_state && dst_image_state) {
7087 skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
7088 srcImageLayout, dstImageLayout);
7090 PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
7093 device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7102 // Validate that an image's sampleCount matches the requirement for a specific API call
// Emits msgCode with the image handle and both sample counts when they differ; returns true if an error was logged.
7103 bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
7104 const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
7106 if (image_state->createInfo.samples != sample_count) {
7107 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
7108 HandleToUint64(image_state->image), msgCode,
7109 "%s for image 0x%" PRIx64 " was created with a sample count of %s but must be %s.", location,
7110 HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
7111 string_VkSampleCountFlagBits(sample_count));
// Layer intercept for vkCmdBlitImage: validate regions/layouts/filter, record, and forward down the chain.
7116 VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7117 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7118 const VkImageBlit *pRegions, VkFilter filter) {
7119 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7120 unique_lock_t lock(global_lock);
7122 auto cb_node = GetCBNode(dev_data, commandBuffer);
7123 auto src_image_state = GetImageState(dev_data, srcImage);
7124 auto dst_image_state = GetImageState(dev_data, dstImage);
7126 bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
7127 srcImageLayout, dstImageLayout, filter);
7130 PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
7133 dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
// Layer intercept for vkCmdCopyBufferToImage: validate when src buffer and dst image states resolve, record, forward.
7138 VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
7139 VkImageLayout dstImageLayout, uint32_t regionCount,
7140 const VkBufferImageCopy *pRegions) {
7141 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7142 unique_lock_t lock(global_lock);
7144 auto cb_node = GetCBNode(device_data, commandBuffer);
7145 auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7146 auto dst_image_state = GetImageState(device_data, dstImage);
7147 if (cb_node && src_buffer_state && dst_image_state) {
7148 skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
7149 regionCount, pRegions, "vkCmdCopyBufferToImage()");
7153 // TODO: report VU01244 here, or put in object tracker?
7156 PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
7159 device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
// Layer intercept for vkCmdCopyImageToBuffer: validate when src image and dst buffer states resolve, record, forward.
7163 VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7164 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7166 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7167 unique_lock_t lock(global_lock);
7169 auto cb_node = GetCBNode(device_data, commandBuffer);
7170 auto src_image_state = GetImageState(device_data, srcImage);
7171 auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7172 if (cb_node && src_image_state && dst_buffer_state) {
7173 skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
7174 regionCount, pRegions, "vkCmdCopyImageToBuffer()");
7178 // TODO: report VU01262 here, or put in object tracker?
7181 PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
7184 device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
// Validate vkCmdUpdateBuffer: dst buffer has bound memory and TRANSFER_DST usage, the queue supports
// transfer/graphics/compute, the command is legal in this CB state, and we are outside a render pass.
7188 static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
7190 skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
7191 // Validate that DST buffer has correct usage flags set
7192 skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7193 VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7194 skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
7195 VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
7196 skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
// vkCmdUpdateBuffer must be called outside of a render pass instance.
7197 skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
// Record after vkCmdUpdateBuffer: bind the dst buffer to the CB and defer marking its memory valid until submit.
7201 static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
7202 // Update bindings between buffer and cmd buffer
7203 AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
// Queued up and run at queue-submit time, when the write actually takes effect.
7204 std::function<bool()> function = [=]() {
7205 SetBufferMemoryValid(device_data, dst_buffer_state, true);
7208 cb_state->queue_submit_functions.push_back(function);
// Layer intercept for vkCmdUpdateBuffer: validate the dst buffer under global_lock, forward, then record.
7211 VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7212 VkDeviceSize dataSize, const uint32_t *pData) {
7214 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7215 unique_lock_t lock(global_lock);
7217 auto cb_state = GetCBNode(dev_data, commandBuffer);
7219 auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
7220 assert(dst_buff_state);
7221 skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
7224 dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7226 PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
// Layer intercept for vkCmdFillBuffer: validate/record only when the CB and dst buffer states resolve, then forward.
7231 VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7232 VkDeviceSize size, uint32_t data) {
7233 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7234 unique_lock_t lock(global_lock);
7235 auto cb_node = GetCBNode(device_data, commandBuffer);
7236 auto buffer_state = GetBufferState(device_data, dstBuffer);
7238 if (cb_node && buffer_state) {
7239 bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
7241 PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
7243 device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
// Layer intercept for vkCmdClearAttachments: pure validate-then-forward; the call is dropped when validation fails.
7251 VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7252 const VkClearAttachment *pAttachments, uint32_t rectCount,
7253 const VkClearRect *pRects) {
7255 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7257 lock_guard_t lock(global_lock);
7258 skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7260 if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
// Layer intercept for vkCmdClearColorImage: validate ranges/layout, record via the shared clear-image helper, forward.
7263 VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7264 const VkClearColorValue *pColor, uint32_t rangeCount,
7265 const VkImageSubresourceRange *pRanges) {
7266 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7267 unique_lock_t lock(global_lock);
7269 bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
// Same record helper is shared with vkCmdClearDepthStencilImage.
7271 PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7273 dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
// Layer intercept for vkCmdClearDepthStencilImage: validate ranges/layout, record via the shared clear-image helper, forward.
7277 VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7278 const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7279 const VkImageSubresourceRange *pRanges) {
7280 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7281 unique_lock_t lock(global_lock);
7283 bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7285 PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7287 dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
// Layer intercept for vkCmdResolveImage: validate src/dst image states and regions, record, then forward.
7291 VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7292 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7293 const VkImageResolve *pRegions) {
7294 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7295 unique_lock_t lock(global_lock);
7297 auto cb_node = GetCBNode(dev_data, commandBuffer);
7298 auto src_image_state = GetImageState(dev_data, srcImage);
7299 auto dst_image_state = GetImageState(dev_data, dstImage);
7301 bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
7304 PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
7306 dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
// Layer intercept for vkGetImageSubresourceLayout: validate the subresource request, then forward to the driver.
7311 VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
7312 VkSubresourceLayout *pLayout) {
7313 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7314 unique_lock_t lock(global_lock);
7316 bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
7319 device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
// Update the recorded stage mask for 'event' in both the command buffer's map and (when the queue is known)
// the queue's map. Invoked via the eventUpdates lambdas queued by CmdSetEvent/CmdResetEvent.
7323 bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7324 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7325 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7327 pCB->eventToStageMap[event] = stageMask;
7329 auto queue_data = dev_data->queueMap.find(queue);
7330 if (queue_data != dev_data->queueMap.end()) {
7331 queue_data->second.eventToStageMap[event] = stageMask;
// Layer intercept for vkCmdSetEvent: validate (graphics/compute queue, legal CB state, outside a render pass,
// GS/TS stage bits gated on enabled features), bind the event to this CB, and defer the stage-mask update to
// submit time via an eventUpdates lambda. The driver call is skipped on validation failure.
7336 VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7338 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7339 unique_lock_t lock(global_lock);
7340 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7342 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7343 VALIDATION_ERROR_1d402415);
7344 skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7345 skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
7346 skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
7347 VALIDATION_ERROR_1d4008fe);
7348 auto event_state = GetEventNode(dev_data, event);
7350 addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
7351 event_state->cb_bindings.insert(pCB);
7353 pCB->events.push_back(event);
// Track events written before any wait, for submit-time write/wait ordering checks.
7354 if (!pCB->waitedEvents.count(event)) {
7355 pCB->writeEventsBeforeWait.push_back(event);
// Actual stage-mask bookkeeping happens at queue-submit time, when the queue is known.
7357 pCB->eventUpdates.emplace_back([=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, stageMask); });
7360 if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
// Layer intercept for vkCmdResetEvent: mirrors CmdSetEvent's validation and bookkeeping, but the deferred
// submit-time update clears the event's stage mask to 0.
7363 VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7365 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7366 unique_lock_t lock(global_lock);
7367 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7369 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7370 VALIDATION_ERROR_1c402415);
7371 skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7372 skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
7373 skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
7374 VALIDATION_ERROR_1c400906);
7375 auto event_state = GetEventNode(dev_data, event);
7377 addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
7378 event_state->cb_bindings.insert(pCB);
7380 pCB->events.push_back(event);
7381 if (!pCB->waitedEvents.count(event)) {
7382 pCB->writeEventsBeforeWait.push_back(event);
7384 // TODO : Add check for VALIDATION_ERROR_32c008f8
// Reset is recorded as a submit-time stage-mask update to 0.
7385 pCB->eventUpdates.emplace_back(
7386 [=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
7389 if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
7392 // Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT
7393 static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
7394 return (inflags != VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
// ALL_GRAPHICS expands to every individual graphics-pipeline stage, TOP_OF_PIPE through BOTTOM_OF_PIPE.
7396 : (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
7397 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7398 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7399 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
7400 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
7401 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
7404 // Verify image barrier image state and that the image is consistent with FB image
// For barrier img_barrier (index img_index in the caller's pImageMemoryBarriers), checks that:
//  - the barrier image is one of the given framebuffer's attachments;
//  - that attachment is referenced by the active subpass (sub_desc) as depth/stencil, color, or resolve;
//  - oldLayout == newLayout (no layout transitions inside a render pass instance);
//  - the barrier's oldLayout matches the layout the subpass declares for the attachment.
// Returns true if any error was logged. rp_handle is used only for error reporting.
7405 static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
7406 VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
7407 uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
7409 const auto &fb_state = GetFramebufferState(device_data, framebuffer);
7411 const auto img_bar_image = img_barrier.image;
7412 bool image_match = false;
7413 bool sub_image_found = false; // Do we find a corresponding subpass description
7414 VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
7415 uint32_t attach_index = 0;
7416 uint32_t index_count = 0;
7417 // Verify that a framebuffer image matches barrier image
7418 for (const auto &fb_attach : fb_state->attachments) {
7419 if (img_bar_image == fb_attach.image) {
7421 attach_index = index_count;
7426 if (image_match) { // Make sure subpass is referring to matching attachment
// Check depth/stencil first, then scan color and resolve attachment references.
7427 if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
7428 sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
7429 sub_image_found = true;
7431 for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
7432 if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
7433 sub_image_layout = sub_desc.pColorAttachments[j].layout;
7434 sub_image_found = true;
7436 } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
7437 sub_image_layout = sub_desc.pResolveAttachments[j].layout;
7438 sub_image_found = true;
// Attachment exists in the FB but is not referenced by the active subpass description.
7443 if (!sub_image_found) {
7445 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
7446 VALIDATION_ERROR_1b800936,
7447 "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7448 ") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 ").",
7449 funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle);
7451 } else { // !image_match
7452 auto const fb_handle = HandleToUint64(fb_state->framebuffer);
7453 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7454 fb_handle, VALIDATION_ERROR_1b800936,
7455 "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7456 ") does not match an image from the current framebuffer (0x%" PRIx64 ").",
7457 funcName, img_index, HandleToUint64(img_bar_image), fb_handle);
// Layout transitions are not allowed inside a render pass instance.
7459 if (img_barrier.oldLayout != img_barrier.newLayout) {
7460 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7461 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b80093a,
7462 "%s: As the Image Barrier for image 0x%" PRIx64
7463 " is being executed within a render pass instance, oldLayout must equal newLayout yet they are %s and %s.",
7464 funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
7465 string_VkImageLayout(img_barrier.newLayout));
// Barrier layout must agree with the layout the subpass declared for this attachment.
7467 if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
7468 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7469 rp_handle, VALIDATION_ERROR_1b800938,
7470 "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7471 ") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
7472 ") as having layout %s, but image barrier has layout %s.",
7473 funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
7474 string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(sub_image_layout));
7480 // Validate image barriers within a renderPass
// For each image barrier: src/dst access masks must be subsets of the self-dependency's masks, queue family
// indices must both be VK_QUEUE_FAMILY_IGNORED (no ownership transfer in a render pass), and the barrier image
// must be consistent with the framebuffer/subpass (ValidateImageBarrierImage). Returns true if any error logged.
7481 static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7482 uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
7483 VkAccessFlags sub_src_access_mask, VkAccessFlags sub_dst_access_mask,
7484 uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
7486 for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
7487 const auto &img_barrier = image_barriers[i];
7488 const auto &img_src_access_mask = img_barrier.srcAccessMask;
// Subset test: a is a subset of b iff a == (b & a).
7489 if (img_src_access_mask != (sub_src_access_mask & img_src_access_mask)) {
7490 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7491 rp_handle, VALIDATION_ERROR_1b80092e,
7492 "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
7493 "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7494 funcName, i, img_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
7496 const auto &img_dst_access_mask = img_barrier.dstAccessMask;
7497 if (img_dst_access_mask != (sub_dst_access_mask & img_dst_access_mask)) {
7498 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7499 rp_handle, VALIDATION_ERROR_1b800930,
7500 "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
7501 "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7502 funcName, i, img_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
// Queue family ownership transfers are illegal inside a render pass instance.
7504 if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
7505 VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
7506 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7507 rp_handle, VALIDATION_ERROR_1b80093c,
7508 "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
7509 "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
7510 funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
7512 // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
7513 if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
7514 assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
7515 // Secondary CB case w/o FB specified delay validation
7516 cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) {
7517 return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
// FB is known (primary CB, or secondary with inheritance FB): validate the barrier image immediately.
7521 skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
7522 sub_desc, rp_handle, i, img_barrier);
7528 // Validate VUs for Pipeline Barriers that are within a renderPass
7529 // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
7530 static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7531 VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
7532 VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
7533 const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
7534 const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
7535 const VkImageMemoryBarrier *image_barriers) {
7537 auto rp_state = cb_state->activeRenderPass;
7538 const auto active_subpass = cb_state->activeSubpass;
7539 auto rp_handle = HandleToUint64(rp_state->renderPass);
7540 if (!rp_state->hasSelfDependency[active_subpass]) {
7542 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
7543 VALIDATION_ERROR_1b800928,
7544 "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64 " with no self-dependency specified.",
7545 funcName, active_subpass, rp_handle);
7547 assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
7548 // Grab ref to current subpassDescription up-front for use below
7549 const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
7550 const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[active_subpass]];
7551 const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
7552 const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
7553 if ((sub_src_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
7554 (src_stage_mask != (sub_src_stage_mask & src_stage_mask))) {
7555 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7556 rp_handle, VALIDATION_ERROR_1b80092a,
7557 "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of subpass "
7558 "%d of renderPass 0x%" PRIx64 ".",
7559 funcName, src_stage_mask, sub_src_stage_mask, active_subpass, rp_handle);
7561 if ((sub_dst_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
7562 (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask))) {
7563 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7564 rp_handle, VALIDATION_ERROR_1b80092c,
7565 "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of subpass "
7566 "%d of renderPass 0x%" PRIx64 ".",
7567 funcName, dst_stage_mask, sub_dst_stage_mask, active_subpass, rp_handle);
7569 if (0 != buffer_mem_barrier_count) {
7570 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7571 rp_handle, VALIDATION_ERROR_1b800934,
7572 "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass 0x%" PRIx64 ".", funcName,
7573 buffer_mem_barrier_count, active_subpass, rp_handle);
7575 const auto &sub_src_access_mask = sub_dep.srcAccessMask;
7576 const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
7577 for (uint32_t i = 0; i < mem_barrier_count; ++i) {
7578 const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
7579 if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
7580 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7581 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b80092e,
7582 "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
7583 "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7584 funcName, i, mb_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
7586 const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
7587 if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
7588 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7589 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b800930,
7590 "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
7591 "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7592 funcName, i, mb_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
7595 skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle,
7596 sub_src_access_mask, sub_dst_access_mask, image_mem_barrier_count, image_barriers);
7597 if (sub_dep.dependencyFlags != dependency_flags) {
7598 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7599 rp_handle, VALIDATION_ERROR_1b800932,
7600 "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value (0x%X) for "
7601 "subpass %d of renderPass 0x%" PRIx64 ".",
7602 funcName, dependency_flags, sub_dep.dependencyFlags, cb_state->activeSubpass, rp_handle);
7608 // Array to mask individual accessMask to corresponding stageMask
7609 // accessMask active bit position (0-31) maps to index
7610 const static VkPipelineStageFlags AccessMaskToPipeStage[20] = {
7611 // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
7612 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
7613 // VK_ACCESS_INDEX_READ_BIT = 1
7614 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
7615 // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
7616 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
7617 // VK_ACCESS_UNIFORM_READ_BIT = 3
7618 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7619 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7620 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
7621 // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
7622 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
7623 // VK_ACCESS_SHADER_READ_BIT = 5
7624 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7625 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7626 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
7627 // VK_ACCESS_SHADER_WRITE_BIT = 6
7628 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7629 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7630 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
7631 // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
7632 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
7633 // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
7634 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
7635 // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
7636 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
7637 // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
7638 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
7639 // VK_ACCESS_TRANSFER_READ_BIT = 11
7640 VK_PIPELINE_STAGE_TRANSFER_BIT,
7641 // VK_ACCESS_TRANSFER_WRITE_BIT = 12
7642 VK_PIPELINE_STAGE_TRANSFER_BIT,
7643 // VK_ACCESS_HOST_READ_BIT = 13
7644 VK_PIPELINE_STAGE_HOST_BIT,
7645 // VK_ACCESS_HOST_WRITE_BIT = 14
7646 VK_PIPELINE_STAGE_HOST_BIT,
7647 // VK_ACCESS_MEMORY_READ_BIT = 15
7648 VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
7649 // VK_ACCESS_MEMORY_WRITE_BIT = 16
7650 VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
7651 // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
7652 VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
7653 // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
7654 VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
7657 // Verify that all bits of access_mask are supported by the src_stage_mask
7658 static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
7659 // Early out if all commands set, or access_mask NULL
7660 if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
7662 stage_mask = ExpandPipelineStageFlags(stage_mask);
7664 // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
7665 while (access_mask) {
7666 index = (u_ffs(access_mask) - 1);
7668 // Must have "!= 0" compare to prevent warning from MSVC
7669 if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false; // early out
7670 access_mask &= ~(1 << index); // Mask off bit that's been checked
7675 namespace barrier_queue_families {
// Index into the per-object-type error-code tables and vu_summary strings below.
// Order must match image_error_codes / buffer_error_codes / vu_summary.
enum VuIndex {
    kSrcOrDstMustBeIgnore,
    kSpecialOrIgnoreOnly,
    kSrcIgnoreRequiresDstIgnore,
    kDstValidOrSpecialIfNotIgnore,
    kSrcValidOrSpecialIfNotIgnore,
    kSrcAndDestMustBeIgnore,
    kBothIgnoreOrBothValid,
    kSubmitQueueMustMatchSrcOrDst
};
// Human-readable one-line summaries for the barrier queue-family VUs, indexed by VuIndex.
static const char *vu_summary[] = {
    // kSrcOrDstMustBeIgnore
    "Source or destination queue family must be ignored.",
    // kSpecialOrIgnoreOnly
    "Source or destination queue family must be special or ignored.",
    // kSrcIgnoreRequiresDstIgnore
    "Destination queue family must be ignored if source queue family is.",
    // kDstValidOrSpecialIfNotIgnore
    "Destination queue family must be valid, ignored, or special.",
    // kSrcValidOrSpecialIfNotIgnore
    "Source queue family must be valid, ignored, or special.",
    // kSrcAndDestMustBeIgnore
    "Source and destination queue family must both be ignored.",
    // kBothIgnoreOrBothValid
    "Source and destination queue family must both be ignore or both valid.",
    // kSubmitQueueMustMatchSrcOrDst
    "Source or destination queue family must match submit queue family, if not ignored.",
};
7695 static const UNIQUE_VALIDATION_ERROR_CODE image_error_codes[] = {
7696 VALIDATION_ERROR_0a000aca, // VUID-VkImageMemoryBarrier-image-01381 -- kSrcOrDstMustBeIgnore
7697 VALIDATION_ERROR_0a000dcc, // VUID-VkImageMemoryBarrier-image-01766 -- kSpecialOrIgnoreOnly
7698 VALIDATION_ERROR_0a000962, // VUID-VkImageMemoryBarrier-image-01201 -- kSrcIgnoreRequiresDstIgnore
7699 VALIDATION_ERROR_0a000dd0, // VUID-VkImageMemoryBarrier-image-01768 -- kDstValidOrSpecialIfNotIgnore
7700 VALIDATION_ERROR_0a000dce, // VUID-VkImageMemoryBarrier-image-01767 -- kSrcValidOrSpecialIfNotIgnore
7701 VALIDATION_ERROR_0a00095e, // VUID-VkImageMemoryBarrier-image-01199 -- kSrcAndDestMustBeIgnore
7702 VALIDATION_ERROR_0a000960, // VUID-VkImageMemoryBarrier-image-01200 -- kBothIgnoreOrBothValid
7703 VALIDATION_ERROR_0a00096a, // VUID-VkImageMemoryBarrier-image-01205 -- kSubmitQueueMustMatchSrcOrDst
7706 static const UNIQUE_VALIDATION_ERROR_CODE buffer_error_codes[] = {
7707 VALIDATION_ERROR_0180094e, // VUID-VkBufferMemoryBarrier-buffer-01191 -- kSrcOrDstMustBeIgnore
7708 VALIDATION_ERROR_01800dc6, // VUID-VkBufferMemoryBarrier-buffer-01763 -- kSpecialOrIgnoreOnly
7709 VALIDATION_ERROR_01800952, // VUID-VkBufferMemoryBarrier-buffer-01193 -- kSrcIgnoreRequiresDstIgnore
7710 VALIDATION_ERROR_01800dca, // VUID-VkBufferMemoryBarrier-buffer-01765 -- kDstValidOrSpecialIfNotIgnore
7711 VALIDATION_ERROR_01800dc8, // VUID-VkBufferMemoryBarrier-buffer-01764 -- kSrcValidOrSpecialIfNotIgnore
7712 VALIDATION_ERROR_0180094c, // VUID-VkBufferMemoryBarrier-buffer-01190 -- kSrcAndDestMustBeIgnore
7713 VALIDATION_ERROR_01800950, // VUID-VkBufferMemoryBarrier-buffer-01192 -- kBothIgnoreOrBothValid
7714 VALIDATION_ERROR_01800958, // VUID-VkBufferMemoryBarrier-buffer-01196 -- kSubmitQueueMustMatchSrcOrDst
7717 class ValidatorState {
7719 ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7720 const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
7721 const UNIQUE_VALIDATION_ERROR_CODE *val_codes)
7722 : report_data_(device_data->report_data),
7723 func_name_(func_name),
7724 cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
7725 barrier_handle64_(barrier_handle64),
7726 sharing_mode_(sharing_mode),
7727 object_type_(object_type),
7728 val_codes_(val_codes),
7729 limit_(static_cast<uint32_t>(device_data->phys_dev_properties.queue_family_properties.size())),
7730 mem_ext_(device_data->extensions.vk_khr_external_memory) {}
7732 // Create a validator state from an image state... reducing the image specific to the generic version.
7733 ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7734 const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
7735 : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
7736 kVulkanObjectTypeImage, image_error_codes) {}
7738 // Create a validator state from an buffer state... reducing the buffer specific to the generic version.
7739 ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7740 const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
7741 : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
7742 kVulkanObjectTypeImage, buffer_error_codes) {}
7744 // Log the messages using boilerplate from object state, and Vu specific information from the template arg
7745 // One and two family versions, in the single family version, Vu holds the name of the passed parameter
7746 bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
7747 const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
7748 const char *annotation = GetFamilyAnnotation(family);
7749 return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
7750 val_code, "%s: Barrier using %s 0x%" PRIx64 " created with sharingMode %s, has %s %u%s. %s", func_name_,
7751 GetTypeString(), barrier_handle64_, GetModeString(), param_name, family, annotation, vu_summary[vu_index]);
7754 bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
7755 const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
7756 const char *src_annotation = GetFamilyAnnotation(src_family);
7757 const char *dst_annotation = GetFamilyAnnotation(dst_family);
7758 return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
7760 "%s: Barrier using %s 0x%" PRIx64
7761 " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
7762 func_name_, GetTypeString(), barrier_handle64_, GetModeString(), src_family, src_annotation, dst_family,
7763 dst_annotation, vu_summary[vu_index]);
7766 // This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed
7767 // data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceed the guarantees of validity for
7768 // application input.
7769 static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
7770 const ValidatorState &val) {
7771 auto queue_data_it = device_data->queueMap.find(queue);
7772 if (queue_data_it == device_data->queueMap.end()) return false;
7774 uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
7775 if ((src_family != queue_family) && (dst_family != queue_family)) {
7776 const UNIQUE_VALIDATION_ERROR_CODE val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
7777 const char *src_annotation = val.GetFamilyAnnotation(src_family);
7778 const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
7779 return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
7780 HandleToUint64(queue), val_code,
7781 "%s: Barrier submitted to queue with family index %u, using %s 0x%" PRIx64
7782 " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
7783 "vkQueueSubmit", queue_family, val.GetTypeString(), val.barrier_handle64_, val.GetModeString(),
7784 src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
7788 // Logical helpers for semantic clarity
7789 inline bool KhrExternalMem() const { return mem_ext_; }
7790 inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
7791 inline bool IsSpecial(uint32_t queue_family) const {
7792 return (queue_family == VK_QUEUE_FAMILY_EXTERNAL_KHR) || (queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT);
7794 inline bool IsValidOrSpecial(uint32_t queue_family) const {
7795 return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
7797 inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }
7799 // Helpers for LogMsg (and log_msg)
7800 const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
7802 // Descriptive text for the various types of queue family index
7803 const char *GetFamilyAnnotation(uint32_t family) const {
7804 const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
7805 const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
7806 const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
7807 const char *valid = " (VALID)";
7808 const char *invalid = " (INVALID)";
7810 case VK_QUEUE_FAMILY_EXTERNAL_KHR:
7812 case VK_QUEUE_FAMILY_FOREIGN_EXT:
7814 case VK_QUEUE_FAMILY_IGNORED:
7817 if (IsValid(family)) {
7823 const char *GetTypeString() const { return object_string[object_type_]; }
7824 VkSharingMode GetSharingMode() const { return sharing_mode_; }
7827 const debug_report_data *const report_data_;
7828 const char *const func_name_;
7829 const uint64_t cb_handle64_;
7830 const uint64_t barrier_handle64_;
7831 const VkSharingMode sharing_mode_;
7832 const VulkanObjectType object_type_;
7833 const UNIQUE_VALIDATION_ERROR_CODE *val_codes_;
7834 const uint32_t limit_;
7835 const bool mem_ext_;
7838 bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
7839 const uint32_t src_queue_family, const uint32_t dst_queue_family) {
7842 const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
7843 const bool src_ignored = val.IsIgnored(src_queue_family);
7844 const bool dst_ignored = val.IsIgnored(dst_queue_family);
7845 if (val.KhrExternalMem()) {
7846 if (mode_concurrent) {
7847 if (!(src_ignored || dst_ignored)) {
7848 skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
7850 if ((src_ignored && !(dst_ignored || val.IsSpecial(dst_queue_family))) ||
7851 (dst_ignored && !(src_ignored || val.IsSpecial(src_queue_family)))) {
7852 skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
7855 // VK_SHARING_MODE_EXCLUSIVE
7856 if (src_ignored && !dst_ignored) {
7857 skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
7859 if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
7860 skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
7862 if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
7863 skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
7867 // No memory extension
7868 if (mode_concurrent) {
7869 if (!src_ignored || !dst_ignored) {
7870 skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
7873 // VK_SHARING_MODE_EXCLUSIVE
7874 if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
7875 skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
7879 if (!mode_concurrent && !src_ignored && !dst_ignored) {
7880 // Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
7881 // TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
7882 // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
7883 // to a local queue of update_state_actions or something.
7884 cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
7885 return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
7890 } // namespace barrier_queue_families
7892 // Type specific wrapper for image barriers
7893 bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
7894 const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
7895 // State data is required
7900 // Create the validator state from the image state
7901 barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
7902 const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
7903 const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
7904 return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
7907 // Type specific wrapper for buffer barriers
7908 bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
7909 const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
7910 // State data is required
7915 // Create the validator state from the buffer state
7916 barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
7917 const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
7918 const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
7919 return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
7922 static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7923 VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
7924 const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7925 const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7926 const VkImageMemoryBarrier *pImageMemBarriers) {
7928 for (uint32_t i = 0; i < memBarrierCount; ++i) {
7929 const auto &mem_barrier = pMemBarriers[i];
7930 if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
7931 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7932 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7933 "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7934 mem_barrier.srcAccessMask, src_stage_mask);
7936 if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
7937 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7938 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7939 "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7940 mem_barrier.dstAccessMask, dst_stage_mask);
7943 for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7944 auto mem_barrier = &pImageMemBarriers[i];
7945 if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
7946 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7947 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7948 "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7949 mem_barrier->srcAccessMask, src_stage_mask);
7951 if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
7952 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7953 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7954 "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7955 mem_barrier->dstAccessMask, dst_stage_mask);
7958 auto image_data = GetImageState(device_data, mem_barrier->image);
7959 skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);
7961 if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7962 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7963 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0a00095c,
7964 "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
7968 // There is no VUID for this, but there is blanket text:
7969 // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
7970 // recording commands in a command buffer."
7971 // TODO: Update this when VUID is defined
7972 skip |= ValidateMemoryIsBoundToImage(device_data, image_data, funcName, VALIDATION_ERROR_UNDEFINED);
7974 auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
7975 skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
7977 std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
7978 skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
7979 param_name.c_str());
7983 for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7984 auto mem_barrier = &pBufferMemBarriers[i];
7985 if (!mem_barrier) continue;
7987 if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
7988 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7989 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7990 "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7991 mem_barrier->srcAccessMask, src_stage_mask);
7993 if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
7994 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7995 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7996 "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7997 mem_barrier->dstAccessMask, dst_stage_mask);
7999 // Validate buffer barrier queue family indices
8000 auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
8001 skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);
8004 // There is no VUID for this, but there is blanket text:
8005 // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
8006 // recording commands in a command buffer"
8007 // TODO: Update this when VUID is defined
8008 skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, funcName, VALIDATION_ERROR_UNDEFINED);
8010 auto buffer_size = buffer_state->createInfo.size;
8011 if (mem_barrier->offset >= buffer_size) {
8013 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8014 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_01800946,
8015 "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
8016 funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
8017 HandleToUint64(buffer_size));
8018 } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8020 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8021 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0180094a,
8022 "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8023 " whose sum is greater than total size 0x%" PRIx64 ".",
8024 funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
8025 HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
8032 bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
8033 VkPipelineStageFlags sourceStageMask) {
8035 VkPipelineStageFlags stageMask = 0;
8036 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
8037 for (uint32_t i = 0; i < eventCount; ++i) {
8038 auto event = pCB->events[firstEventIndex + i];
8039 auto queue_data = dev_data->queueMap.find(queue);
8040 if (queue_data == dev_data->queueMap.end()) return false;
8041 auto event_data = queue_data->second.eventToStageMap.find(event);
8042 if (event_data != queue_data->second.eventToStageMap.end()) {
8043 stageMask |= event_data->second;
8045 auto global_event_data = GetEventNode(dev_data, event);
8046 if (!global_event_data) {
8047 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8048 HandleToUint64(event), DRAWSTATE_INVALID_EVENT,
8049 "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
8051 stageMask |= global_event_data->stageMask;
8055 // TODO: Need to validate that host_bit is only set if set event is called
8056 // but set event can be called at any time.
8057 if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8058 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8059 HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1e62d401,
8060 "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
8061 "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
8062 "vkSetEvent but instead is 0x%X.",
8063 sourceStageMask, stageMask);
8068 // Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
8069 static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
8070 {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8071 {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8072 {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8073 {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8074 {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8075 {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8076 {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8077 {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8078 {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8079 {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8080 {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8081 {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
8082 {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
8083 {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
8085 static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
8086 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
8087 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
8088 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
8089 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
8090 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
8091 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
8092 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
8093 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
8094 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
8095 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
8096 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
8097 VK_PIPELINE_STAGE_TRANSFER_BIT,
8098 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
8100 bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
8101 VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
8102 UNIQUE_VALIDATION_ERROR_CODE error_code) {
8104 // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
8105 for (const auto &item : stage_flag_bit_array) {
8106 if (stage_mask & item) {
8107 if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
8108 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8109 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code,
8110 "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
8111 function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
8118 // Check if all barriers are of a given operation type.
8119 template <typename Barrier, typename OpCheck>
8120 static bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
8121 if (!pool) return false;
8123 for (uint32_t b = 0; b < count; b++) {
8124 if (!op_check(pool, barriers + b)) return false;
// Classification of a set of barriers by queue-family ownership-transfer kind.
// Computed by ComputeBarrierOperationsType and used to decide which stage masks need
// queue-capability validation (see ValidateStageMasksAgainstQueueCapabilities).
8129 enum BarrierOperationsType {
8130     kAllAcquire,  // All Barrier operations are "ownership acquire" operations
8131     kAllRelease,  // All Barrier operations are "ownership release" operations
8132     kGeneral,     // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
8135 // Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
// Classifies the buffer and image memory barriers recorded in cb_state's pool:
// returns kAllRelease when every barrier is an ownership-release, kAllAcquire when every
// barrier is an ownership-acquire, and kGeneral otherwise (including the no-barrier case).
8136 BarrierOperationsType ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
8137                                                    const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count,
8138                                                    const VkImageMemoryBarrier *image_barriers) {
8139     auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
8140     BarrierOperationsType op_type = kGeneral;
8142     // Look at the barrier details only if they exist
8143     // Note: AllTransferOp returns true for count == 0
8144     if ((buffer_barrier_count + image_barrier_count) != 0) {
8145         if (AllTransferOp(pool, IsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
8146             AllTransferOp(pool, IsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
8147             op_type = kAllRelease;
8148         } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
8149                    AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
8150             op_type = kAllAcquire;
// Validate that source_stage_mask and dest_stage_mask only contain stages supported by
// the queue family of the command pool that cb_state was allocated from. Per the
// barrier classification: an all-acquire barrier set exempts the source mask from the
// check, an all-release set exempts the dest mask. A mask containing
// VK_PIPELINE_STAGE_ALL_COMMANDS_BIT is always accepted without a per-bit check.
8158 bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
8158                                                 VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
8159                                                 BarrierOperationsType barrier_op_type, const char *function,
8160                                                 UNIQUE_VALIDATION_ERROR_CODE error_code) {
8162     uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
8163     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
8164     auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
8166     // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
8167     // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
8168     // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
8170     if (queue_family_index < physical_device_state->queue_family_properties.size()) {
8171         VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
8173         // Only check the source stage mask if any barriers aren't "acquire ownership"
8174         if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8175             skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
8176                                                      function, "srcStageMask", error_code);
8178         // Only check the dest stage mask if any barriers aren't "release ownership"
8179         if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8180             skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
8181                                                      function, "dstStageMask", error_code);
// Layer intercept for vkCmdWaitEvents: validates stage masks against queue capabilities
// and GS/TS feature enables, validates the barriers, then records event bindings and a
// deferred (queue-submit-time) stage-mask check before calling down the dispatch chain.
8187 VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
8188                                          VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
8189                                          uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8190                                          uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8191                                          uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8193     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8194     unique_lock_t lock(global_lock);
8195     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
     // Classify barriers so src/dst stage-mask checks can be relaxed for pure
     // acquire/release ownership-transfer barrier sets.
8197         auto barrier_op_type = ComputeBarrierOperationsType(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8198                                                             imageMemoryBarrierCount, pImageMemoryBarriers);
8199         skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, barrier_op_type,
8200                                                            "vkCmdWaitEvents", VALIDATION_ERROR_1e600918);
8201         skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
8202                                              VALIDATION_ERROR_1e600912);
8203         skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
8204                                              VALIDATION_ERROR_1e600914);
8205         skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8206                                       VALIDATION_ERROR_1e602415);
8207         skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8208         skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
8209         skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
8210                                  pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
8211                                  pImageMemoryBarriers);
         // Record state: bind each event to this command buffer and remember it so the
         // deferred eventUpdates lambda can validate stage masks at submit time.
8213         auto first_event_index = cb_state->events.size();
8214         for (uint32_t i = 0; i < eventCount; ++i) {
8215             auto event_state = GetEventNode(dev_data, pEvents[i]);
8217                 addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent},
8219                 event_state->cb_bindings.insert(cb_state);
8221             cb_state->waitedEvents.insert(pEvents[i]);
8222             cb_state->events.push_back(pEvents[i]);
         // Deferred check executed per-queue at submit time against the recorded events.
8224         cb_state->eventUpdates.emplace_back(
8225             [=](VkQueue q) { return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
8226         TransitionImageLayouts(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
8231         dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8232                                                memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8233                                                imageMemoryBarrierCount, pImageMemoryBarriers);
// Pre-call validation for vkCmdPipelineBarrier: queue-capability and GS/TS checks on the
// stage masks, command/queue-flag checks, render-pass-scoped barrier rules when inside a
// render pass (early-out on failure there to avoid redundant errors), then the general
// barrier checks.
8236 static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
8237                                               VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8238                                               uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8239                                               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8240                                               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8242     auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8243                                                         imageMemoryBarrierCount, pImageMemoryBarriers);
8244     skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, barrier_op_type,
8245                                                        "vkCmdPipelineBarrier", VALIDATION_ERROR_1b80093e);
8246     skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
8247                                   VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
8248     skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8249     skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
8250                                          VALIDATION_ERROR_1b800924);
8251     skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
8252                                          VALIDATION_ERROR_1b800926);
8253     if (cb_state->activeRenderPass) {
8254         skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
8255                                                    dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8256                                                    pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8257         if (skip) return true;  // Early return to avoid redundant errors from below calls
     // NOTE(review): the ValidateBarriersToImages result appears to be discarded here —
     // confirm against the surrounding lines whether it folds into 'skip'.
8260     ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
8261     skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
8262                              pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
8263                              pImageMemoryBarriers);
// State update for vkCmdPipelineBarrier: apply the image layout transitions described by
// the image memory barriers to the layer's tracked layout state.
8267 static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
8268                                             uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8269     TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
// Layer intercept for vkCmdPipelineBarrier: validate under the global lock, record the
// layout transitions, then forward to the driver via the dispatch table.
8272 VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
8273                                               VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8274                                               uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8275                                               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8276                                               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8278     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8279     unique_lock_t lock(global_lock);
8280     GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
8282         skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
8283                                                   memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8284                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8286             PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8293     device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8294                                                    memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8295                                                    pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
// Deferred query-state updater, run at queue-submit time via cb_state->queryUpdates
// lambdas (see CmdEndQuery / CmdResetQueryPool / CmdWriteTimestamp). Marks the query as
// available/unavailable ('value') in both the command buffer's and the queue's maps.
8299 static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8300     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8301     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8303         pCB->queryToStateMap[object] = value;
8305         auto queue_data = dev_data->queueMap.find(queue);
8306         if (queue_data != dev_data->queueMap.end()) {
8307             queue_data->second.queryToStateMap[object] = value;
// Layer intercept for vkCmdBeginQuery: validate queue flags and command state, forward
// to the driver, then track the query as active/started and bind the pool to this CB.
8312 VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8314     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8315     unique_lock_t lock(global_lock);
8316     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8318         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8319                                       VALIDATION_ERROR_17802415);
8320         skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8326     dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
     // Record state after the driver call: the query is now active in this CB.
8330         QueryObject query = {queryPool, slot};
8331         pCB->activeQueries.insert(query);
8332         pCB->startedQueries.insert(query);
8333         addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8334                                 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
// Layer intercept for vkCmdEndQuery: errors if the query was never begun in this command
// buffer, validates queue flags and command state, forwards to the driver, then removes
// the query from the active set and defers marking it available until submit time.
8338 VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8340     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8341     unique_lock_t lock(global_lock);
8342     QueryObject query = {queryPool, slot};
8343     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8345         if (!cb_state->activeQueries.count(query)) {
8346             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8347                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1ae00f06,
8348                             "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d.", HandleToUint64(queryPool),
8351         skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8352                                       VALIDATION_ERROR_1ae02415);
8353         skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "VkCmdEndQuery()");
8359     dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
     // At submit time, setQueryState marks the query available (true).
8363         cb_state->activeQueries.erase(query);
8364         cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
8365         addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8366                                 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
// Layer intercept for vkCmdResetQueryPool: must be outside a render pass; validates
// command/queue-flag state, forwards to the driver, then defers marking each query in
// [firstQuery, firstQuery + queryCount) unavailable (false) until submit time.
8370 VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8371                                              uint32_t queryCount) {
8373     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8374     unique_lock_t lock(global_lock);
8375     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8376     skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
8377     skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
8378     skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8379                                   VALIDATION_ERROR_1c602415);
8384     dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
     // Snapshot the events waited on so far for each reset query, and defer the
     // unavailable-state update to submit time.
8387     for (uint32_t i = 0; i < queryCount; i++) {
8388         QueryObject query = {queryPool, firstQuery + i};
8389         cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
8390         cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, false); });
8392     addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8393                             {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
// Returns true when the query at (queryPool, queryIndex) has no available result:
// either the queue-local state map marks it unavailable, or — when the queue has no
// entry — the device-wide map is missing the query or marks it unavailable.
8396 static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
8397     QueryObject query = {queryPool, queryIndex};
8398     auto query_data = queue_data->queryToStateMap.find(query);
8399     if (query_data != queue_data->queryToStateMap.end()) {
8400         if (!query_data->second) return true;
8402         auto it = dev_data->queryToStateMap.find(query);
8403         if (it == dev_data->queryToStateMap.end() || !it->second) return true;
// Deferred (submit-time) check for vkCmdCopyQueryPoolResults: logs DRAWSTATE_INVALID_QUERY
// for every query in [firstQuery, firstQuery + queryCount) whose result is not available
// on the submitting queue (see IsQueryInvalid).
8409 static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8411     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8412     auto queue_data = GetQueueState(dev_data, queue);
8413     if (!queue_data) return false;
8414     for (uint32_t i = 0; i < queryCount; i++) {
8415         if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
8416             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8417                             HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_QUERY,
8418                             "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8419                             HandleToUint64(queryPool), firstQuery + i);
// Layer intercept for vkCmdCopyQueryPoolResults: validates the destination buffer
// (bound memory, TRANSFER_DST usage), command/queue-flag/render-pass state, forwards to
// the driver, then records buffer bindings plus deferred submit-time work: mark the dst
// buffer memory valid and validate query availability (validateQuery).
8425 VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8426                                                    uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8427                                                    VkDeviceSize stride, VkQueryResultFlags flags) {
8429     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8430     unique_lock_t lock(global_lock);
8432     auto cb_node = GetCBNode(dev_data, commandBuffer);
8433     auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
8434     if (cb_node && dst_buff_state) {
8435         skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
8436         // Validate that DST buffer has correct usage flags set
8438             ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
8439                                      "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8440         skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
8441                                       VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
8442         skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8443         skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
8449     dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
     // Post-call state tracking: buffer binding plus deferred submit-time updates.
8453     if (cb_node && dst_buff_state) {
8454         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8455         cb_node->queue_submit_functions.emplace_back([=]() {
8456             SetBufferMemoryValid(dev_data, dst_buff_state, true);
8459         cb_node->queryUpdates.emplace_back([=](VkQueue q) { return validateQuery(q, cb_node, queryPool, firstQuery, queryCount); });
8460         addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8461                                 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
// Layer intercept for vkCmdPushConstants: validates queue flags, command state, the
// push-constant range (offset/size), that stageFlags is non-zero, and that the
// (stageFlags, offset, size) triple is consistent with the pipeline layout's
// VkPushConstantRange entries in both directions (range stages covered by the command,
// and command stages covered by the overlapping ranges). Only calls down on success.
8465 VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
8466                                             uint32_t offset, uint32_t size, const void *pValues) {
8468     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8469     unique_lock_t lock(global_lock);
8470     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8472         skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8473                                       VALIDATION_ERROR_1bc02415);
8474         skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8476     skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8477     if (0 == stageFlags) {
8479             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8480                     HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc2dc03, "vkCmdPushConstants() call has no stageFlags set.");
8483     // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
8484     // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
8486         const auto &ranges = *getPipelineLayout(dev_data, layout)->push_constant_ranges;
8487         VkShaderStageFlags found_stages = 0;
8488         for (const auto &range : ranges) {
             // A range "overlaps" when [offset, offset+size) lies entirely within it.
8489             if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
8490                 VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
8491                 if (matching_stages != range.stageFlags) {
8492                     // VALIDATION_ERROR_1bc00e08 VUID-vkCmdPushConstants-offset-01796
8493                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8494                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
8495                                     VALIDATION_ERROR_1bc00e08,
8496                                     "vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
8498                                     "must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
8499                                     "), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout 0x%" PRIx64 ".",
8500                                     (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
8501                                     HandleToUint64(layout));
8504                 // Accumulate all stages we've found
8505                 found_stages = matching_stages | found_stages;
         // Every stage the caller named must have been matched by some overlapping range.
8508         if (found_stages != stageFlags) {
8509             // VALIDATION_ERROR_1bc00e06 VUID-vkCmdPushConstants-offset-01795
8510             uint32_t missing_stages = ~found_stages & stageFlags;
8511             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8512                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc00e06,
8513                             "vkCmdPushConstants(): stageFlags = 0x%" PRIx32 ", VkPushConstantRange in pipeline layout 0x%" PRIx64
8514                             " overlapping offset = %d and size = %d, do not contain stageFlags 0x%" PRIx32 ".",
8515                             (uint32_t)stageFlags, HandleToUint64(layout), offset, size, missing_stages);
8519     if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
// Layer intercept for vkCmdWriteTimestamp: validates queue flags (graphics/compute/
// transfer all allowed) and command state, forwards to the driver, then defers marking
// the timestamp query available until submit time via setQueryState.
8522 VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
8523                                              VkQueryPool queryPool, uint32_t slot) {
8525     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8526     unique_lock_t lock(global_lock);
8527     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8530             ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()",
8531                                   VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, VALIDATION_ERROR_1e802415);
8532         skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8538     dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8542         QueryObject query = {queryPool, slot};
8543         cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
// Framebuffer-creation helper: for each used attachment reference, verify the image
// backing the referenced VkImageView was created with 'usage_flag' set; logs 'error_code'
// on a mismatch. Unused references (VK_ATTACHMENT_UNUSED) and out-of-range indices are
// skipped (counts are validated elsewhere — the bound check only prevents invalid access).
8547 static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8548                        const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
8549                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
8552     for (uint32_t attach = 0; attach < count; attach++) {
8553         if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8554             // Attachment counts are verified elsewhere, but prevent an invalid access
8555             if (attachments[attach].attachment < fbci->attachmentCount) {
8556                 const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8557                 auto view_state = GetImageViewState(dev_data, *image_view);
8559                     const VkImageCreateInfo *ici = &GetImageState(dev_data, view_state->create_info.image)->createInfo;
8560                     if (ici != nullptr) {
8561                         if ((ici->usage & usage_flag) == 0) {
8562                             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8563                                             VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
8564                                             "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
8565                                             "IMAGE_USAGE flags (%s).",
8566                                             attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
8576 // Validate VkFramebufferCreateInfo which includes:
8577 // 1. attachmentCount equals renderPass attachmentCount
8578 // 2. corresponding framebuffer and renderpass attachments have matching formats
8579 // 3. corresponding framebuffer and renderpass attachments have matching sample counts
8580 // 4. fb attachments only have a single mip level
8581 // 5. fb attachment dimensions are each at least as large as the fb
8582 // 6. fb attachments use identity swizzle
8583 // 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8584 // 8. fb dimensions are within physical device limits
8585 static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8588     auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
8590         const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
         // Check 1: attachment counts must match the render pass.
8591         if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8592             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8593                             HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006d8,
8594                             "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
8595                             "of %u of renderPass (0x%" PRIx64 ") being used to create Framebuffer.",
8596                             pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass));
8598             // attachmentCounts match, so make sure corresponding attachment details line up
8599             const VkImageView *image_views = pCreateInfo->pAttachments;
8600             for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8601                 auto view_state = GetImageViewState(dev_data, image_views[i]);
8602                 auto &ivci = view_state->create_info;
                 // Check 2: view format must match the render pass attachment format.
8603                 if (ivci.format != rpci->pAttachments[i].format) {
8605                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8606                                 HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e0,
8607                                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
8608                                 "match the format of %s used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
8609                                 i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
8610                                 HandleToUint64(pCreateInfo->renderPass));
                 // Check 3: sample counts must match too.
8612                 const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
8613                 if (ici->samples != rpci->pAttachments[i].samples) {
8615                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8616                         HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e2,
8617                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
8618                         "samples used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
8619                         i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8620                         HandleToUint64(pCreateInfo->renderPass));
8622                 // Verify that view only has a single mip level
8623                 if (ivci.subresourceRange.levelCount != 1) {
8624                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8625                                     0, VALIDATION_ERROR_094006e6,
8626                                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
8627                                     "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
8628                                     i, ivci.subresourceRange.levelCount);
                 // Check 5: the selected mip's extent (and layer count) must cover the FB.
8630                 const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
8631                 uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8632                 uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
8633                 if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8634                     (mip_height < pCreateInfo->height)) {
8635                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8636                                     0, VALIDATION_ERROR_094006e4,
8637                                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
8638                                     "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
8639                                     "attachment #%u, framebuffer:\n"
8642                                     "layerCount: %u, %u\n",
8643                                     i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8644                                     pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                 // Check 6: each component must be identity (or the matching explicit channel).
8646                 if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
8647                     ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
8648                     ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
8649                     ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
8650                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8651                                     0, VALIDATION_ERROR_094006e8,
8652                                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
8653                                     "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
8659                                     i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
8660                                     string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
8664         // Verify correct attachment usage flags
8665         for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8666             // Verify input attachments:
8668                 MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
8669                            pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
8670             // Verify color attachments:
8672                 MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
8673                            pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
8674             // Verify depth/stencil attachments:
8675             if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8676                 skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8677                                    VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
8681     // Verify FB dimensions are within physical device limits
8682     if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
8683         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8684                         VALIDATION_ERROR_094006ec,
8685                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
8686                         "width: %u, device max: %u\n",
8687                         pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth);
8689     if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
8690         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8691                         VALIDATION_ERROR_094006f0,
8692                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
8693                         "height: %u, device max: %u\n",
8694                         pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight);
8696     if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
8697         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8698                         VALIDATION_ERROR_094006f4,
8699                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
8700                         "layers: %u, device max: %u\n",
8701                         pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
8703     // Verify FB dimensions are greater than zero
8704     if (pCreateInfo->width <= 0) {
8705         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8706                         VALIDATION_ERROR_094006ea,
8707                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
8709     if (pCreateInfo->height <= 0) {
8710         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8711                         VALIDATION_ERROR_094006ee,
8712                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
8714     if (pCreateInfo->layers <= 0) {
8715         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8716                         VALIDATION_ERROR_094006f2,
8717                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
8722 // Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8723 //  Return true if an error is encountered and callback returns true to skip call down chain
8724 //   false indicates that call down chain should proceed
8725 static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8726     // TODO : Verify that renderPass FB is created with is compatible with FB
8728     skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8732 // CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
// Builds a FRAMEBUFFER_STATE (shadowing pCreateInfo and holding a shared pointer to the
// render pass state), records each attachment's view/image in fb_state->attachments,
// then stores the state in dev_data->frameBufferMap keyed by the new handle.
8733 static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
8734     // Shadow create info and store in map
8735     std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
8736         new FRAMEBUFFER_STATE(fb, pCreateInfo, GetRenderPassStateSharedPtr(dev_data, pCreateInfo->renderPass)));
8738     for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8739         VkImageView view = pCreateInfo->pAttachments[i];
8740         auto view_state = GetImageViewState(dev_data, view);
8744         MT_FB_ATTACHMENT_INFO fb_info;
8745         fb_info.view_state = view_state;
8746         fb_info.image = view_state->create_info.image;
8747         fb_state->attachments.push_back(fb_info);
8749     dev_data->frameBufferMap[fb] = std::move(fb_state);
// Layer intercept for vkCreateFramebuffer: validate under the global lock, call down the chain,
// then record state for the new framebuffer on success.
8752 VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8753 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
8754 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8755 unique_lock_t lock(global_lock);
8756 bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
// Validation failure short-circuits: the driver is never called.
8759 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
8761 VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8763 if (VK_SUCCESS == result) {
8765 PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
// Depth-first search backwards through the subpass DAG: returns true if 'dependent' is reachable from
// 'index' via prev edges (i.e. an implicit dependency path exists). 'processed_nodes' guards against
// revisiting nodes (and thus against cycles).
8771 static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
8772 std::unordered_set<uint32_t> &processed_nodes) {
8773 // If we have already checked this node we have not found a dependency path so return false.
8774 if (processed_nodes.count(index)) return false;
8775 processed_nodes.insert(index);
8776 const DAGNode &node = subpass_to_node[index];
8777 // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8778 if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8779 for (auto elem : node.prev) {
8780 if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
// For a given 'subpass', verify that a dependency (direct edge or transitive path in the DAG, in either
// direction) exists to every subpass in 'dependent_subpasses'. Errors are accumulated into 'skip'.
8788 static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
8789 const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
8792 // Loop through all subpasses that share the same attachment and make sure a dependency exists
8793 for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
// A subpass never needs a dependency on itself.
8794 if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
8795 const DAGNode &node = subpass_to_node[subpass];
8796 // Check for a specified dependency between the two nodes. If one exists we are done.
8797 auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8798 auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8799 if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8800 // If no dependency exits an implicit dependency still might. If not, throw an error.
8801 std::unordered_set<uint32_t> processed_nodes;
// Search for a transitive path in either direction before reporting the error.
8802 if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8803 FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8804 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8805 DRAWSTATE_INVALID_RENDERPASS,
8806 "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8807 dependent_subpasses[k]);
// Recursive check that an 'attachment' read by a later subpass is either written by subpass 'index' or
// listed in its pPreserveAttachments. Returns true if this subpass (or a predecessor) writes the
// attachment; at depth > 0 a written-but-not-preserved attachment produces an error into 'skip'.
8815 static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8816 const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
8817 const DAGNode &node = subpass_to_node[index];
8818 // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8819 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8820 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8821 if (attachment == subpass.pColorAttachments[j].attachment) return true;
8823 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8824 if (attachment == subpass.pInputAttachments[j].attachment) return true;
8826 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8827 if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
8829 bool result = false;
8830 // Loop through previous nodes and see if any of them write to the attachment.
8831 for (auto elem : node.prev) {
8832 result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
8834 // If the attachment was written to by a previous node than this node needs to preserve it.
8835 if (result && depth > 0) {
8836 bool has_preserved = false;
8837 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8838 if (subpass.pPreserveAttachments[j] == attachment) {
8839 has_preserved = true;
8843 if (!has_preserved) {
8844 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8845 DRAWSTATE_INVALID_RENDERPASS,
8846 "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
// Tests whether the half-open range [offset1, offset1+size1) overlaps [offset2, offset2+size2),
// using strict comparisons against range2's interior.
// NOTE(review): the strict </> comparisons mean two identical ranges, and the case where range1
// strictly contains range2, both evaluate to false (not overlapping) — confirm this is intended.
8853 bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8854 return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
8855 ((offset1 > offset2) && (offset1 < (offset2 + size2)));
// Two image subresource regions overlap only if both their mip-level ranges and their
// array-layer ranges overlap.
8858 bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8859 return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8860 isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
// Validate that all attachment usages across the render pass' subpasses are covered by explicit or
// implicit (transitive) subpass dependencies, that aliasing attachments declare MAY_ALIAS, and that
// attachments read by a subpass are preserved by intermediate subpasses. Errors accumulate in 'skip'.
8863 static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
8864 RENDER_PASS_STATE const *renderPass) {
8865 auto const pFramebufferInfo = framebuffer->createInfo.ptr();
8867 auto const pCreateInfo = renderPass->createInfo.ptr();
8868 auto const &subpass_to_node = renderPass->subpassToNode;
// For each attachment index: the subpasses that write it, read it, and the attachments it aliases.
8869 std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8870 std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8871 std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8872 // Find overlapping attachments
8873 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8874 for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8875 VkImageView viewi = pFramebufferInfo->pAttachments[i];
8876 VkImageView viewj = pFramebufferInfo->pAttachments[j];
// Same view handle => trivially overlapping.
8877 if (viewi == viewj) {
8878 overlapping_attachments[i].push_back(j);
8879 overlapping_attachments[j].push_back(i);
8882 auto view_state_i = GetImageViewState(dev_data, viewi);
8883 auto view_state_j = GetImageViewState(dev_data, viewj);
8884 if (!view_state_i || !view_state_j) {
// Distinct views of the same image overlap if their subresource regions intersect.
8887 auto view_ci_i = view_state_i->create_info;
8888 auto view_ci_j = view_state_j->create_info;
8889 if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
8890 overlapping_attachments[i].push_back(j);
8891 overlapping_attachments[j].push_back(i);
// Different images may still alias through overlapping ranges of the same VkDeviceMemory binding.
8894 auto image_data_i = GetImageState(dev_data, view_ci_i.image);
8895 auto image_data_j = GetImageState(dev_data, view_ci_j.image);
8896 if (!image_data_i || !image_data_j) {
8899 if (image_data_i->binding.mem == image_data_j->binding.mem &&
8900 isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
8901 image_data_j->binding.size)) {
8902 overlapping_attachments[i].push_back(j);
8903 overlapping_attachments[j].push_back(i);
// Any pair of overlapping attachments must both declare VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.
8907 for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8908 uint32_t attachment = i;
8909 for (auto other_attachment : overlapping_attachments[i]) {
8910 if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8911 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8912 HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
8913 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8914 attachment, other_attachment);
8916 if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8917 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8918 HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
8919 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8920 other_attachment, attachment);
8924 // Find for each attachment the subpasses that use them.
8925 unordered_set<uint32_t> attachmentIndices;
8926 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8927 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8928 attachmentIndices.clear();
8929 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8930 uint32_t attachment = subpass.pInputAttachments[j].attachment;
8931 if (attachment == VK_ATTACHMENT_UNUSED) continue;
8932 input_attachment_to_subpass[attachment].push_back(i);
// Uses of an attachment also count as uses of everything it overlaps with.
8933 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8934 input_attachment_to_subpass[overlapping_attachment].push_back(i);
8937 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8938 uint32_t attachment = subpass.pColorAttachments[j].attachment;
8939 if (attachment == VK_ATTACHMENT_UNUSED) continue;
8940 output_attachment_to_subpass[attachment].push_back(i);
8941 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8942 output_attachment_to_subpass[overlapping_attachment].push_back(i);
8944 attachmentIndices.insert(attachment);
8946 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8947 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8948 output_attachment_to_subpass[attachment].push_back(i);
8949 for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8950 output_attachment_to_subpass[overlapping_attachment].push_back(i);
// An attachment may not be both a color output and the depth/stencil output of the same subpass.
8953 if (attachmentIndices.count(attachment)) {
8955 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8956 DRAWSTATE_INVALID_RENDERPASS,
8957 "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
8961 // If there is a dependency needed make sure one exists
8962 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8963 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8964 // If the attachment is an input then all subpasses that output must have a dependency relationship
8965 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8966 uint32_t attachment = subpass.pInputAttachments[j].attachment;
8967 if (attachment == VK_ATTACHMENT_UNUSED) continue;
8968 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8970 // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8971 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8972 uint32_t attachment = subpass.pColorAttachments[j].attachment;
8973 if (attachment == VK_ATTACHMENT_UNUSED) continue;
8974 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8975 CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8977 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8978 const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8979 CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8980 CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8983 // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
8985 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8986 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8987 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8988 CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
// Build the subpass dependency DAG for a render pass: fills 'subpass_to_node' with prev/next edges from
// pDependencies, flags self-dependencies, and records each subpass's self-dependency index (-1 if none).
// Also reports malformed dependencies (both subpasses external, or src > dst) into 'skip'.
8994 static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
8995 std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
8996 std::vector<int32_t> &subpass_to_dep_index) {
8998 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8999 DAGNode &subpass_node = subpass_to_node[i];
9000 subpass_node.pass = i;
9001 subpass_to_dep_index[i] = -1; // Default to no dependency and overwrite below as needed
9003 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9004 const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
// Dependencies involving VK_SUBPASS_EXTERNAL add no graph edges; only both-external is an error.
9005 if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9006 if (dependency.srcSubpass == dependency.dstSubpass) {
9007 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9008 DRAWSTATE_INVALID_RENDERPASS, "The src and dest subpasses cannot both be external.");
9010 } else if (dependency.srcSubpass > dependency.dstSubpass) {
9011 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9012 DRAWSTATE_INVALID_RENDERPASS,
9013 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9014 } else if (dependency.srcSubpass == dependency.dstSubpass) {
9015 has_self_dependency[dependency.srcSubpass] = true;
9016 subpass_to_dep_index[dependency.srcSubpass] = i;
// Normal case: record the edge in both directions for DAG traversal.
9018 subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9019 subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
// Layer intercept for vkCreateShaderModule: validate the SPIR-V up front, call down the chain, then
// record shader_module state (an empty placeholder if the SPIR-V failed non-fatal validation).
9025 VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9026 const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
9027 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9030 if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;
9032 VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9034 if (res == VK_SUCCESS) {
9035 lock_guard_t lock(global_lock);
// Only parse/shadow the SPIR-V if it validated; otherwise store an empty module to keep the map entry valid.
9036 unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
9037 dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
// Verify an attachment reference index is either VK_ATTACHMENT_UNUSED or within the render pass'
// attachment array; 'type' names the usage (e.g. "Color", "Input") for the error message.
9042 static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9044 if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9045 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9046 VALIDATION_ERROR_12200684,
9047 "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.", type,
9048 attachment, attachment_count);
// Returns true iff x is a non-zero power of two (exactly one bit set).
static bool IsPowerOfTwo(unsigned x) {
    if (x == 0u) return false;
    return (x & (x - 1u)) == 0u;
}
// Validate per-subpass attachment usage for vkCreateRenderPass: bind point, preserve-attachment rules,
// attachment index bounds, resolve-attachment constraints (sample counts, formats), and consistent
// sample counts across a subpass' color/depth attachments. Errors accumulate in 'skip'.
9055 static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9057 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9058 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9059 if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9060 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9061 VALIDATION_ERROR_14000698,
9062 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
// Preserve attachments: must not be UNUSED, must be in range, and must not also be used as
// input/color/resolve/depth-stencil in the same subpass.
9065 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9066 uint32_t attachment = subpass.pPreserveAttachments[j];
9067 if (attachment == VK_ATTACHMENT_UNUSED) {
9068 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9069 VALIDATION_ERROR_140006aa,
9070 "CreateRenderPass: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
9072 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9074 bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
9075 for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
9076 found = (subpass.pInputAttachments[r].attachment == attachment);
9078 for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
9079 found = (subpass.pColorAttachments[r].attachment == attachment) ||
9080 (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
9084 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9085 VALIDATION_ERROR_140006ac,
9086 "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass.", i,
// A subpass "performs resolve" if any resolve attachment reference is not UNUSED.
9092 auto subpass_performs_resolve =
9093 subpass.pResolveAttachments &&
9094 std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9095 [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
// Accumulates all sample-count bits used by this subpass; must be a single bit unless the AMD
// mixed-samples extension is enabled (checked after the loop).
9097 unsigned sample_count = 0;
9099 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9100 uint32_t attachment;
9101 if (subpass.pResolveAttachments) {
9102 attachment = subpass.pResolveAttachments[j].attachment;
9103 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
// Resolve destination must be single-sampled.
9105 if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
9106 pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9107 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9108 0, VALIDATION_ERROR_140006a2,
9109 "CreateRenderPass: Subpass %u requests multisample resolve into attachment %u, which must "
9110 "have VK_SAMPLE_COUNT_1_BIT but has %s.",
9111 i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
// Cannot resolve from an UNUSED color attachment.
9114 if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
9115 subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
9116 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9117 0, VALIDATION_ERROR_1400069e,
9118 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u which has "
9119 "attachment=VK_ATTACHMENT_UNUSED.",
9123 attachment = subpass.pColorAttachments[j].attachment;
9124 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9126 if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
9127 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
// Resolve source must itself be multisampled.
9129 if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9130 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9131 0, VALIDATION_ERROR_140006a0,
9132 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u which has "
9133 "VK_SAMPLE_COUNT_1_BIT.",
// Color and resolve attachment formats must match.
9137 if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
9138 const auto &color_desc = pCreateInfo->pAttachments[attachment];
9139 const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
9140 if (color_desc.format != resolve_desc.format) {
9141 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9142 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_140006a4,
9143 "CreateRenderPass: Subpass %u pColorAttachments[%u] resolves to an attachment with a "
9144 "different format. color format: %u, resolve format: %u.",
9145 i, j, color_desc.format, resolve_desc.format);
// VK_AMD_mixed_attachment_samples: color samples may not exceed the depth/stencil samples.
9149 if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass.pDepthStencilAttachment &&
9150 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9151 const auto depth_stencil_sample_count =
9152 pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
9153 if (pCreateInfo->pAttachments[attachment].samples > depth_stencil_sample_count) {
9154 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9155 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_14000bc4,
9156 "CreateRenderPass: Subpass %u pColorAttachments[%u] has %s which is larger than "
9157 "depth/stencil attachment %s.",
9158 i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
9159 string_VkSampleCountFlagBits(depth_stencil_sample_count));
9165 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9166 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9167 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9169 if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
9170 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9174 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9175 uint32_t attachment = subpass.pInputAttachments[j].attachment;
9176 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
// Without the AMD extension, all sample counts in a subpass must be identical (single bit set).
9179 if (!dev_data->extensions.vk_amd_mixed_attachment_samples && sample_count && !IsPowerOfTwo(sample_count)) {
9180 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9181 VALIDATION_ERROR_0082b401,
9182 "CreateRenderPass: Subpass %u attempts to render to attachments with inconsistent sample counts.", i);
// Record whether attachment 'index' is first used as a read (true) or write (false); only the first
// recorded use for an index sticks, and VK_ATTACHMENT_UNUSED references are ignored.
9188 static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
9189 if (index == VK_ATTACHMENT_UNUSED) return;
9191 if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
// Layer intercept for vkCreateRenderPass: validate attachment usage, dependency stage/access masks, and
// layouts under the global lock; on success build the subpass DAG, record first-use of each attachment,
// and store the new RENDER_PASS_STATE.
9194 VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9195 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
9197 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9199 unique_lock_t lock(global_lock);
9200 // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
9202 skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
9203 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9204 auto const &dependency = pCreateInfo->pDependencies[i];
// Geometry/tessellation stage bits require the corresponding device features to be enabled.
9205 skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.srcStageMask, "vkCreateRenderPass()", VALIDATION_ERROR_13e006b8,
9206 VALIDATION_ERROR_13e006bc);
9207 skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.dstStageMask, "vkCreateRenderPass()", VALIDATION_ERROR_13e006ba,
9208 VALIDATION_ERROR_13e006be);
9210 if (!ValidateAccessMaskPipelineStage(dependency.srcAccessMask, dependency.srcStageMask)) {
9211 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9212 VALIDATION_ERROR_13e006c8,
9213 "CreateRenderPass: pDependencies[%u].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", i,
9214 dependency.srcAccessMask, dependency.srcStageMask);
9217 if (!ValidateAccessMaskPipelineStage(dependency.dstAccessMask, dependency.dstStageMask)) {
9218 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9219 VALIDATION_ERROR_13e006ca,
9220 "CreateRenderPass: pDependencies[%u].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", i,
9221 dependency.dstAccessMask, dependency.dstStageMask);
9225 skip |= ValidateLayouts(dev_data, device, pCreateInfo);
9230 return VK_ERROR_VALIDATION_FAILED_EXT;
9233 VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9235 if (VK_SUCCESS == result) {
// Build DAG bookkeeping used later by dependency validation at vkCmdBeginRenderPass time.
9238 std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9239 std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9240 std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
9241 skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);
9243 auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
9244 render_pass->renderPass = *pRenderPass;
9245 render_pass->hasSelfDependency = has_self_dependency;
9246 render_pass->subpassToNode = subpass_to_node;
9247 render_pass->subpass_to_dependency_index = subpass_to_dep_index;
// Record for each attachment whether its first use is a read or a write (input attachments read;
// color/resolve/depth-stencil attachments are considered written).
9249 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9250 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9251 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9252 MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
9254 // resolve attachments are considered to be written
9255 if (subpass.pResolveAttachments) {
9256 MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
9259 if (subpass.pDepthStencilAttachment) {
9260 MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
9262 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9263 MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
9267 dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
// Verify the command buffer was allocated at PRIMARY level; 'cmd_name' and 'error_code' customize the
// message for the calling vkCmd* entry point.
9272 static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
9273 UNIQUE_VALIDATION_ERROR_CODE error_code) {
9275 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9276 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9277 HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
// Check that pRenderPassBegin->renderArea lies entirely within the bound framebuffer's dimensions.
9283 static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9285 const safe_VkFramebufferCreateInfo *pFramebufferInfo =
9286 &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
// Offset must be non-negative and offset+extent must not exceed the framebuffer on either axis.
9287 if (pRenderPassBegin->renderArea.offset.x < 0 ||
9288 (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9289 pRenderPassBegin->renderArea.offset.y < 0 ||
9290 (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9291 skip |= static_cast<bool>(log_msg(
9292 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9293 DRAWSTATE_INVALID_RENDER_AREA,
9294 "Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
9295 "%d, height %d. Framebuffer: width %d, height %d.",
9296 pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9297 pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9302 // If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
9303 // [load|store]Op flag must be checked
9304 // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
// Returns true if, for the given format, the op that actually applies (color/depth op, stencil op, or
// both for combined depth-stencil formats) matches 'op'.
9305 template <typename T>
9306 static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
// Fast exit: neither op matches, so the format-specific selection below cannot match either.
9307 if (color_depth_op != op && stencil_op != op) {
9310 bool check_color_depth_load_op = !FormatIsStencilOnly(format);
9311 bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
9313 return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
// Layer intercept for vkCmdBeginRenderPass: queue per-attachment memory-validity callbacks based on each
// attachment's load op, validate clear-value count, render area, framebuffer/render-pass compatibility
// and dependencies, then record active render pass state and transition attachment layouts.
9316 VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
9317 VkSubpassContents contents) {
9319 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9320 unique_lock_t lock(global_lock);
9321 GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
9322 auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
9323 auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9325 if (render_pass_state) {
9326 uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
9327 cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
// For each attachment, defer a memory-validity action to queue-submit time based on its load op:
// CLEAR marks memory valid, DONT_CARE marks it invalid, LOAD requires it to already be valid.
9328 for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
9329 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9330 auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
9331 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
9332 VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9333 clear_op_size = static_cast<uint32_t>(i) + 1;
9334 std::function<bool()> function = [=]() {
9335 SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
9338 cb_node->queue_submit_functions.push_back(function);
9339 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
9340 pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
9341 std::function<bool()> function = [=]() {
9342 SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
9345 cb_node->queue_submit_functions.push_back(function);
9346 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
9347 pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
9348 std::function<bool()> function = [=]() {
9349 return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
9350 "vkCmdBeginRenderPass()");
9352 cb_node->queue_submit_functions.push_back(function);
// Independently, an attachment whose first use is a read must have valid memory at submit time.
9354 if (render_pass_state->attachment_first_read[i]) {
9355 std::function<bool()> function = [=]() {
9356 return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
9357 "vkCmdBeginRenderPass()");
9359 cb_node->queue_submit_functions.push_back(function);
// pClearValues must cover the highest-indexed attachment that uses LOAD_OP_CLEAR.
9362 if (clear_op_size > pRenderPassBegin->clearValueCount) {
9363 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9364 HandleToUint64(render_pass_state->renderPass), VALIDATION_ERROR_1200070c,
9365 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
9366 "must be at least %u entries in pClearValues array to account for the highest index attachment in "
9367 "renderPass 0x%" PRIx64
9368 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
9369 "attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
9370 "that aren't cleared they will be ignored.",
9371 pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
9372 clear_op_size, clear_op_size - 1);
9374 skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9375 skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
9376 GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
// If the framebuffer was created with a different render pass, the two must be compatible.
9377 if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
9378 skip |= validateRenderPassCompatibility(dev_data, "render pass", render_pass_state, "framebuffer",
9379 framebuffer->rp_state.get(), "vkCmdBeginRenderPass()",
9380 VALIDATION_ERROR_12000710);
9382 skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
9383 skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
9384 skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
9385 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
9386 VALIDATION_ERROR_17a02415);
9387 skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
// Record the now-active render pass state on the command buffer.
9388 cb_node->activeRenderPass = render_pass_state;
9389 // This is a shallow copy as that is all that is needed for now
9390 cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
9391 cb_node->activeSubpass = 0;
9392 cb_node->activeSubpassContents = contents;
9393 cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
9394 // Connect this framebuffer and its children to this cmdBuffer
9395 AddFramebufferBinding(dev_data, cb_node, framebuffer);
9396 // Connect this RP to cmdBuffer
9397 addCommandBufferBinding(&render_pass_state->cb_bindings,
9398 {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_node);
9399 // transition attachments to the correct layouts for beginning of renderPass and first subpass
9400 TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
9405 dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
// vkCmdNextSubpass layer entry point.
// Validates that the command buffer is a primary, graphics-capable CB that is
// currently inside a render pass and has not already reached the final subpass,
// then calls down the dispatch chain and updates tracked state: increments
// activeSubpass, records the new subpass contents, and applies the image
// layout transitions declared for the new subpass.
// NOTE(review): the declaration/initialization of 'skip' and the lock
// unlock/relock around the down-chain call are not visible in this view.
9409 VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9411     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9412     unique_lock_t lock(global_lock);
9413     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
// Core validation: primary CB, graphics queue, valid command for this CB
// state, and must be inside an active render pass.
9415     skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
9416     skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
9417     skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9418     skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);
// Advancing past the last subpass is an error (VALIDATION_ERROR_1b60071a).
9420         auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
9421         if (pCB->activeSubpass == subpassCount - 1) {
9422             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9423                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1b60071a,
9424                             "vkCmdNextSubpass(): Attempted to advance beyond final subpass.");
// Dispatch to the next layer/ICD.
9431     dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
// Post-call state update: track the new active subpass and transition
// attachment layouts as required by the subpass description.
9435     pCB->activeSubpass++;
9436     pCB->activeSubpassContents = contents;
9437     TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
9438                              GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
// vkCmdEndRenderPass layer entry point.
// Validates that the active render pass is on its final subpass, queues
// per-attachment memory-validity updates (based on store ops) to run at
// queue submit time, performs the standard primary/graphics/in-render-pass
// checks, dispatches down the chain, then transitions attachments to their
// final layouts and clears the CB's active render pass state.
// NOTE(review): 'skip' declaration and the skip-early-return path are elided
// from this view.
9442 VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9444     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9445     unique_lock_t lock(global_lock);
9446     auto pCB = GetCBNode(dev_data, commandBuffer);
9447     FRAMEBUFFER_STATE *framebuffer = NULL;
9449         RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
9450         framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
// Ending before the last subpass violates VALIDATION_ERROR_1b00071c.
9452             if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
9453                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9454                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
9455                                 VALIDATION_ERROR_1b00071c, "vkCmdEndRenderPass(): Called before reaching final subpass.");
// For each attachment, defer a memory-validity update to submit time:
// STORE_OP_STORE marks the image contents valid, STORE_OP_DONT_CARE marks
// them invalid. FormatSpecificLoadAndStoreOpSettings picks color vs stencil
// op based on the attachment format.
9458         for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
9459             MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9460             auto pAttachment = &rp_state->createInfo.pAttachments[i];
9461             if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
9462                                                      VK_ATTACHMENT_STORE_OP_STORE)) {
9463                 std::function<bool()> function = [=]() {
9464                     SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
9467                 pCB->queue_submit_functions.push_back(function);
9468             } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
9469                                                             pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9470                 std::function<bool()> function = [=]() {
9471                     SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
9474                 pCB->queue_submit_functions.push_back(function);
// Standard command-recording checks for this entry point.
9478         skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass()", VALIDATION_ERROR_1b000017);
9479         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
9480         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
9481         skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9487     dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
// Post-call: apply final-layout transitions and reset render pass tracking.
9491         TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
9492         pCB->activeRenderPass = nullptr;
9493         pCB->activeSubpass = 0;
9494         pCB->activeFramebuffer = VK_NULL_HANDLE;
// Helper for vkCmdExecuteCommands: if the secondary command buffer's
// inheritance info names a framebuffer, verify (a) it matches the primary
// CB's currently active framebuffer (VALIDATION_ERROR_1b2000c6) and (b) the
// framebuffer handle refers to a known framebuffer object. Returns true if
// validation failed (caller accumulates into its 'skip').
// NOTE(review): 'skip' init and the early-return when pInheritanceInfo is
// null are elided from this view.
9498 static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9499                                 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
9501     if (!pSubCB->beginInfo.pInheritanceInfo) {
9504     VkFramebuffer primary_fb = pCB->activeFramebuffer;
9505     VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9506     if (secondary_fb != VK_NULL_HANDLE) {
// Inherited framebuffer must be the one the primary CB is rendering into.
9507         if (primary_fb != secondary_fb) {
9508             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9509                             HandleToUint64(primaryBuffer), VALIDATION_ERROR_1b2000c6,
9510                             "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
9511                             " which has a framebuffer 0x%" PRIx64
9512                             " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
9513                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb));
// The inherited framebuffer handle must map to a live framebuffer state
// object; otherwise report an unknown/destroyed framebuffer.
9515         auto fb = GetFramebufferState(dev_data, secondary_fb);
9517             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9518                             HandleToUint64(primaryBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9519                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9520                             " which has invalid framebuffer 0x%" PRIx64 ".",
9521                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb));
// Helper for vkCmdExecuteCommands: checks query and queue-family
// compatibility between a primary CB (pCB) and a secondary CB (pSubCB):
//  - Any active pipeline-statistics query on the primary requires the
//    secondary's inherited pipelineStatistics flags to cover all bits of the
//    query pool's statistics (VALIDATION_ERROR_1b2000d0).
//  - The secondary must not have started a query of a type that is already
//    active on the primary.
//  - Both CBs must come from pools with the same queue family index.
// Returns true if any check failed.
9528 static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9530     unordered_set<int> activeTypes;
9531     for (auto queryObject : pCB->activeQueries) {
9532         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9533         if (queryPoolData != dev_data->queryPoolMap.end()) {
9534             if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9535                 pSubCB->beginInfo.pInheritanceInfo) {
9536                 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
// Secondary's inherited statistics mask must be a subset of the pool's mask.
9537                 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9539                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9540                         HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1b2000d0,
9541                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9542                         " which has invalid active query pool 0x%" PRIx64
9543                         ". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
9544                         HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first));
// Remember which query types are active on the primary.
9547             activeTypes.insert(queryPoolData->second.createInfo.queryType);
// A secondary CB must not have started a query whose type is already active
// on the primary — nesting queries of the same type is invalid.
9550     for (auto queryObject : pSubCB->startedQueries) {
9551         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9552         if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9553             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9554                             HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9555                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9556                             " which has invalid active query pool 0x%" PRIx64
9557                             " of type %d but a query of that type has been started on secondary Cmd Buffer 0x%" PRIx64 ".",
9558                             HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first),
9559                             queryPoolData->second.createInfo.queryType, HandleToUint64(pSubCB->commandBuffer));
// Primary and secondary must have been allocated from pools on the same
// queue family.
9563     auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
9564     auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
9565     if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
9566         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9567                         HandleToUint64(pSubCB->commandBuffer), DRAWSTATE_INVALID_QUEUE_FAMILY,
9568                         "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIx64
9569                         " created in queue family %d has secondary command buffer 0x%" PRIx64 " created in queue family %d.",
9570                         HandleToUint64(pCB->commandBuffer), primary_pool->queueFamilyIndex, HandleToUint64(pSubCB->commandBuffer),
9571                         secondary_pool->queueFamilyIndex);
// vkCmdExecuteCommands layer entry point.
// For each secondary command buffer: verifies it really is secondary, that
// render-pass-continue rules and render pass compatibility hold when the
// primary is inside a render pass, that simultaneous-use rules are honored,
// that inherited queries are supported when queries are in flight, and then
// propagates recorded state (image layouts, query updates, queue-submit
// validation functions, linked-CB bookkeeping) from the secondary into the
// primary. Finally performs the standard primary/queue-flags/command checks
// and, if nothing failed, dispatches down the chain.
// TODO(review): validation and state update are interleaved here (see the
// in-code TODO); comments only added in this pass.
9577 VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
9578                                               const VkCommandBuffer *pCommandBuffers) {
9580     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9581     unique_lock_t lock(global_lock);
9582     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9584         GLOBAL_CB_NODE *pSubCB = NULL;
9585         for (uint32_t i = 0; i < commandBuffersCount; i++) {
9586             pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
// All elements of pCommandBuffers must be SECONDARY command buffers.
9588             if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9590                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9591                             HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000b0,
9592                             "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%" PRIx64
9593                             " in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.",
9594                             HandleToUint64(pCommandBuffers[i]), i);
9595             } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9596                 if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
9597                     auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9598                     if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9599                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9600                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9601                                         VALIDATION_ERROR_1b2000c0,
9602                                         "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9603                                         ") executed within render pass (0x%" PRIx64
9604                                         ") must have had vkBeginCommandBuffer() called w/ "
9605                                         "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9606                                         HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->activeRenderPass->renderPass));
9608                         // Make sure render pass is compatible with parent command buffer pass if has continue
9609                         if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
9610                             skip |= validateRenderPassCompatibility(dev_data, "primary command buffer", pCB->activeRenderPass,
9611                                                                     "secondary command buffer", secondary_rp_state,
9612                                                                     "vkCmdExecuteCommands()", VALIDATION_ERROR_1b2000c4);
9614                         // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
9616                             validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB, "vkCmdExecuteCommands()");
9617                         if (!pSubCB->cmd_execute_commands_functions.empty()) {
9618                             // Inherit primary's activeFramebuffer and while running validate functions
9619                             for (auto &function : pSubCB->cmd_execute_commands_functions) {
9620                                 skip |= function(pCB, pCB->activeFramebuffer);
9626             // TODO(mlentine): Move more logic into this method
9627             skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9628             skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
// Simultaneous-use rules: without SIMULTANEOUS_USE_BIT a secondary may not
// be in flight or already linked into this primary; if it is legal but the
// primary has SIMULTANEOUS_USE_BIT, warn and strip the bit from the
// primary's tracked flags.
9629             if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9630                 if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
9631                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9632                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
9633                                     VALIDATION_ERROR_1b2000b4,
9634                                     "Attempt to simultaneously execute command buffer 0x%" PRIx64
9635                                     " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
9636                                     HandleToUint64(pCB->commandBuffer));
9638                 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9639                     // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9640                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9641                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9642                                     DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE,
9643                                     "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9644                                     ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
9645                                     "command buffer (0x%" PRIx64
9646                                     ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even "
9648                                     HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->commandBuffer));
9649                     pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
// Executing secondaries while a query is active requires device support for
// inherited queries (VALIDATION_ERROR_1b2000ca).
9652             if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
9654                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9655                             HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000ca,
9656                             "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9657                             ") cannot be submitted with a query in flight and inherited queries not supported on this device.",
9658                             HandleToUint64(pCommandBuffers[i]));
9660             // TODO: separate validate from update! This is very tangled.
9661             // Propagate layout transitions to the primary cmd buffer
9662             for (auto ilm_entry : pSubCB->imageLayoutMap) {
9663                 if (pCB->imageLayoutMap.find(ilm_entry.first) != pCB->imageLayoutMap.end()) {
9664                     pCB->imageLayoutMap[ilm_entry.first].layout = ilm_entry.second.layout;
9666                     assert(ilm_entry.first.hasSubresource);
9667                     IMAGE_CMD_BUF_LAYOUT_NODE node;
9668                     if (!FindCmdBufLayout(dev_data, pCB, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
9669                         node.initialLayout = ilm_entry.second.initialLayout;
9671                     node.layout = ilm_entry.second.layout;
9672                     SetLayout(dev_data, pCB, ilm_entry.first, node);
// Link the secondary into the primary and propagate its deferred query and
// queue-submit-time validation callbacks.
9675             pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9676             pCB->linkedCommandBuffers.insert(pSubCB);
9677             pSubCB->linkedCommandBuffers.insert(pCB);
9678             for (auto &function : pSubCB->queryUpdates) {
9679                 pCB->queryUpdates.push_back(function);
9681             for (auto &function : pSubCB->queue_submit_functions) {
9682                 pCB->queue_submit_functions.push_back(function);
9685         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
9687             ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
9688                                   VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
9689         skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9692     if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
// vkMapMemory layer entry point.
// Validates that the memory object is host-visible, that image layouts in
// the mapped range allow host access, and that the requested range is legal
// (ValidateMapMemRange); marks tracked memory ranges valid. On success of
// the down-chain call, records the mapped range and (for non-coherent
// memory) sets up shadow-copy tracking via initializeAndTrackMemory.
// NOTE(review): the trailing 'void **ppData' parameter and the skip/lock
// handling around the dispatch are elided from this view.
9695 VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
9697     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9700     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9701     unique_lock_t lock(global_lock);
9702     DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
9704         // TODO : This could me more fine-grained to track just region that is valid
9705         mem_info->global_valid = true;
// Compute the inclusive end of the mapped range; VK_WHOLE_SIZE maps to the
// end of the allocation.
9706         auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
9707         skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
9708         // TODO : Do we need to create new "bound_range" for the mapped range?
9709         SetMemRangesValid(dev_data, mem_info, offset, end_offset);
// Mapping requires HOST_VISIBLE memory (VALIDATION_ERROR_31200554).
9710         if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
9711              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9712             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9713                            HandleToUint64(mem), VALIDATION_ERROR_31200554,
9714                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIx64 ".",
9715                            HandleToUint64(mem));
9718     skip |= ValidateMapMemRange(dev_data, mem, offset, size);
9722         result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
9723         if (VK_SUCCESS == result) {
9725             // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
9726             storeMemRanges(dev_data, mem, offset, size);
9727             initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
// vkUnmapMemory layer entry point: validates there is an active mapping to
// delete (deleteMemRanges) and then dispatches down the chain.
// NOTE(review): 'skip' declaration and the skip-guard before dispatch are
// elided from this view.
9734 VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9735     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9738     unique_lock_t lock(global_lock);
9739     skip |= deleteMemRanges(dev_data, mem);
9742         dev_data->dispatch_table.UnmapMemory(device, mem);
// Verifies that every VkMappedMemoryRange passed to flush/invalidate lies
// within the currently mapped range of its memory object:
//  - VK_WHOLE_SIZE ranges only need offset >= mapped offset
//    (VALIDATION_ERROR_0c20055c);
//  - explicit-size ranges must fit entirely inside the mapped region
//    (VALIDATION_ERROR_0c20055a).
// Returns true if any range is out of bounds.
9746 static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
9747                                    const VkMappedMemoryRange *pMemRanges) {
9749     for (uint32_t i = 0; i < memRangeCount; ++i) {
9750         auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
9752             if (pMemRanges[i].size == VK_WHOLE_SIZE) {
9753                 if (mem_info->mem_range.offset > pMemRanges[i].offset) {
9755                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9756                         HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055c,
9757                         "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
9758                         ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
9759                         funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
// data_end is the end of the mapped region: the allocation size when the
// mapping used VK_WHOLE_SIZE, else mapped offset + mapped size.
9762                 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
9763                                               ? mem_info->alloc_info.allocationSize
9764                                               : (mem_info->mem_range.offset + mem_info->mem_range.size);
9765                 if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
9766                     (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
9768                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9769                                 HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055a,
9770                                 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
9771                                 ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
9772                                 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9773                                 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
// For each range whose memory object has a shadow copy (non-coherent memory
// tracking): check the guard bytes placed before and after the user-visible
// region for corruption (underflow/overflow detection via
// NoncoherentMemoryFillValue), then copy the shadowed user data into the
// driver's real mapping. Returns true if guard-byte corruption was found.
9781 static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
9782                                                      const VkMappedMemoryRange *mem_ranges) {
9784     for (uint32_t i = 0; i < mem_range_count; ++i) {
9785         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9787         if (mem_info->shadow_copy) {
// Size of the shadowed user region; VK_WHOLE_SIZE means "to the end of the
// allocation from the mapped offset".
9788             VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9789                                     ? mem_info->mem_range.size
9790                                     : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
9791             char *data = static_cast<char *>(mem_info->shadow_copy);
// Leading guard bytes [0, shadow_pad_size): any byte differing from the
// fill pattern means the app wrote before the mapped region.
9792             for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
9793                 if (data[j] != NoncoherentMemoryFillValue) {
9794                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9795                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9796                                     MEMTRACK_INVALID_MAP, "Memory underflow was detected on mem obj 0x%" PRIx64,
9797                                     HandleToUint64(mem_ranges[i].memory));
// Trailing guard bytes after the user region: detect writes past the end.
9800             for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
9801                 if (data[j] != NoncoherentMemoryFillValue) {
9802                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9803                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9804                                     MEMTRACK_INVALID_MAP, "Memory overflow was detected on mem obj 0x%" PRIx64,
9805                                     HandleToUint64(mem_ranges[i].memory));
// Push the (validated) shadow contents to the driver's mapped pointer.
9808             memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
// Reverse of ValidateAndCopyNoncoherentMemoryToDriver: after an invalidate,
// refresh the layer's shadow copy from the driver's mapped pointer so
// subsequent guard-byte checks and reads see current data. No-op for memory
// objects without a shadow copy.
9815 static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
9816     for (uint32_t i = 0; i < mem_range_count; ++i) {
9817         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9818         if (mem_info && mem_info->shadow_copy) {
// NOTE(review): unlike the to-driver path this uses mem_ranges[i].offset
// (not mem_info->mem_range.offset) in the VK_WHOLE_SIZE case — confirm
// intentional.
9819             VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9820                                     ? mem_info->mem_range.size
9821                                     : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
9822             char *data = static_cast<char *>(mem_info->shadow_copy);
9823             memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
// Checks each VkMappedMemoryRange against the device's nonCoherentAtomSize
// limit: offset must be a multiple of the atom size
// (VALIDATION_ERROR_0c20055e), and size must be a multiple too unless it is
// VK_WHOLE_SIZE or the range runs exactly to the end of the allocation
// (VALIDATION_ERROR_0c200adc). Returns true on any violation.
9828 static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
9829                                                   const VkMappedMemoryRange *mem_ranges) {
9831     for (uint32_t i = 0; i < mem_range_count; ++i) {
9832         uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
9833         if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
9834             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9835                             HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c20055e,
9836                             "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
9837                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9838                             func_name, i, mem_ranges[i].offset, atom_size);
9840         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
// A size running exactly to allocationSize is exempt from the atom-size
// multiple requirement, matching the spec's valid-usage wording.
9841         if ((mem_ranges[i].size != VK_WHOLE_SIZE) &&
9842             (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
9843             (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
9844             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9845                             HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c200adc,
9846                             "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
9847                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9848                             func_name, i, mem_ranges[i].size, atom_size);
// Pre-call validation for vkFlushMappedMemoryRanges: atom-size alignment,
// shadow-copy guard-byte check + copy-to-driver, and mapped-range bounds.
// Takes the global lock for the duration. Returns true to skip the call.
9854 static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9855                                                    const VkMappedMemoryRange *mem_ranges) {
9857     lock_guard_t lock(global_lock);
9858     skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
9859     skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
9860     skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
// vkFlushMappedMemoryRanges layer entry point: dispatches down the chain
// only if pre-call validation passes; otherwise returns
// VK_ERROR_VALIDATION_FAILED_EXT.
9864 VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9865                                                        const VkMappedMemoryRange *pMemRanges) {
9866     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9867     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9869     if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9870         result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
// Pre-call validation for vkInvalidateMappedMemoryRanges: atom-size
// alignment and mapped-range bounds (no shadow-copy write — that happens in
// the post-call record). Takes the global lock. Returns true to skip.
9875 static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9876                                                         const VkMappedMemoryRange *mem_ranges) {
9878     lock_guard_t lock(global_lock);
9879     skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
9880     skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
// Post-call record for a successful vkInvalidateMappedMemoryRanges: pull the
// driver's (now device-coherent) contents back into the layer's shadow
// copies under the global lock.
9884 static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9885                                                        const VkMappedMemoryRange *mem_ranges) {
9886     lock_guard_t lock(global_lock);
9887     // Update our shadow copy with modified driver data
9888     CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
// vkInvalidateMappedMemoryRanges layer entry point: validate, dispatch, and
// on VK_SUCCESS refresh the shadow copies from driver memory.
9891 VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9892                                                             const VkMappedMemoryRange *pMemRanges) {
9893     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9894     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9896     if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9897         result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9898         if (result == VK_SUCCESS) {
9899             PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
// Pre-call validation shared by vkBindImageMemory and vkBindImageMemory2*:
//  - checks the image isn't already bound / isn't sparse (ValidateSetMemBinding);
//  - warns (and self-heals) if vkGetImageMemoryRequirements was never called;
//  - validates the bind range, memory type bits, alignment
//    (VALIDATION_ERROR_17400830), size (VALIDATION_ERROR_17400832), and
//    dedicated-allocation constraints (VALIDATION_ERROR_17400bca for the
//    non-KHR2 path). api_name distinguishes the calling entry point in
//    messages. Returns true to skip the down-chain call.
9905 static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9906                                            VkDeviceSize memoryOffset, const char *api_name) {
9909         unique_lock_t lock(global_lock);
9910         // Track objects tied to memory
9911         uint64_t image_handle = HandleToUint64(image);
9912         skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
9913         if (!image_state->memory_requirements_checked) {
9914             // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
9915             // BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
9916             // vkGetImageMemoryRequirements()
9917             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9918                             image_handle, DRAWSTATE_INVALID_IMAGE,
9919                             "%s: Binding memory to image 0x%" PRIx64
9920                             " but vkGetImageMemoryRequirements() has not been called on that image.",
9921                             api_name, HandleToUint64(image_handle));
9922             // Make the call for them so we can verify the state
// Fetch requirements ourselves so the checks below operate on real data.
9924             dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
9928         // Validate bound memory range information
9929         auto mem_info = GetMemObjInfo(dev_data, mem);
9931             skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9932                                                    image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
9933             skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
9934                                         VALIDATION_ERROR_1740082e);
9937             // Validate memory requirements alignment
9938             if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
9939                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9940                                 image_handle, VALIDATION_ERROR_17400830,
9941                                 "%s: memoryOffset is 0x%" PRIxLEAST64
9942                                 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
9943                                 ", returned from a call to vkGetImageMemoryRequirements with image.",
9944                                 api_name, memoryOffset, image_state->requirements.alignment);
9948             // Validate memory requirements size
9949             if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
9950                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9951                                 image_handle, VALIDATION_ERROR_17400832,
9952                                 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
9953                                 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
9954                                 ", returned from a call to vkGetImageMemoryRequirements with image.",
9955                                 api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
9958             // Validate dedicated allocation
// Dedicated allocations may only be bound to their dedicated image at
// offset zero.
9959             if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
9960                 // TODO: Add vkBindImageMemory2KHR error message when added to spec.
9961                 auto validation_error = VALIDATION_ERROR_UNDEFINED;
9962                 if (strcmp(api_name, "vkBindImageMemory()") == 0) {
9963                     validation_error = VALIDATION_ERROR_17400bca;
9966                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9967                             image_handle, validation_error,
9968                             "%s: for dedicated memory allocation 0x%" PRIxLEAST64
9969                             ", VkMemoryDedicatedAllocateInfoKHR::image 0x%" PRIXLEAST64 " must be equal to image 0x%" PRIxLEAST64
9970                             " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
9971                             api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_image), image_handle, memoryOffset);
// Post-call record for a successful image-memory bind: inserts the bound
// memory range into the memory object's tracking and records the
// image<->memory binding, all under the global lock.
9978 static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9979                                           VkDeviceSize memoryOffset, const char *api_name) {
9981     unique_lock_t lock(global_lock);
9982     // Track bound memory range information
9983     auto mem_info = GetMemObjInfo(dev_data, mem);
9985         InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9986                                image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
9989     // Track objects tied to memory
9990     uint64_t image_handle = HandleToUint64(image);
9991     SetMemBinding(dev_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage, api_name);
// vkBindImageMemory layer entry point: validate, dispatch, and on success
// record the binding in layer state.
9995 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9996     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9997     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9998     IMAGE_STATE *image_state;
10000         unique_lock_t lock(global_lock);
10001         image_state = GetImageState(dev_data, image);
10003     bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
10005         result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
10006         if (result == VK_SUCCESS) {
10007             PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
// Pre-call validation for vkBindImageMemory2(KHR): resolves the IMAGE_STATE
// for every bind info (stored into the caller-provided vector for reuse in
// the post-call record), then runs the single-bind validator for each entry
// with an api_name that pinpoints the pBindInfos index in error messages.
10013 static bool PreCallValidateBindImageMemory2(layer_data *dev_data, std::vector<IMAGE_STATE *> *image_state, uint32_t bindInfoCount,
10014                                             const VkBindImageMemoryInfoKHR *pBindInfos) {
10016         unique_lock_t lock(global_lock);
10017         for (uint32_t i = 0; i < bindInfoCount; i++) {
10018             (*image_state)[i] = GetImageState(dev_data, pBindInfos[i].image);
// Fixed-size buffer is large enough for the format below (bounded index).
10022     char api_name[128];
10023     for (uint32_t i = 0; i < bindInfoCount; i++) {
10024         sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i);
10025         skip |= PreCallValidateBindImageMemory(dev_data, pBindInfos[i].image, (*image_state)[i], pBindInfos[i].memory,
10026                                                pBindInfos[i].memoryOffset, api_name);
// Post-call record for vkBindImageMemory2(KHR): records each successful
// bind using the image states resolved during pre-call validation.
10031 static void PostCallRecordBindImageMemory2(layer_data *dev_data, const std::vector<IMAGE_STATE *> &image_state,
10032                                            uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
10033     for (uint32_t i = 0; i < bindInfoCount; i++) {
10034         PostCallRecordBindImageMemory(dev_data, pBindInfos[i].image, image_state[i], pBindInfos[i].memory,
10035                                       pBindInfos[i].memoryOffset, "vkBindImageMemory2()");
// vkBindImageMemory2 (core promoted) layer entry point: validate all binds,
// dispatch once, and on success record every binding.
10039 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount,
10040                                                 const VkBindImageMemoryInfoKHR *pBindInfos) {
10041     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10042     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10043     std::vector<IMAGE_STATE *> image_state(bindInfoCount);
10044     if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
10045         result = dev_data->dispatch_table.BindImageMemory2(device, bindInfoCount, pBindInfos);
10046         if (result == VK_SUCCESS) {
10047             PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
10053 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
10054 const VkBindImageMemoryInfoKHR *pBindInfos) {
10055 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10056 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10057 std::vector<IMAGE_STATE *> image_state(bindInfoCount);
10058 if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
10059 result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
10060 if (result == VK_SUCCESS) {
10061 PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
10067 VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
10069 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10070 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10071 unique_lock_t lock(global_lock);
10072 auto event_state = GetEventNode(dev_data, event);
10074 event_state->needsSignaled = false;
10075 event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10076 if (event_state->write_in_use) {
10077 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10078 HandleToUint64(event), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10079 "Cannot call vkSetEvent() on event 0x%" PRIx64 " that is already in use by a command buffer.",
10080 HandleToUint64(event));
10084 // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10085 // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
10086 // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10087 for (auto queue_data : dev_data->queueMap) {
10088 auto event_entry = queue_data.second.eventToStageMap.find(event);
10089 if (event_entry != queue_data.second.eventToStageMap.end()) {
10090 event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10093 if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
// Validate vkQueueBindSparse(): checks that 'fence' is legal to submit, that each
// waited semaphore has a prior or pending signal, that each signaled semaphore is
// not already signaled, and that sparse image binds were preceded by a
// vkGetImageSparseMemoryRequirements[2KHR] query. Returns true to skip the call.
static bool PreCallValidateQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
                                           const VkBindSparseInfo *pBindInfo, VkFence fence) {
    auto pFence = GetFenceNode(dev_data, fence);
    bool skip = ValidateFenceForSubmit(dev_data, pFence);
    // Per-call tracking: semaphore transitions caused by earlier pBindInfo entries
    // in this same submission, so intra-call wait/signal ordering is validated too.
    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        // Wait-semaphore forward-progress check.
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            // Only internally-scoped semaphores (or ones already treated as internal
            // within this call) have trackable signal state.
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                // A wait with no prior signal and no pending signal can never complete.
                if (unsignaled_semaphores.count(semaphore) ||
                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                    HandleToUint64(queue), HandleToUint64(semaphore));
                    // The wait consumes the signal: record the semaphore as unsignaled
                    // for subsequent entries in this call.
                    signaled_semaphores.erase(semaphore);
                    unsignaled_semaphores.insert(semaphore);
            // Temporary external imports revert after a wait; treat as internal below.
            if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
                internal_semaphores.insert(semaphore);
        // Signal-semaphore double-signal check.
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                    HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
                    unsignaled_semaphores.erase(semaphore);
                    signaled_semaphores.insert(semaphore);
        // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound
        std::unordered_set<IMAGE_STATE *> sparse_images;
        // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
        for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
            const auto &image_bind = bindInfo.pImageBinds[i];
            auto image_state = GetImageState(dev_data, image_bind.image);
            sparse_images.insert(image_state);
            if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
                // For now just warning if sparse image binding occurs without calling to get reqs first
                // NOTE(review): returning log_msg's result here aborts the remaining
                // validation for this submission and can skip the driver call when the
                // warning is unfiltered — confirm this early-return is intentional.
                return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                               HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
                               "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
                               " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                               HandleToUint64(image_state->image));
            // Note which images had a metadata-aspect binding in this call.
            for (uint32_t j = 0; j < image_bind.bindCount; ++j) {
                if (image_bind.pBinds[j].flags & VK_IMAGE_ASPECT_METADATA_BIT) {
                    image_state->sparse_metadata_bound = true;
        for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
            auto image_state = GetImageState(dev_data, bindInfo.pImageOpaqueBinds[i].image);
            sparse_images.insert(image_state);
            if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
                // For now just warning if sparse image binding occurs without calling to get reqs first
                return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                               HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
                               "vkQueueBindSparse(): Binding opaque sparse memory to image 0x%" PRIx64
                               " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                               HandleToUint64(image_state->image));
        // Every image touched by this entry that requires metadata must have had it bound.
        for (const auto &sparse_image_state : sparse_images) {
            if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
                // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
                return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                               HandleToUint64(sparse_image_state->image), MEMTRACK_INVALID_STATE,
                               "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
                               " which requires a metadata aspect but no binding with VK_IMAGE_ASPECT_METADATA_BIT set was made.",
                               HandleToUint64(sparse_image_state->image));
// Record state after a successful vkQueueBindSparse(): submits the fence,
// records the sparse memory bindings on buffers/images, updates semaphore
// signal/wait bookkeeping, and queues one submission record per bindInfo.
// External fences/semaphores force early retirement of queue work, since their
// corresponding waits will never be observed by the layer.
static void PostCallRecordQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
                                          const VkBindSparseInfo *pBindInfo, VkFence fence) {
    uint64_t early_retire_seq = 0;
    auto pFence = GetFenceNode(dev_data, fence);
    auto pQueue = GetQueueState(dev_data, queue);
        if (pFence->scope == kSyncScopeInternal) {
            // Treat an empty submission list as a single submission for fence pacing.
            SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
            if (!bindInfoCount) {
                // No work to do, just dropping a fence in the queue by itself.
                pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                                 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
            // Retire work up until this fence early, we will not see the wait that corresponds to this signal
            early_retire_seq = pQueue->seq + pQueue->submissions.size();
            if (!dev_data->external_sync_warning) {
                // One-shot warning: external sync objects disable lifetime validation.
                dev_data->external_sync_warning = true;
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                        "vkQueueBindSparse(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
                        " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
                        HandleToUint64(fence), HandleToUint64(queue));
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                    HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
                                    HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
                // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
                VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
                SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
                                    HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
        // Build the semaphore wait/signal lists for this submission record.
        std::vector<SEMAPHORE_WAIT> semaphore_waits;
        std::vector<VkSemaphore> semaphore_signals;
        std::vector<VkSemaphore> semaphore_externals;
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
                if (pSemaphore->scope == kSyncScopeInternal) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        // Remember which queue/seq signaled so retirement can resolve the wait.
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    // Waiting consumes the signal.
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                    semaphore_externals.push_back(semaphore);
                    pSemaphore->in_use.fetch_add(1);
                    if (pSemaphore->scope == kSyncScopeExternalTemporary) {
                        // Temporary import reverts to internal scope after the wait.
                        pSemaphore->scope = kSyncScopeInternal;
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
                if (pSemaphore->scope == kSyncScopeInternal) {
                    // Record who will signal (this queue, at the seq of this submission).
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                    // Retire work up until this submit early, we will not see the wait that corresponds to this signal
                    early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
                    if (!dev_data->external_sync_warning) {
                        dev_data->external_sync_warning = true;
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                "vkQueueBindSparse(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
                                " will disable validation of preceding command buffer lifecycle states and the in-use status of "
                                "associated objects.",
                                HandleToUint64(semaphore), HandleToUint64(queue));
        // Sparse binds carry no command buffers; the fence is attached to the last entry only.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
    if (early_retire_seq) {
        RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
10309 VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
10311 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10312 unique_lock_t lock(global_lock);
10313 bool skip = PreCallValidateQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
10316 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
10318 VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10321 PostCallRecordQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
10326 VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10327 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10328 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10329 VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10330 if (result == VK_SUCCESS) {
10331 lock_guard_t lock(global_lock);
10332 SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
10333 sNode->signaler.first = VK_NULL_HANDLE;
10334 sNode->signaler.second = 0;
10335 sNode->signaled = false;
10336 sNode->scope = kSyncScopeInternal;
10341 static bool PreCallValidateImportSemaphore(layer_data *dev_data, VkSemaphore semaphore, const char *caller_name) {
10342 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10343 VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
10346 skip |= ValidateObjectNotInUse(dev_data, sema_node, obj_struct, caller_name, VALIDATION_ERROR_UNDEFINED);
10351 static void PostCallRecordImportSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10352 VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
10353 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10354 if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
10355 if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
10356 sema_node->scope == kSyncScopeInternal) {
10357 sema_node->scope = kSyncScopeExternalTemporary;
10359 sema_node->scope = kSyncScopeExternalPermanent;
10364 #ifdef VK_USE_PLATFORM_WIN32_KHR
10365 VKAPI_ATTR VkResult VKAPI_CALL
10366 ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
10367 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10368 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10370 PreCallValidateImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
10373 result = dev_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
10376 if (result == VK_SUCCESS) {
10377 PostCallRecordImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore,
10378 pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
10384 VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
10385 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10386 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10387 bool skip = PreCallValidateImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
10390 result = dev_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
10393 if (result == VK_SUCCESS) {
10394 PostCallRecordImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
10395 pImportSemaphoreFdInfo->flags);
10400 static void PostCallRecordGetSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10401 VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
10402 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10403 if (sema_node && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10404 // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
10405 sema_node->scope = kSyncScopeExternalPermanent;
10409 #ifdef VK_USE_PLATFORM_WIN32_KHR
10410 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
10411 const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
10413 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10414 VkResult result = dev_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
10416 if (result == VK_SUCCESS) {
10417 PostCallRecordGetSemaphore(dev_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
10423 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
10424 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10425 VkResult result = dev_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
10427 if (result == VK_SUCCESS) {
10428 PostCallRecordGetSemaphore(dev_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
10433 static bool PreCallValidateImportFence(layer_data *dev_data, VkFence fence, const char *caller_name) {
10434 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10436 if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
10437 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10438 HandleToUint64(fence), VALIDATION_ERROR_UNDEFINED,
10439 "Cannot call %s on fence 0x%" PRIx64 " that is currently in use.", caller_name, HandleToUint64(fence));
10444 static void PostCallRecordImportFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
10445 VkFenceImportFlagsKHR flags) {
10446 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10447 if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
10448 if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
10449 fence_node->scope == kSyncScopeInternal) {
10450 fence_node->scope = kSyncScopeExternalTemporary;
10452 fence_node->scope = kSyncScopeExternalPermanent;
10457 #ifdef VK_USE_PLATFORM_WIN32_KHR
10458 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
10459 const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
10460 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10461 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10462 bool skip = PreCallValidateImportFence(dev_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
10465 result = dev_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
10468 if (result == VK_SUCCESS) {
10469 PostCallRecordImportFence(dev_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
10470 pImportFenceWin32HandleInfo->flags);
10476 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
10477 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10478 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10479 bool skip = PreCallValidateImportFence(dev_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
10482 result = dev_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
10485 if (result == VK_SUCCESS) {
10486 PostCallRecordImportFence(dev_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
10491 static void PostCallRecordGetFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
10492 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10494 if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10495 // Export with reference transference becomes external
10496 fence_node->scope = kSyncScopeExternalPermanent;
10497 } else if (fence_node->scope == kSyncScopeInternal) {
10498 // Export with copy transference has a side effect of resetting the fence
10499 fence_node->state = FENCE_UNSIGNALED;
10504 #ifdef VK_USE_PLATFORM_WIN32_KHR
10505 VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
10507 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10508 VkResult result = dev_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
10510 if (result == VK_SUCCESS) {
10511 PostCallRecordGetFence(dev_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
10517 VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
10518 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10519 VkResult result = dev_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
10521 if (result == VK_SUCCESS) {
10522 PostCallRecordGetFence(dev_data, pGetFdInfo->fence, pGetFdInfo->handleType);
10527 VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
10528 const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10529 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10530 VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10531 if (result == VK_SUCCESS) {
10532 lock_guard_t lock(global_lock);
10533 dev_data->eventMap[*pEvent].needsSignaled = false;
10534 dev_data->eventMap[*pEvent].write_in_use = 0;
10535 dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10540 static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
10541 VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
10542 SWAPCHAIN_NODE *old_swapchain_state) {
10543 auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
10545 // TODO: revisit this. some of these rules are being relaxed.
10547 // All physical devices and queue families are required to be able
10548 // to present to any native window on Android; require the
10549 // application to have established support on any other platform.
10550 if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
10551 auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
10552 // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
10553 return (qs.first.gpu == dev_data->physical_device) && qs.second;
10555 const auto &support = surface_state->gpu_queue_support;
10556 bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
10558 if (!is_supported) {
10559 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10560 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ec,
10561 "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
10562 "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
10563 "this surface for at least one queue family of this device.",
10569 if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
10570 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10571 HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS,
10572 "%s: surface has an existing swapchain other than oldSwapchain", func_name))
10575 if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
10576 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10577 HandleToUint64(pCreateInfo->oldSwapchain), DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
10578 "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
10582 if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
10583 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10584 HandleToUint64(dev_data->device), VALIDATION_ERROR_14600d32,
10585 "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
10586 pCreateInfo->imageExtent.height))
10590 auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
10591 if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
10592 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10593 HandleToUint64(dev_data->physical_device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
10594 "%s: surface capabilities not retrieved for this physical device", func_name))
10596 } else { // have valid capabilities
10597 auto &capabilities = physical_device_state->surfaceCapabilities;
10598 // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
10599 if (pCreateInfo->minImageCount < capabilities.minImageCount) {
10600 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10601 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ee,
10602 "%s called with minImageCount = %d, which is outside the bounds returned by "
10603 "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
10604 func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
10608 if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
10609 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10610 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f0,
10611 "%s called with minImageCount = %d, which is outside the bounds returned by "
10612 "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
10613 func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
10617 // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
10618 if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
10619 (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
10620 (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
10621 (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
10622 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10623 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f4,
10624 "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
10625 "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
10626 "maxImageExtent = (%d,%d).",
10627 func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
10628 capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
10629 capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
10632 // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
10633 // VkSurfaceCapabilitiesKHR::supportedTransforms.
10634 if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
10635 !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
10636 // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
10637 // it up a little at a time, and then log it:
10638 std::string errorString = "";
10640 // Here's the first part of the message:
10641 sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
10642 string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
10643 errorString += str;
10644 for (int i = 0; i < 32; i++) {
10645 // Build up the rest of the message:
10646 if ((1 << i) & capabilities.supportedTransforms) {
10647 const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
10648 sprintf(str, " %s\n", newStr);
10649 errorString += str;
10652 // Log the message that we've built up:
10653 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10654 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009fe, "%s.", errorString.c_str()))
10658 // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
10659 // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
10660 if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
10661 !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
10662 // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
10663 // it up a little at a time, and then log it:
10664 std::string errorString = "";
10666 // Here's the first part of the message:
10667 sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n",
10668 func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
10669 errorString += str;
10670 for (int i = 0; i < 32; i++) {
10671 // Build up the rest of the message:
10672 if ((1 << i) & capabilities.supportedCompositeAlpha) {
10673 const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
10674 sprintf(str, " %s\n", newStr);
10675 errorString += str;
10678 // Log the message that we've built up:
10679 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10680 HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a00, "%s.", errorString.c_str()))
10683 // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
10684 if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
10685 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10686 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f6,
10687 "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
10688 pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
10691 // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
10692 if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
10693 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10694 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f8,
10695 "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
10696 func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
10701 // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
10702 if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
10703 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10704 HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
10705 "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
10708 // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
10709 bool foundFormat = false;
10710 bool foundColorSpace = false;
10711 bool foundMatch = false;
10712 for (auto const &format : physical_device_state->surface_formats) {
10713 if (pCreateInfo->imageFormat == format.format) {
10714 // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
10715 foundFormat = true;
10716 if (pCreateInfo->imageColorSpace == format.colorSpace) {
10721 if (pCreateInfo->imageColorSpace == format.colorSpace) {
10722 foundColorSpace = true;
10727 if (!foundFormat) {
10728 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10729 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
10730 "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
10731 pCreateInfo->imageFormat))
10734 if (!foundColorSpace) {
10735 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10736 HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
10737 "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
10738 pCreateInfo->imageColorSpace))
10744 // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
10745 if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
10746 // FIFO is required to always be supported
10747 if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
10748 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10749 HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
10750 "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
10754 // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
10755 bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
10756 pCreateInfo->presentMode) != physical_device_state->present_modes.end();
10758 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10759 HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a02,
10760 "%s called with a non-supported presentMode (i.e. %s).", func_name,
10761 string_VkPresentModeKHR(pCreateInfo->presentMode)))
10765 // Validate state for shared presentable case
10766 if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
10767 VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
10768 if (!dev_data->extensions.vk_khr_shared_presentable_image) {
10769 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10770 HandleToUint64(dev_data->device), DRAWSTATE_EXTENSION_NOT_ENABLED,
10771 "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
10773 func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
10775 } else if (pCreateInfo->minImageCount != 1) {
10776 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10777 HandleToUint64(dev_data->device), VALIDATION_ERROR_14600ace,
10778 "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
10780 func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
// Record side of vkCreateSwapchainKHR. On VK_SUCCESS: allocate a SWAPCHAIN_NODE,
// flag shared-presentable modes, point the surface at the new swapchain, and
// register it in swapchainMap. On failure: clear the surface's swapchain pointer.
// Either way, per the spec, oldSwapchain is retired, so mark it replaced.
// NOTE(review): this extract is missing several closing-brace/else lines between
// the visible statements — confirm structure against the upstream file.
10788 static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
10789 VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
10790 SWAPCHAIN_NODE *old_swapchain_state) {
10791 if (VK_SUCCESS == result) {
10792 lock_guard_t lock(global_lock);
10793 auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
// Shared-presentable present modes get special acquire/layout tracking later.
10794 if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
10795 VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
10796 swapchain_state->shared_presentable = true;
// The raw pointer below stays valid because swapchainMap owns the node.
10798 surface_state->swapchain = swapchain_state.get();
10799 dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
// (failure path) the surface has no live swapchain
10801 surface_state->swapchain = nullptr;
10803 // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
10804 if (old_swapchain_state) {
10805 old_swapchain_state->replaced = true;
10807 surface_state->old_swapchain = old_swapchain_state;
10811 VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10812 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
10813 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10814 auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
10815 auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
10817 if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapChainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
10818 return VK_ERROR_VALIDATION_FAILED_EXT;
10821 VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10823 PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
// Layer intercept for vkDestroySwapchainKHR: tear down all tracking state for
// the swapchain's images (layout map, subresource map, memory bindings, image
// map), unlink it from its surface, erase it from swapchainMap, then forward
// to the driver unless validation flagged a problem.
// NOTE(review): the declaration of `skip` (presumably `bool skip = false;`) and
// several closing braces are on lines elided from this extract — confirm.
10828 VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10829 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10832 unique_lock_t lock(global_lock);
10833 auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
10834 if (swapchain_data) {
10835 if (swapchain_data->images.size() > 0) {
10836 for (auto swapchain_image : swapchain_data->images) {
// Drop every per-subresource layout record for this swapchain image.
10837 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10838 if (image_sub != dev_data->imageSubresourceMap.end()) {
10839 for (auto imgsubpair : image_sub->second) {
10840 auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10841 if (image_item != dev_data->imageLayoutMap.end()) {
10842 dev_data->imageLayoutMap.erase(image_item);
10845 dev_data->imageSubresourceMap.erase(image_sub);
// Swapchain images have no real VkDeviceMemory; clear the synthetic bindings.
10847 skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
10848 dev_data->imageMap.erase(swapchain_image);
// Unlink from the owning surface (current and/or old swapchain slot).
10852 auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
10853 if (surface_state) {
10854 if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
10855 if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
10858 dev_data->swapchainMap.erase(swapchain);
// Forward to the driver only if no validation error was raised above.
10861 if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
// Validate vkGetSwapchainImagesKHR call ordering: warn if the app asks for
// image handles (non-NULL pSwapchainImages) without first querying the count,
// and error if the requested count exceeds the count previously returned.
// Returns true (via `skip`) when the call should be skipped.
// NOTE(review): the `bool skip = false;` declaration and the `return skip;`
// line are elided from this extract — confirm against upstream.
10864 static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
10865 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
10867 if (swapchain_state && pSwapchainImages) {
10868 lock_guard_t lock(global_lock);
10869 // Compare the preliminary value of *pSwapchainImageCount with the value this time:
10870 if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
10871 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10872 HandleToUint64(device), SWAPCHAIN_PRIOR_COUNT,
10873 "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has "
10874 "been seen for pSwapchainImages.");
10875 } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
10877 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10878 HandleToUint64(device), SWAPCHAIN_INVALID_COUNT,
10879 "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
10880 "value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
10881 *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
// Record side of vkGetSwapchainImagesKHR. When image handles are returned,
// synthesize an IMAGE_STATE for each new swapchain image (the app never calls
// vkCreateImage for these) and seed its layout as UNDEFINED. Also advances the
// swapchain's query-state machine (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS)
// and caches the returned count for later count-mismatch validation.
10887 static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
10888 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
10889 lock_guard_t lock(global_lock);
10891 if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
10893 if (pSwapchainImages) {
10894 if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
10895 swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
10897 for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
10898 if (swapchain_state->images[i] != VK_NULL_HANDLE) continue; // Already retrieved this.
// Every swapchain image starts life in UNDEFINED layout.
10900 IMAGE_LAYOUT_NODE image_layout_node;
10901 image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10902 image_layout_node.format = swapchain_state->createInfo.imageFormat;
10903 // Add imageMap entries for each swapchain image
// Build a surrogate VkImageCreateInfo mirroring the swapchain's createInfo so
// downstream image validation can treat these like ordinary 2D images.
10904 VkImageCreateInfo image_ci = {};
10905 image_ci.flags = 0;
10906 image_ci.imageType = VK_IMAGE_TYPE_2D;
10907 image_ci.format = swapchain_state->createInfo.imageFormat;
10908 image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
10909 image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
10910 image_ci.extent.depth = 1;
10911 image_ci.mipLevels = 1;
10912 image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
10913 image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
10914 image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
10915 image_ci.usage = swapchain_state->createInfo.imageUsage;
10916 image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
10917 device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
10918 auto &image_state = device_data->imageMap[pSwapchainImages[i]];
// Contents are undefined until first acquire/write; mark memory as invalid.
10919 image_state->valid = false;
// Sentinel binding: swapchain images are not backed by app-visible VkDeviceMemory.
10920 image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10921 swapchain_state->images[i] = pSwapchainImages[i];
10922 ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10923 device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10924 device_data->imageLayoutMap[subpair] = image_layout_node;
// Count-only query path: remember the count for later validation.
10928 if (*pSwapchainImageCount) {
10929 if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
10930 swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
10932 swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
// Layer intercept for vkGetSwapchainImagesKHR: validate call ordering, forward
// to the driver, then record returned images on SUCCESS/INCOMPLETE.
// NOTE(review): the `if (!skip)` guard around the dispatch call appears to be
// on an elided line — as shown, `result` defaults to
// VK_ERROR_VALIDATION_FAILED_EXT when validation fails. Confirm upstream.
10936 VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
10937 VkImage *pSwapchainImages) {
10938 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10939 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10941 auto swapchain_state = GetSwapchainNode(device_data, swapchain);
10942 bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
10945 result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
// VK_INCOMPLETE still returns valid image handles, so record in both cases.
10948 if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
10949 PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
// Layer intercept for vkQueuePresentKHR. Validation phase (under global_lock):
//  - wait semaphores must have a pending signal;
//  - each pImageIndices[i] must be in range, acquired, memory-valid, and in
//    PRESENT_SRC_KHR (or SHARED_PRESENT_KHR when the extension is enabled);
//  - presenting queue must be known-supported for the surface (except Android);
//  - VkPresentRegionsKHR rectangles must fit the swapchain extent/layers;
//  - VkPresentTimesInfoGOOGLE swapchainCount must match pPresentInfo's.
// Record phase (after dispatch): retire wait-semaphore signals and mark each
// successfully presented image as no longer acquired.
// NOTE(review): `bool skip = false;`, several closing braces / `if (skip)`
// guards, and the final `return result;` are on elided lines — confirm.
10954 VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10955 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10958 lock_guard_t lock(global_lock);
10959 auto queue_state = GetQueueState(dev_data, queue);
// A present waits on semaphores; each must have a signal in flight or queued.
10961 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10962 auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10963 if (pSemaphore && !pSemaphore->signaled) {
10964 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10965 DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10966 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10967 HandleToUint64(queue), HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
// Per-swapchain image checks.
10971 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10972 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10973 if (swapchain_data) {
10974 if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
10976 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10977 HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
10978 "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
10979 pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
10981 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
10982 auto image_state = GetImageState(dev_data, image);
// Shared-presentable images stay in SHARED_PRESENT layout permanently.
10984 if (image_state->shared_presentable) {
10985 image_state->layout_locked = true;
10988 skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
10990 if (!image_state->acquired) {
10992 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10993 HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
10994 "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
// Every tracked subresource layout must be presentable.
10997 vector<VkImageLayout> layouts;
10998 if (FindLayouts(dev_data, image, layouts)) {
10999 for (auto layout : layouts) {
11000 if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!dev_data->extensions.vk_khr_shared_presentable_image ||
11001 (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
11002 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11003 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), VALIDATION_ERROR_11200a20,
11004 "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
11005 "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
11006 string_VkImageLayout(layout));
11012 // All physical devices and queue families are required to be able
11013 // to present to any native window on Android; require the
11014 // application to have established support on any other platform.
11015 if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
11016 auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11017 auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
11019 if (support_it == surface_state->gpu_queue_support.end()) {
11021 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11022 HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE,
11023 "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
11024 } else if (!support_it->second) {
11026 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11027 HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_31800a18,
11028 "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
// pNext-chain extension structs: incremental-present regions and GOOGLE display timing.
11033 if (pPresentInfo && pPresentInfo->pNext) {
11034 // Verify ext struct
11035 const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
11036 if (present_regions) {
11037 for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
11038 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11039 assert(swapchain_data);
11040 VkPresentRegionKHR region = present_regions->pRegions[i];
11041 for (uint32_t j = 0; j < region.rectangleCount; ++j) {
11042 VkRectLayerKHR rect = region.pRectangles[j];
11043 if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
11044 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11045 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
11046 VALIDATION_ERROR_11e009da,
11047 "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
11048 "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
11049 "than the corresponding swapchain's imageExtent.width (%i).",
11050 i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
11052 if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
11053 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11054 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
11055 VALIDATION_ERROR_11e009da,
11056 "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
11057 "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
11058 "than the corresponding swapchain's imageExtent.height (%i).",
11059 i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
11061 if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
11063 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11064 HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_11e009dc,
11065 "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
11066 "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
11067 i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
11073 const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
11074 if (present_times_info) {
11075 if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
11077 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11078 HandleToUint64(pPresentInfo->pSwapchains[0]),
11080 VALIDATION_ERROR_118009be,
11081 "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
11082 "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
11083 "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
11084 present_times_info->swapchainCount, pPresentInfo->swapchainCount);
// (reached when validation flagged an error)
11090 return VK_ERROR_VALIDATION_FAILED_EXT;
11093 VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11095 if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11096 // Semaphore waits occur before error generation, if the call reached
11097 // the ICD. (Confirm?)
11098 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11099 auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11101 pSemaphore->signaler.first = VK_NULL_HANDLE;
11102 pSemaphore->signaled = false;
11106 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11107 // Note: this is imperfect, in that we can get confused about what
11108 // did or didn't succeed-- but if the app does that, it's confused
11109 // itself just as much.
11110 auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11112 if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue; // this present didn't actually happen.
11114 // Mark the image as having been released to the WSI
11115 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11116 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11117 auto image_state = GetImageState(dev_data, image);
11118 image_state->acquired = false;
11121 // Note: even though presentation is directed to a queue, there is no
11122 // direct ordering between QP and subsequent work, so QP (and its
11123 // semaphore waits) /never/ participate in any completion proof.
11129 static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
11130 const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11131 std::vector<SURFACE_STATE *> &surface_state,
11132 std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11133 if (pCreateInfos) {
11134 lock_guard_t lock(global_lock);
11135 for (uint32_t i = 0; i < swapchainCount; i++) {
11136 surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
11137 old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
11138 std::stringstream func_name;
11139 func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
11140 if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
11141 old_swapchain_state[i])) {
// Record side of vkCreateSharedSwapchainsKHR: mirrors
// PostCallRecordCreateSwapchainKHR but over every element. On success each
// swapchain gets a SWAPCHAIN_NODE and is linked to its surface; on failure the
// surfaces' swapchain pointers are cleared. Old swapchains are always retired.
// NOTE(review): some closing braces / else lines are elided in this extract.
11149 static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
11150 const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11151 std::vector<SURFACE_STATE *> &surface_state,
11152 std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11153 if (VK_SUCCESS == result) {
11154 for (uint32_t i = 0; i < swapchainCount; i++) {
11155 auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
// Shared-presentable present modes get special acquire/layout tracking later.
11156 if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
11157 VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
11158 swapchain_state->shared_presentable = true;
11160 surface_state[i]->swapchain = swapchain_state.get();
11161 dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
// (failure path) no live swapchain for any of the surfaces
11164 for (uint32_t i = 0; i < swapchainCount; i++) {
11165 surface_state[i]->swapchain = nullptr;
11168 // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
11169 for (uint32_t i = 0; i < swapchainCount; i++) {
11170 if (old_swapchain_state[i]) {
11171 old_swapchain_state[i]->replaced = true;
11173 surface_state[i]->old_swapchain = old_swapchain_state[i];
// Layer intercept for vkCreateSharedSwapchainsKHR (VK_KHR_display_swapchain):
// validate all create-infos, forward to the driver, then record state.
// NOTE(review): the `VkResult result =` half of the dispatch call and the final
// `return result;` appear to be on elided lines — confirm against upstream.
11178 VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11179 const VkSwapchainCreateInfoKHR *pCreateInfos,
11180 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11181 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Per-element state collected by validation and consumed by the record step.
11182 std::vector<SURFACE_STATE *> surface_state;
11183 std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
11185 if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11186 old_swapchain_state)) {
11187 return VK_ERROR_VALIDATION_FAILED_EXT;
11191 dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11193 PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11194 old_swapchain_state);
// Validate vkAcquireNextImageKHR:
//  - at least one of semaphore/fence must be provided (else completion is
//    undetectable);
//  - the semaphore must not already be signaled; the fence must be submittable;
//  - the swapchain must not have been replaced via oldSwapchain;
//  - the app must not hold more images than surface minImageCount allows;
//  - warn if the app never retrieved the swapchain's images.
// Returns true (via `skip`) when the call should be skipped.
// NOTE(review): `bool skip = false;`, the `if (pFence)` guard, the
// acquired-image count argument to log_msg, and `return skip;` are on elided
// lines. Also, `swapchain_data` is dereferenced without a null check here —
// verify upstream guards against an unknown swapchain handle.
11199 static bool PreCallValidateAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11200 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11202 if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11203 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11204 HandleToUint64(device), DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE,
11205 "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
11206 "determine the completion of this operation.");
11209 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
// Only internally-scoped semaphores have reliable signal state to check.
11210 if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
11211 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11212 HandleToUint64(semaphore), VALIDATION_ERROR_16400a0c,
11213 "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state.");
11216 auto pFence = GetFenceNode(dev_data, fence);
11218 skip |= ValidateFenceForSubmit(dev_data, pFence);
11221 auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
11222 if (swapchain_data->replaced) {
11223 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11224 HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_REPLACED,
11225 "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still present any images it "
11226 "has acquired, but cannot acquire any more.");
// Can only check the acquire limit if surface capabilities were queried.
11229 auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11230 if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
11231 uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
11232 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
// App may hold at most (imageCount - minImageCount) images at once.
11233 if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
11235 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11236 HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES,
11237 "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
11242 if (swapchain_data->images.size() == 0) {
11243 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11244 HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND,
11245 "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
11246 "vkGetSwapchainImagesKHR after swapchain creation.");
// Record side of vkAcquireNextImageKHR (called on SUCCESS/SUBOPTIMAL): mark the
// fence in-flight and the semaphore signaled — with no queue signaler, since
// acquire is not a queue operation — and mark the returned image as acquired.
11251 static void PostCallRecordAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11252 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11253 auto pFence = GetFenceNode(dev_data, fence);
11254 if (pFence && pFence->scope == kSyncScopeInternal) {
11255 // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
11257 pFence->state = FENCE_INFLIGHT;
11258 pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
11261 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
11262 if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
11263 // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
11264 // temporary import
11265 pSemaphore->signaled = true;
11266 pSemaphore->signaler.first = VK_NULL_HANDLE;
11269 // Mark the image as acquired.
11270 auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
11271 auto image = swapchain_data->images[*pImageIndex];
11272 auto image_state = GetImageState(dev_data, image);
11273 image_state->acquired = true;
// Propagate the swapchain's shared-presentable property to the image state.
11274 image_state->shared_presentable = swapchain_data->shared_presentable;
// Layer intercept for vkAcquireNextImageKHR: validate (under global_lock),
// forward to the driver, then record acquisition state on SUCCESS/SUBOPTIMAL.
// NOTE(review): the lock release before dispatch, re-lock for the record step,
// and the final `return result;` are on elided lines — confirm upstream.
11277 VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11278 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11279 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11281 unique_lock_t lock(global_lock);
11282 bool skip = PreCallValidateAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
11285 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11287 VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
// SUBOPTIMAL still delivers a valid image index, so record it too.
11290 if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11291 PostCallRecordAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
// Layer intercept for vkEnumeratePhysicalDevices. Tracks the instance's
// count-then-details query pattern: warns when details are requested without a
// prior count query, or when the passed count differs from the actual count.
// On the details call, creates per-physical-device state and caches features.
// NOTE(review): `bool skip = false;`, the lock, the `if (skip)` guard before
// the early return, and the final `return result;` are on elided lines.
11298 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11299 VkPhysicalDevice *pPhysicalDevices) {
11301 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11302 assert(instance_data);
11304 // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11305 if (NULL == pPhysicalDevices) {
11306 instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11308 if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
11309 // Flag warning here. You can call this without having queried the count, but it may not be
11310 // robust on platforms with multiple physical devices.
11311 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11312 0, DEVLIMITS_MISSING_QUERY_COUNT,
11313 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call "
11314 "vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11315 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11316 else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
11317 // Having actual count match count from app is not a requirement, so this can be a warning
11318 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11319 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
11320 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by "
11321 "this instance is %u.",
11322 *pPhysicalDeviceCount, instance_data->physical_devices_count);
11324 instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
// (reached when validation flagged an error)
11327 return VK_ERROR_VALIDATION_FAILED_EXT;
11329 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11330 if (NULL == pPhysicalDevices) {
// Count-only query: remember the actual count for the mismatch warning above.
11331 instance_data->physical_devices_count = *pPhysicalDeviceCount;
11332 } else if (result == VK_SUCCESS) { // Save physical devices
11333 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11334 auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11335 phys_device_state.phys_device = pPhysicalDevices[i];
11336 // Init actual features for each physical device
11337 instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11343 // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
// Enforces the count-then-details pattern per physical device: warn if details
// are requested before a count query, or if the passed count differs from the
// largest count previously returned. qfp_null indicates a count-only call.
// Returns true (via `skip`) if a warning was emitted.
// NOTE(review): `bool skip = false;` and the `if (!qfp_null)` wrapper around
// the details checks, plus `return skip;`, are on elided lines — confirm.
11344 static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11345 PHYSICAL_DEVICE_STATE *pd_state,
11346 uint32_t requested_queue_family_property_count, bool qfp_null,
11347 const char *caller_name) {
11350 // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
11351 if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11353 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11354 HandleToUint64(pd_state->phys_device), DEVLIMITS_MISSING_QUERY_COUNT,
11355 "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
11356 "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
11357 caller_name, caller_name);
11358 // Then verify that pCount that is passed in on second call matches what was returned
11359 } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
11361 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11362 HandleToUint64(pd_state->phys_device), DEVLIMITS_COUNT_MISMATCH,
11363 "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
11364 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
11365 ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
11366 "previously obtained by calling %s with NULL pQueueFamilyProperties.",
11367 caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
// A details call has been seen; advance the per-device query state machine.
11369 pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11375 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11376 PHYSICAL_DEVICE_STATE *pd_state,
11377 uint32_t *pQueueFamilyPropertyCount,
11378 VkQueueFamilyProperties *pQueueFamilyProperties) {
11379 return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11380 (nullptr == pQueueFamilyProperties),
11381 "vkGetPhysicalDeviceQueueFamilyProperties()");
11384 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_layer_data *instance_data,
11385 PHYSICAL_DEVICE_STATE *pd_state,
11386 uint32_t *pQueueFamilyPropertyCount,
11387 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11388 return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11389 (nullptr == pQueueFamilyProperties),
11390 "vkGetPhysicalDeviceQueueFamilyProperties2[KHR]()");
11393 // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
11394 static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11395 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11396 if (!pQueueFamilyProperties) {
11397 if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
11398 pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11399 pd_state->queue_family_count = count;
11400 } else { // Save queue family properties
11401 pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11402 pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
11404 pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
11405 for (uint32_t i = 0; i < count; ++i) {
11406 pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
11411 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11412 VkQueueFamilyProperties *pQueueFamilyProperties) {
11413 VkQueueFamilyProperties2KHR *pqfp = nullptr;
11414 std::vector<VkQueueFamilyProperties2KHR> qfp;
11416 if (pQueueFamilyProperties) {
11417 for (uint32_t i = 0; i < count; ++i) {
11418 qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
11419 qfp[i].pNext = nullptr;
11420 qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
11424 StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
11427 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11428 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11429 StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
// Layer intercept for vkGetPhysicalDeviceQueueFamilyProperties: validate the
// two-call idiom under the global lock, call down the chain, then record the
// returned properties on the physical-device state.
// NOTE(review): the lines consuming 'skip' (lock release / early return before the
// dispatch, and re-lock before the record) fall in elided lines of this excerpt.
11432 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
11433 uint32_t *pQueueFamilyPropertyCount,
11434 VkQueueFamilyProperties *pQueueFamilyProperties) {
11435 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11436 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11437 assert(physical_device_state);
11438 unique_lock_t lock(global_lock);
11440 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
11441 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11447 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
11448 pQueueFamilyProperties);
11451 PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
// Layer intercept for vkGetPhysicalDeviceQueueFamilyProperties2 (core promoted
// form): same validate/dispatch/record flow as the 1.0 entry point.
// NOTE(review): the 'skip' consumption and lock release/reacquire lines are elided
// in this excerpt.
11454 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
11455 uint32_t *pQueueFamilyPropertyCount,
11456 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11457 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11458 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11459 assert(physical_device_state);
11460 unique_lock_t lock(global_lock);
11461 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11462 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11466 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
11467 pQueueFamilyProperties);
11469 PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11470 pQueueFamilyProperties);
// Layer intercept for the KHR-suffixed alias; shares validation and record helpers
// with the core 2 entry point, differing only in the dispatch-table slot called.
// NOTE(review): the 'skip' consumption and lock release/reacquire lines are elided
// in this excerpt.
11473 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
11474 uint32_t *pQueueFamilyPropertyCount,
11475 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11476 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11477 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11478 assert(physical_device_state);
11479 unique_lock_t lock(global_lock);
11480 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11481 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11485 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
11486 pQueueFamilyProperties);
11488 PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11489 pQueueFamilyProperties);
11492 template <typename TCreateInfo, typename FPtr>
11493 static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
11494 VkSurfaceKHR *pSurface, FPtr fptr) {
11495 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11497 // Call down the call chain:
11498 VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11500 if (result == VK_SUCCESS) {
11501 unique_lock_t lock(global_lock);
11502 instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11509 VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11511 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11512 unique_lock_t lock(global_lock);
11513 auto surface_state = GetSurfaceState(instance_data, surface);
11515 if ((surface_state) && (surface_state->swapchain)) {
11516 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11517 HandleToUint64(instance), VALIDATION_ERROR_26c009e4,
11518 "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
11520 instance_data->surface_map.erase(surface);
11523 instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11527 VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
11528 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11529 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android surface creation: delegate to the shared CreateSurface helper.
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif  // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
// iOS (MoltenVK) surface creation: delegate to the shared CreateSurface helper.
VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateIOSSurfaceMVK);
}
#endif  // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
// macOS (MoltenVK) surface creation: delegate to the shared CreateSurface helper.
VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMacOSSurfaceMVK);
}
#endif  // VK_USE_PLATFORM_MACOS_MVK
11553 #ifdef VK_USE_PLATFORM_MIR_KHR
11554 VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11555 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11556 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
// Intercept for vkGetPhysicalDeviceMirPresentationSupportKHR: validate
// queueFamilyIndex (VALIDATION_ERROR_2d2009e2) against recorded state, return
// VK_FALSE when validation fails, otherwise call down the chain.
// NOTE(review): 'bool skip = false;', the lock release, and the capture/return of
// the down-chain VkBool32 fall in elided lines of this excerpt.
11559 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11560 uint32_t queueFamilyIndex, MirConnection *connection) {
11562 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11564 unique_lock_t lock(global_lock);
11565 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11567 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
11568 "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
11572 if (skip) return VK_FALSE;
11574 // Call down the call chain:
11576 instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
11580 #endif // VK_USE_PLATFORM_MIR_KHR
11582 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
11583 VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11584 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11585 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
// Intercept for vkGetPhysicalDeviceWaylandPresentationSupportKHR: validate
// queueFamilyIndex (VALIDATION_ERROR_2f000a34), return VK_FALSE on validation
// failure, otherwise call down the chain.
// NOTE(review): 'bool skip = false;', the lock release, and the capture/return of
// the down-chain VkBool32 fall in elided lines of this excerpt.
11588 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11589 uint32_t queueFamilyIndex,
11590 struct wl_display *display) {
11592 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11594 unique_lock_t lock(global_lock);
11595 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11597 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
11598 "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
11602 if (skip) return VK_FALSE;
11604 // Call down the call chain:
11606 instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
11610 #endif // VK_USE_PLATFORM_WAYLAND_KHR
11612 #ifdef VK_USE_PLATFORM_WIN32_KHR
11613 VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11614 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11615 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
// Intercept for vkGetPhysicalDeviceWin32PresentationSupportKHR: validate
// queueFamilyIndex (VALIDATION_ERROR_2f200a3a), return VK_FALSE on validation
// failure, otherwise call down and return the driver's answer.
// NOTE(review): 'bool skip = false;', the lock release, and the final
// 'return result;' fall in elided lines of this excerpt.
11618 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
11619 uint32_t queueFamilyIndex) {
11621 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11623 unique_lock_t lock(global_lock);
11624 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11626 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
11627 "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
11631 if (skip) return VK_FALSE;
11633 // Call down the call chain:
11634 VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
11638 #endif // VK_USE_PLATFORM_WIN32_KHR
11640 #ifdef VK_USE_PLATFORM_XCB_KHR
11641 VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11642 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11643 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
// Intercept for vkGetPhysicalDeviceXcbPresentationSupportKHR: validate
// queueFamilyIndex (VALIDATION_ERROR_2f400a40), return VK_FALSE on validation
// failure, otherwise call down and return the driver's answer.
// NOTE(review): 'bool skip = false;', the lock release, and the final
// 'return result;' fall in elided lines of this excerpt.
11646 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11647 uint32_t queueFamilyIndex, xcb_connection_t *connection,
11648 xcb_visualid_t visual_id) {
11650 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11652 unique_lock_t lock(global_lock);
11653 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11655 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
11656 "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
11660 if (skip) return VK_FALSE;
11662 // Call down the call chain:
11663 VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
11664 connection, visual_id);
11668 #endif // VK_USE_PLATFORM_XCB_KHR
11670 #ifdef VK_USE_PLATFORM_XLIB_KHR
11671 VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11672 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11673 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
// Intercept for vkGetPhysicalDeviceXlibPresentationSupportKHR: validate
// queueFamilyIndex (VALIDATION_ERROR_2f600a46), return VK_FALSE on validation
// failure, otherwise call down the chain.
// NOTE(review): 'bool skip = false;', the lock release, and the capture/return of
// the down-chain VkBool32 fall in elided lines of this excerpt.
11676 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11677 uint32_t queueFamilyIndex, Display *dpy,
11678 VisualID visualID) {
11680 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11682 unique_lock_t lock(global_lock);
11683 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11685 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
11686 "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
11690 if (skip) return VK_FALSE;
11692 // Call down the call chain:
11694 instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
11698 #endif // VK_USE_PLATFORM_XLIB_KHR
// Intercept for vkGetPhysicalDeviceSurfaceCapabilitiesKHR: call down, and on
// success cache the capabilities and advance the query state to QUERY_DETAILS.
// NOTE(review): the declaration of 'result' (and the final 'return result;') fall
// in elided lines of this excerpt — line 11709 is the continuation of that
// assignment.
11700 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11701 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
11702 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11704 unique_lock_t lock(global_lock);
11705 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11709 instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
11711 if (result == VK_SUCCESS) {
11712 physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11713 physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
11719 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
11720 VkPhysicalDevice physicalDevice,
11721 VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11722 unique_lock_t lock(global_lock);
11723 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11724 physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11725 physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
// Intercept for vkGetPhysicalDeviceSurfaceCapabilities2KHR: call down, record the
// capabilities on success via the post-call helper.
// NOTE(review): the declaration of 'result' and the final 'return result;' fall in
// elided lines of this excerpt — line 11734 is the continuation of the assignment.
11728 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
11729 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11730 VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11731 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11734 instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
11736 if (result == VK_SUCCESS) {
11737 PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
11743 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
11744 VkPhysicalDevice physicalDevice,
11745 VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11746 unique_lock_t lock(global_lock);
11747 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11748 physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11749 physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
11750 physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
11751 physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
11752 physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
11753 physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
11754 physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
11755 physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
11756 physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
11757 physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
11758 physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
// Intercept for vkGetPhysicalDeviceSurfaceCapabilities2EXT: call down, record the
// capabilities on success via the EXT post-call helper.
// NOTE(review): the declaration of 'result' and the final 'return result;' fall in
// elided lines of this excerpt — line 11766 is the continuation of the assignment.
11761 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11762 VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11763 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11766 instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
11768 if (result == VK_SUCCESS) {
11769 PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
// Intercept for vkGetPhysicalDeviceSurfaceSupportKHR: validate queueFamilyIndex
// (VALIDATION_ERROR_2ee009ea), then call down and, on success, cache per-(device,
// queue family) presentation support on the surface state.
// NOTE(review): 'bool skip = false;', the lock release, the declaration of
// 'result', and the final 'return result;' fall in elided lines of this excerpt.
11775 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
11776 VkSurfaceKHR surface, VkBool32 *pSupported) {
11778 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11780 unique_lock_t lock(global_lock);
11781 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11782 auto surface_state = GetSurfaceState(instance_data, surface);
11784 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
11785 "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
11789 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11792 instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
11794 if (result == VK_SUCCESS) {
11795 surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
// Intercept for vkGetPhysicalDeviceSurfacePresentModesKHR: warn when the app skips
// the count query or passes a count that differs from the one previously returned,
// then call down and cache the returned modes / advance the call state.
// NOTE(review): the switch's case labels (UNCALLED / QUERY_COUNT-and-later),
// 'bool skip = false;', the lock release/reacquire, and 'return result;' fall in
// elided lines of this excerpt — the two warning blocks below belong to those
// separate cases.
11801 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11802 uint32_t *pPresentModeCount,
11803 VkPresentModeKHR *pPresentModes) {
11805 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11806 unique_lock_t lock(global_lock);
11807 // TODO: this isn't quite right. available modes may differ by surface AND physical device.
11808 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11809 auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
11811 if (pPresentModes) {
11812 // Compare the preliminary value of *pPresentModeCount with the value this time:
11813 auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
11814 switch (call_state) {
11816 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11817 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11818 DEVLIMITS_MUST_QUERY_COUNT,
11819 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior "
11820 "positive value has been seen for pPresentModeCount.");
11823 // both query count and query details
11824 if (*pPresentModeCount != prev_mode_count) {
11825 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11826 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11827 DEVLIMITS_COUNT_MISMATCH,
11828 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs "
11829 "from the value (%u) that was returned when pPresentModes was NULL.",
11830 *pPresentModeCount, prev_mode_count);
11837 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11839 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
// On success (or incomplete), record what came back: grow the cached mode list as
// needed and promote the call state (count-only vs full details).
11842 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11845 if (*pPresentModeCount) {
11846 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11847 if (*pPresentModeCount > physical_device_state->present_modes.size())
11848 physical_device_state->present_modes.resize(*pPresentModeCount);
11850 if (pPresentModes) {
11851 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11852 for (uint32_t i = 0; i < *pPresentModeCount; i++) {
11853 physical_device_state->present_modes[i] = pPresentModes[i];
// Intercept for vkGetPhysicalDeviceSurfaceFormatsKHR: warn on two-call-idiom
// violations, then call down and cache the returned formats / advance call state.
// NOTE(review): case labels, 'bool skip = false;', lock handling, and
// 'return result;' fall in elided lines of this excerpt. Also note the second
// warning's text says "greater than" while the visible condition is '!=' — the
// message over-states the check; confirm against the full source.
11861 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11862 uint32_t *pSurfaceFormatCount,
11863 VkSurfaceFormatKHR *pSurfaceFormats) {
11865 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11866 unique_lock_t lock(global_lock);
11867 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11868 auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
11870 if (pSurfaceFormats) {
11871 auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
11873 switch (call_state) {
11875 // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application
11877 // previously call this function with a NULL value of pSurfaceFormats:
11878 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11879 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11880 DEVLIMITS_MUST_QUERY_COUNT,
11881 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior "
11882 "positive value has been seen for pSurfaceFormats.");
11885 if (prev_format_count != *pSurfaceFormatCount) {
11886 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11887 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11888 DEVLIMITS_COUNT_MISMATCH,
11889 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with "
11890 "pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned "
11891 "when pSurfaceFormatCount was NULL.",
11892 *pSurfaceFormatCount, prev_format_count);
11899 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11901 // Call down the call chain:
11902 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
// On success (or incomplete), grow the cached format list and promote call state.
11905 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11908 if (*pSurfaceFormatCount) {
11909 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11910 if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
11911 physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
11913 if (pSurfaceFormats) {
11914 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11915 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11916 physical_device_state->surface_formats[i] = pSurfaceFormats[i];
11923 static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
11924 uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
11925 unique_lock_t lock(global_lock);
11926 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11927 if (*pSurfaceFormatCount) {
11928 if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
11929 physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
11931 if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
11932 physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
11934 if (pSurfaceFormats) {
11935 if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
11936 physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
11938 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11939 physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
11944 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
11945 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11946 uint32_t *pSurfaceFormatCount,
11947 VkSurfaceFormat2KHR *pSurfaceFormats) {
11948 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11949 auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
11950 pSurfaceFormatCount, pSurfaceFormats);
11951 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11952 PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
11957 // VK_EXT_debug_utils commands
11958 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
11959 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11960 VkResult result = VK_SUCCESS;
11961 if (pNameInfo->pObjectName) {
11962 dev_data->report_data->debugUtilsObjectNameMap->insert(
11963 std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
11965 dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
11967 if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectNameEXT) {
11968 result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
11973 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
11974 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11975 VkResult result = VK_SUCCESS;
11976 if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectTagEXT) {
11977 result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
11982 VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11983 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11984 BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
11985 if (nullptr != dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
11986 dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
11990 VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
11991 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11992 if (nullptr != dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
11993 dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
11995 EndQueueDebugUtilsLabel(dev_data->report_data, queue);
11998 VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11999 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
12000 InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
12001 if (nullptr != dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
12002 dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
12006 VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
12007 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12008 BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
12009 if (nullptr != dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
12010 dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
12014 VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
12015 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12016 if (nullptr != dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
12017 dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
12019 EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
12022 VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
12023 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12024 InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
12025 if (nullptr != dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
12026 dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
12030 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
12031 const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
12032 const VkAllocationCallbacks *pAllocator,
12033 VkDebugUtilsMessengerEXT *pMessenger) {
12034 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12035 VkResult result = instance_data->dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
12037 if (VK_SUCCESS == result) {
12038 result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
12043 VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
12044 const VkAllocationCallbacks *pAllocator) {
12045 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12046 instance_data->dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
12047 layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
12050 VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
12051 VkDebugUtilsMessageTypeFlagsEXT messageTypes,
12052 const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
12053 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12054 instance_data->dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
12057 // VK_EXT_debug_report commands
12058 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
12059 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
12060 const VkAllocationCallbacks *pAllocator,
12061 VkDebugReportCallbackEXT *pMsgCallback) {
12062 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12063 VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
12064 if (VK_SUCCESS == res) {
12065 lock_guard_t lock(global_lock);
12066 res = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
12071 VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
12072 const VkAllocationCallbacks *pAllocator) {
12073 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12074 instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
12075 lock_guard_t lock(global_lock);
12076 layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
12079 VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
12080 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
12081 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
12082 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12083 instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
12086 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
12087 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12090 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12091 VkLayerProperties *pProperties) {
12092 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
12095 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
12096 VkExtensionProperties *pProperties) {
12097 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
12098 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
12100 return VK_ERROR_LAYER_NOT_PRESENT;
12103 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
12104 uint32_t *pCount, VkExtensionProperties *pProperties) {
12105 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
12106 return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
12108 assert(physicalDevice);
12110 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12111 return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
12114 static bool PreCallValidateEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
12115 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12116 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12119 if (instance_data) {
12120 // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
12121 if (NULL != pPhysicalDeviceGroupProperties) {
12122 if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
12123 // Flag warning here. You can call this without having queried the count, but it may not be
12124 // robust on platforms with multiple physical devices.
12125 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12126 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, DEVLIMITS_MISSING_QUERY_COUNT,
12127 "Call sequence has vkEnumeratePhysicalDeviceGroups() w/ non-NULL "
12128 "pPhysicalDeviceGroupProperties. You should first call vkEnumeratePhysicalDeviceGroups() w/ "
12129 "NULL pPhysicalDeviceGroupProperties to query pPhysicalDeviceGroupCount.");
12130 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
12131 else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
12132 // Having actual count match count from app is not a requirement, so this can be a warning
12133 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12134 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
12135 "Call to vkEnumeratePhysicalDeviceGroups() w/ pPhysicalDeviceGroupCount value %u, but actual count "
12136 "supported by this instance is %u.",
12137 *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
12141 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0,
12142 DEVLIMITS_INVALID_INSTANCE, "Invalid instance (0x%" PRIx64 ") passed into vkEnumeratePhysicalDeviceGroups().",
12143 HandleToUint64(instance));
12149 static void PreCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data,
12150 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12151 if (instance_data) {
12152 // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
12153 if (NULL == pPhysicalDeviceGroupProperties) {
12154 instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
12156 instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
12161 static void PostCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceGroupCount,
12162 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12163 if (NULL == pPhysicalDeviceGroupProperties) {
12164 instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
12165 } else { // Save physical devices
12166 for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
12167 for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
12168 VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
12169 auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
12170 phys_device_state.phys_device = cur_phys_dev;
12171 // Init actual features for each physical device
12172 instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
12178 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
12179 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12181 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12183 skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12185 return VK_ERROR_VALIDATION_FAILED_EXT;
12187 PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
12188 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount,
12189 pPhysicalDeviceGroupProperties);
12190 if (result == VK_SUCCESS) {
12191 PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12196 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(
12197 VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12199 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12201 skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12203 return VK_ERROR_VALIDATION_FAILED_EXT;
12205 PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
12206 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount,
12207 pPhysicalDeviceGroupProperties);
12208 if (result == VK_SUCCESS) {
12209 PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12214 static bool PreCallValidateCreateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
12215 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12216 const VkAllocationCallbacks *pAllocator,
12217 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12219 const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
12220 if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
12221 auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout);
12222 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
12223 ds_uint, VALIDATION_ERROR_052002bc, "%s: Invalid pCreateInfo->descriptorSetLayout (%" PRIx64 ")", func_name,
12225 } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
12226 auto bind_point = pCreateInfo->pipelineBindPoint;
12227 bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
12229 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
12230 VALIDATION_ERROR_052002be, "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name,
12231 static_cast<uint32_t>(bind_point));
12233 const auto pipeline_layout = getPipelineLayout(device_data, pCreateInfo->pipelineLayout);
12234 if (!pipeline_layout) {
12235 uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
12236 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12237 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c0,
12238 "%s: Invalid pCreateInfo->pipelineLayout (%" PRIx64 ")", func_name, pl_uint);
12240 const uint32_t pd_set = pCreateInfo->set;
12241 if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
12242 !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
12243 uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
12244 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12245 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c2,
12246 "%s: pCreateInfo->set (%" PRIu32
12247 ") does not refer to the push descriptor set layout for "
12248 "pCreateInfo->pipelineLayout (%" PRIx64 ").",
12249 func_name, pd_set, pl_uint);
12256 static void PostCallRecordCreateDescriptorUpdateTemplate(layer_data *device_data,
12257 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12258 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12259 // Shadow template createInfo for later updates
12260 safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
12261 std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
12262 device_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
12265 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
12266 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12267 const VkAllocationCallbacks *pAllocator,
12268 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12269 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12270 unique_lock_t lock(global_lock);
12271 bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", device_data, pCreateInfo,
12272 pAllocator, pDescriptorUpdateTemplate);
12274 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12278 device_data->dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
12279 if (VK_SUCCESS == result) {
12281 PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
12287 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
12288 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12289 const VkAllocationCallbacks *pAllocator,
12290 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12291 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12292 unique_lock_t lock(global_lock);
12293 bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", device_data, pCreateInfo,
12294 pAllocator, pDescriptorUpdateTemplate);
12296 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12299 result = device_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator,
12300 pDescriptorUpdateTemplate);
12301 if (VK_SUCCESS == result) {
12303 PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
12309 static void PreCallRecordDestroyDescriptorUpdateTemplate(layer_data *device_data,
12310 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate) {
12311 device_data->desc_template_map.erase(descriptorUpdateTemplate);
12314 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12315 const VkAllocationCallbacks *pAllocator) {
12316 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12317 unique_lock_t lock(global_lock);
12318 PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12320 device_data->dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
12323 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
12324 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12325 const VkAllocationCallbacks *pAllocator) {
12326 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12327 unique_lock_t lock(global_lock);
12328 PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12330 device_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
12333 // PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
12334 static void PostCallRecordUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
12335 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12336 const void *pData) {
12337 auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
12338 if (template_map_entry == device_data->desc_template_map.end()) {
12342 cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
12345 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
12346 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12347 const void *pData) {
12348 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12349 device_data->dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
12351 PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
12354 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
12355 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12356 const void *pData) {
12357 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12358 device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
12360 PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
12363 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
12364 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12365 VkPipelineLayout layout, uint32_t set, const void *pData) {
12366 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12367 unique_lock_t lock(global_lock);
12369 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12370 // Minimal validation for command buffer state
12372 skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "vkCmdPushDescriptorSetWithTemplateKHR()");
12377 dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
12381 static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
12382 VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12383 VkDisplayPlanePropertiesKHR *pProperties) {
12384 unique_lock_t lock(global_lock);
12385 auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
12387 if (*pPropertyCount) {
12388 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
12389 physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
12391 physical_device_state->display_plane_property_count = *pPropertyCount;
12394 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
12395 physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
12400 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12401 VkDisplayPlanePropertiesKHR *pProperties) {
12402 VkResult result = VK_SUCCESS;
12403 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12405 result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
12407 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12408 PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
12414 static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
12415 VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12416 const char *api_name) {
12418 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
12419 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
12421 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12422 HandleToUint64(physicalDevice), SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY,
12423 "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
12425 if (planeIndex >= physical_device_state->display_plane_property_count) {
12427 instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12428 HandleToUint64(physicalDevice), VALIDATION_ERROR_29c009c2,
12429 "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR. "
12430 "Do you have the plane index hardcoded?",
12431 api_name, physical_device_state->display_plane_property_count - 1);
12437 static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12438 uint32_t planeIndex) {
12440 lock_guard_t lock(global_lock);
12441 skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12442 "vkGetDisplayPlaneSupportedDisplaysKHR");
12446 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12447 uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
12448 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12449 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12450 bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
12453 instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
12458 static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12459 uint32_t planeIndex) {
12461 lock_guard_t lock(global_lock);
12462 skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12463 "vkGetDisplayPlaneCapabilitiesKHR");
12467 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
12468 uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
12469 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12470 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12471 bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
12474 result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
12480 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
12481 unique_lock_t lock(global_lock);
12482 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12483 if (pNameInfo->pObjectName) {
12484 device_data->report_data->debugObjectNameMap->insert(
12485 std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
12487 device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
12490 VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
12494 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
12495 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12496 VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
12500 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12501 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12502 unique_lock_t lock(global_lock);
12504 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12505 // Minimal validation for command buffer state
12507 skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
12511 device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
12515 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
12516 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12517 unique_lock_t lock(global_lock);
12519 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12520 // Minimal validation for command buffer state
12522 skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
12526 device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
12530 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12531 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12532 device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
12535 VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
12536 uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
12537 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12538 unique_lock_t lock(global_lock);
12540 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12541 // Minimal validation for command buffer state
12543 skip |= ValidateCmd(dev_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
12548 dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
12549 pDiscardRectangles);
12553 VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
12554 const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
12555 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12556 unique_lock_t lock(global_lock);
12558 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12559 // Minimal validation for command buffer state
12561 skip |= ValidateCmd(dev_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
12566 dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
12570 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
12571 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
12572 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);
12574 // Map of all APIs to be intercepted by this layer
12575 static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
12576 {"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
12577 {"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
12578 {"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
12579 {"vkCreateInstance", (void *)CreateInstance},
12580 {"vkCreateDevice", (void *)CreateDevice},
12581 {"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
12582 {"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
12583 {"vkDestroyInstance", (void *)DestroyInstance},
12584 {"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
12585 {"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
12586 {"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
12587 {"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
12588 {"vkCreateDescriptorUpdateTemplate", (void *)CreateDescriptorUpdateTemplate},
12589 {"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
12590 {"vkDestroyDescriptorUpdateTemplate", (void *)DestroyDescriptorUpdateTemplate},
12591 {"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
12592 {"vkUpdateDescriptorSetWithTemplate", (void *)UpdateDescriptorSetWithTemplate},
12593 {"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
12594 {"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
12595 {"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
12596 {"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
12597 {"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
12598 {"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
12599 {"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
12600 {"vkQueuePresentKHR", (void *)QueuePresentKHR},
12601 {"vkQueueSubmit", (void *)QueueSubmit},
12602 {"vkWaitForFences", (void *)WaitForFences},
12603 {"vkGetFenceStatus", (void *)GetFenceStatus},
12604 {"vkQueueWaitIdle", (void *)QueueWaitIdle},
12605 {"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
12606 {"vkGetDeviceQueue", (void *)GetDeviceQueue},
12607 {"vkGetDeviceQueue2", (void *)GetDeviceQueue2},
12608 {"vkDestroyDevice", (void *)DestroyDevice},
12609 {"vkDestroyFence", (void *)DestroyFence},
12610 {"vkResetFences", (void *)ResetFences},
12611 {"vkDestroySemaphore", (void *)DestroySemaphore},
12612 {"vkDestroyEvent", (void *)DestroyEvent},
12613 {"vkDestroyQueryPool", (void *)DestroyQueryPool},
12614 {"vkDestroyBuffer", (void *)DestroyBuffer},
12615 {"vkDestroyBufferView", (void *)DestroyBufferView},
12616 {"vkDestroyImage", (void *)DestroyImage},
12617 {"vkDestroyImageView", (void *)DestroyImageView},
12618 {"vkDestroyShaderModule", (void *)DestroyShaderModule},
12619 {"vkDestroyPipeline", (void *)DestroyPipeline},
12620 {"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
12621 {"vkDestroySampler", (void *)DestroySampler},
12622 {"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
12623 {"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
12624 {"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
12625 {"vkDestroyRenderPass", (void *)DestroyRenderPass},
12626 {"vkCreateBuffer", (void *)CreateBuffer},
12627 {"vkCreateBufferView", (void *)CreateBufferView},
12628 {"vkCreateImage", (void *)CreateImage},
12629 {"vkCreateImageView", (void *)CreateImageView},
12630 {"vkCreateFence", (void *)CreateFence},
12631 {"vkCreatePipelineCache", (void *)CreatePipelineCache},
12632 {"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
12633 {"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
12634 {"vkMergePipelineCaches", (void *)MergePipelineCaches},
12635 {"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
12636 {"vkCreateComputePipelines", (void *)CreateComputePipelines},
12637 {"vkCreateSampler", (void *)CreateSampler},
12638 {"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
12639 {"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
12640 {"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
12641 {"vkResetDescriptorPool", (void *)ResetDescriptorPool},
12642 {"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
12643 {"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
12644 {"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
12645 {"vkCreateCommandPool", (void *)CreateCommandPool},
12646 {"vkDestroyCommandPool", (void *)DestroyCommandPool},
12647 {"vkResetCommandPool", (void *)ResetCommandPool},
12648 {"vkCreateQueryPool", (void *)CreateQueryPool},
12649 {"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
12650 {"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
12651 {"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
12652 {"vkEndCommandBuffer", (void *)EndCommandBuffer},
12653 {"vkResetCommandBuffer", (void *)ResetCommandBuffer},
12654 {"vkCmdBindPipeline", (void *)CmdBindPipeline},
12655 {"vkCmdSetViewport", (void *)CmdSetViewport},
12656 {"vkCmdSetScissor", (void *)CmdSetScissor},
12657 {"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
12658 {"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
12659 {"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
12660 {"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
12661 {"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
12662 {"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
12663 {"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
12664 {"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
12665 {"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
12666 {"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
12667 {"vkCmdDraw", (void *)CmdDraw},
12668 {"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
12669 {"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
12670 {"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
12671 {"vkCmdDispatch", (void *)CmdDispatch},
12672 {"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
12673 {"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
12674 {"vkCmdCopyImage", (void *)CmdCopyImage},
12675 {"vkCmdBlitImage", (void *)CmdBlitImage},
12676 {"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
12677 {"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
12678 {"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
12679 {"vkCmdFillBuffer", (void *)CmdFillBuffer},
12680 {"vkCmdClearColorImage", (void *)CmdClearColorImage},
12681 {"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
12682 {"vkCmdClearAttachments", (void *)CmdClearAttachments},
12683 {"vkCmdResolveImage", (void *)CmdResolveImage},
12684 {"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
12685 {"vkCmdSetEvent", (void *)CmdSetEvent},
12686 {"vkCmdResetEvent", (void *)CmdResetEvent},
12687 {"vkCmdWaitEvents", (void *)CmdWaitEvents},
12688 {"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
12689 {"vkCmdBeginQuery", (void *)CmdBeginQuery},
12690 {"vkCmdEndQuery", (void *)CmdEndQuery},
12691 {"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
12692 {"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
12693 {"vkCmdPushConstants", (void *)CmdPushConstants},
12694 {"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
12695 {"vkCreateFramebuffer", (void *)CreateFramebuffer},
12696 {"vkCreateShaderModule", (void *)CreateShaderModule},
12697 {"vkCreateRenderPass", (void *)CreateRenderPass},
12698 {"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
12699 {"vkCmdNextSubpass", (void *)CmdNextSubpass},
12700 {"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
12701 {"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
12702 {"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
12703 {"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
12704 {"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
12705 {"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
12706 {"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
12707 {"vkSetEvent", (void *)SetEvent},
12708 {"vkMapMemory", (void *)MapMemory},
12709 {"vkUnmapMemory", (void *)UnmapMemory},
12710 {"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
12711 {"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
12712 {"vkAllocateMemory", (void *)AllocateMemory},
12713 {"vkFreeMemory", (void *)FreeMemory},
12714 {"vkBindBufferMemory", (void *)BindBufferMemory},
12715 {"vkBindBufferMemory2", (void *)BindBufferMemory2},
12716 {"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
12717 {"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
12718 {"vkGetBufferMemoryRequirements2", (void *)GetBufferMemoryRequirements2},
12719 {"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
12720 {"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
12721 {"vkGetImageMemoryRequirements2", (void *)GetImageMemoryRequirements2},
12722 {"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
12723 {"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
12724 {"vkGetImageSparseMemoryRequirements2", (void *)GetImageSparseMemoryRequirements2},
12725 {"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
12726 {"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
12727 {"vkGetPhysicalDeviceSparseImageFormatProperties2", (void *)GetPhysicalDeviceSparseImageFormatProperties2},
12728 {"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
12729 {"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
12730 {"vkBindImageMemory", (void *)BindImageMemory},
12731 {"vkBindImageMemory2", (void *)BindImageMemory2},
12732 {"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
12733 {"vkQueueBindSparse", (void *)QueueBindSparse},
12734 {"vkCreateSemaphore", (void *)CreateSemaphore},
12735 {"vkCreateEvent", (void *)CreateEvent},
12736 #ifdef VK_USE_PLATFORM_ANDROID_KHR
12737 {"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
12739 #ifdef VK_USE_PLATFORM_MIR_KHR
12740 {"vkCreateMirSurfaceKHR", (void *)CreateMirSurfaceKHR},
12741 {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void *)GetPhysicalDeviceMirPresentationSupportKHR},
12743 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
12744 {"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
12745 {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
12747 #ifdef VK_USE_PLATFORM_WIN32_KHR
12748 {"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
12749 {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
12750 {"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
12751 {"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
12752 {"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
12753 {"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
12755 #ifdef VK_USE_PLATFORM_XCB_KHR
12756 {"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
12757 {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
12759 #ifdef VK_USE_PLATFORM_XLIB_KHR
12760 {"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
12761 {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
12763 #ifdef VK_USE_PLATFORM_IOS_MVK
12764 {"vkCreateIOSSurfaceMVK", (void *)CreateIOSSurfaceMVK},
12766 #ifdef VK_USE_PLATFORM_MACOS_MVK
12767 {"vkCreateMacOSSurfaceMVK", (void *)CreateMacOSSurfaceMVK},
12769 {"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
12770 {"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
12771 {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
12772 {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
12773 {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
12774 {"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
12775 {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
12776 {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
12777 {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
12778 {"vkGetPhysicalDeviceQueueFamilyProperties2", (void *)GetPhysicalDeviceQueueFamilyProperties2},
12779 {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
12780 {"vkEnumeratePhysicalDeviceGroups", (void *)EnumeratePhysicalDeviceGroups},
12781 {"vkEnumeratePhysicalDeviceGroupsKHR", (void *)EnumeratePhysicalDeviceGroupsKHR},
12782 {"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
12783 {"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
12784 {"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
12785 {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
12786 {"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
12787 {"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
12788 {"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
12789 {"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
12790 {"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
12791 {"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
12792 {"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
12793 {"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
12794 {"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
12795 {"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
12796 {"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
12797 {"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
12798 {"vkSetDebugUtilsObjectNameEXT", (void *)SetDebugUtilsObjectNameEXT},
12799 {"vkSetDebugUtilsObjectTagEXT", (void *)SetDebugUtilsObjectTagEXT},
12800 {"vkQueueBeginDebugUtilsLabelEXT", (void *)QueueBeginDebugUtilsLabelEXT},
12801 {"vkQueueEndDebugUtilsLabelEXT", (void *)QueueEndDebugUtilsLabelEXT},
12802 {"vkQueueInsertDebugUtilsLabelEXT", (void *)QueueInsertDebugUtilsLabelEXT},
12803 {"vkCmdBeginDebugUtilsLabelEXT", (void *)CmdBeginDebugUtilsLabelEXT},
12804 {"vkCmdEndDebugUtilsLabelEXT", (void *)CmdEndDebugUtilsLabelEXT},
12805 {"vkCmdInsertDebugUtilsLabelEXT", (void *)CmdInsertDebugUtilsLabelEXT},
12806 {"vkCreateDebugUtilsMessengerEXT", (void *)CreateDebugUtilsMessengerEXT},
12807 {"vkDestroyDebugUtilsMessengerEXT", (void *)DestroyDebugUtilsMessengerEXT},
12808 {"vkSubmitDebugUtilsMessageEXT", (void *)SubmitDebugUtilsMessageEXT},
12811 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
12813 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12815 // Is API to be intercepted by this layer?
12816 const auto &item = name_to_funcptr_map.find(funcName);
12817 if (item != name_to_funcptr_map.end()) {
12818 return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12821 auto &table = device_data->dispatch_table;
12822 if (!table.GetDeviceProcAddr) return nullptr;
12823 return table.GetDeviceProcAddr(device, funcName);
12826 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
12827 instance_layer_data *instance_data;
12828 // Is API to be intercepted by this layer?
12829 const auto &item = name_to_funcptr_map.find(funcName);
12830 if (item != name_to_funcptr_map.end()) {
12831 return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12834 instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12835 auto &table = instance_data->dispatch_table;
12836 if (!table.GetInstanceProcAddr) return nullptr;
12837 return table.GetInstanceProcAddr(instance, funcName);
12840 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
12842 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12844 auto &table = instance_data->dispatch_table;
12845 if (!table.GetPhysicalDeviceProcAddr) return nullptr;
12846 return table.GetPhysicalDeviceProcAddr(instance, funcName);
12849 } // namespace core_validation
12851 // loader-layer interface v0, just wrappers since there is only a layer
// Loader-layer interface v0 wrapper: forwards instance-extension enumeration into
// this layer's core_validation implementation.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
// Loader-layer interface v0 wrapper: reports this layer's VkLayerProperties to the loader.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
// Loader-layer interface v0 wrapper: enumerates device-layer properties.  The loader
// is expected to pass VK_NULL_HANDLE for physicalDevice when calling into a layer.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
// Loader-layer interface v0 wrapper: enumerates this layer's device extensions.  As with
// vkEnumerateDeviceLayerProperties, the loader passes VK_NULL_HANDLE for physicalDevice.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
// Loader-layer interface v0 wrapper: device-level proc-addr resolution.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
// Loader-layer interface v0 wrapper: instance-level proc-addr resolution.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
// Loader-layer interface wrapper: vk_layerGetPhysicalDeviceProcAddr is the name the
// loader uses (interface version >= 2) to resolve physical-device-level entry points.
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
12891 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
12892 assert(pVersionStruct != NULL);
12893 assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
12895 // Fill in the function pointers if our version is at least capable of having the structure contain them.
12896 if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
12897 pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
12898 pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
12899 pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
12902 if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12903 core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
12904 } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12905 pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;