1 /* Copyright (c) 2015-2017 The Khronos Group Inc.
2 * Copyright (c) 2015-2017 Valve Corporation
3 * Copyright (c) 2015-2017 LunarG, Inc.
4 * Copyright (C) 2015-2017 Google Inc.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * Author: Cody Northrop <cnorthrop@google.com>
19 * Author: Michael Lentine <mlentine@google.com>
20 * Author: Tobin Ehlis <tobine@google.com>
21 * Author: Chia-I Wu <olv@google.com>
22 * Author: Chris Forbes <chrisf@ijw.co.nz>
23 * Author: Mark Lobodzinski <mark@lunarg.com>
24 * Author: Ian Elliott <ianelliott@google.com>
25 * Author: Dave Houlton <daveh@lunarg.com>
26 * Author: Dustin Graves <dustin@lunarg.com>
27 * Author: Jeremy Hayes <jeremy@lunarg.com>
28 * Author: Jon Ashburn <jon@lunarg.com>
29 * Author: Karl Schultz <karl@lunarg.com>
30 * Author: Mark Young <marky@lunarg.com>
31 * Author: Mike Schuchardt <mikes@lunarg.com>
32 * Author: Mike Weiblen <mikew@lunarg.com>
33 * Author: Tony Barbour <tony@LunarG.com>
36 // Allow use of STL min and max functions in Windows
55 #include "vk_loader_platform.h"
56 #include "vk_dispatch_table_helper.h"
57 #include "vk_enum_string_helper.h"
59 #pragma GCC diagnostic ignored "-Wwrite-strings"
62 #pragma GCC diagnostic warning "-Wwrite-strings"
64 #include "core_validation.h"
65 #include "buffer_validation.h"
66 #include "shader_validation.h"
67 #include "vk_layer_table.h"
68 #include "vk_layer_data.h"
69 #include "vk_layer_extension_utils.h"
70 #include "vk_layer_utils.h"
71 #include "vk_typemap_helper.h"
73 #if defined __ANDROID__
74 #include <android/log.h>
75 #define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "CORE_VALIDATION", __VA_ARGS__))
77 #define LOGCONSOLE(...) \
79 printf(__VA_ARGS__); \
84 // This intentionally includes a cpp file
85 #include "vk_safe_struct.cpp"
87 using mutex_t = std::mutex;
88 using lock_guard_t = std::lock_guard<mutex_t>;
89 using unique_lock_t = std::unique_lock<mutex_t>;
91 // These functions are defined *outside* the core_validation namespace as their type
92 // is also defined outside that namespace
93 size_t PipelineLayoutCompatDef::hash() const {
94 hash_util::HashCombiner hc;
95 // The set number is integral to the CompatDef's distinctiveness
96 hc << set << push_constant_ranges.get();
97 const auto &descriptor_set_layouts = *set_layouts_id.get();
98 for (uint32_t i = 0; i <= set; i++) {
99 hc << descriptor_set_layouts[i].get();
104 bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
105 if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
109 if (set_layouts_id == other.set_layouts_id) {
110 // if it's the same set_layouts_id, then *any* subset will match
114 // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
115 const auto &descriptor_set_layouts = *set_layouts_id.get();
116 assert(set < descriptor_set_layouts.size());
117 const auto &other_ds_layouts = *other.set_layouts_id.get();
118 assert(set < other_ds_layouts.size());
119 for (uint32_t i = 0; i <= set; i++) {
120 if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
127 namespace core_validation {
131 using std::stringstream;
132 using std::unique_ptr;
133 using std::unordered_map;
134 using std::unordered_set;
137 // WSI Image Objects bypass usual Image Object creation methods. A special Memory
138 // Object value will be used to identify them internally.
139 static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
140 // 2nd special memory handle used to flag object as unbound from memory
141 static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
143 struct instance_layer_data {
144 VkInstance instance = VK_NULL_HANDLE;
145 debug_report_data *report_data = nullptr;
146 vector<VkDebugReportCallbackEXT> logging_callback;
147 vector<VkDebugUtilsMessengerEXT> logging_messenger;
148 VkLayerInstanceDispatchTable dispatch_table;
150 CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
151 uint32_t physical_devices_count = 0;
152 CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
153 uint32_t physical_device_groups_count = 0;
154 CHECK_DISABLED disabled = {};
156 unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
157 unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
159 InstanceExtensions extensions;
160 uint32_t api_version;
164 debug_report_data *report_data = nullptr;
165 VkLayerDispatchTable dispatch_table;
167 DeviceExtensions extensions = {};
168 unordered_set<VkQueue> queues; // All queues under given device
169 // Layer specific data
170 unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
171 unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
172 unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
173 unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
174 unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
175 unordered_map<VkPipeline, unique_ptr<PIPELINE_STATE>> pipelineMap;
176 unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
177 unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
178 unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
179 unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
180 unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
181 unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
182 unordered_map<VkFence, FENCE_NODE> fenceMap;
183 unordered_map<VkQueue, QUEUE_STATE> queueMap;
184 unordered_map<VkEvent, EVENT_STATE> eventMap;
185 unordered_map<QueryObject, bool> queryToStateMap;
186 unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
187 unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
188 unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
189 unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
190 unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
191 unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
192 unordered_map<VkRenderPass, std::shared_ptr<RENDER_PASS_STATE>> renderPassMap;
193 unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
194 unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
195 unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
197 VkDevice device = VK_NULL_HANDLE;
198 VkPhysicalDevice physical_device = VK_NULL_HANDLE;
200 instance_layer_data *instance_data = nullptr; // from device to enclosing instance
202 VkPhysicalDeviceFeatures enabled_features = {};
203 // Device specific data
204 PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
205 VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
206 VkPhysicalDeviceProperties phys_dev_props = {};
207 // Device extension properties -- storing properties gathered from VkPhysicalDeviceProperties2KHR::pNext chain
208 struct DeviceExtensionProperties {
209 uint32_t max_push_descriptors; // from VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors
211 DeviceExtensionProperties phys_dev_ext_props = {};
212 bool external_sync_warning = false;
213 uint32_t api_version = 0;
216 // TODO : Do we need to guard access to layer_data_map w/ lock?
217 static unordered_map<void *, layer_data *> layer_data_map;
218 static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
220 static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
222 static const VkLayerProperties global_layer = {
223 "VK_LAYER_LUNARG_core_validation",
224 VK_LAYER_API_VERSION,
226 "LunarG Validation Layer",
229 static const VkExtensionProperties device_extensions[] = {
230 {VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
233 template <class TCreateInfo>
234 void ValidateLayerOrdering(const TCreateInfo &createInfo) {
235 bool foundLayer = false;
236 for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
237 if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
240 // This has to be logged to console as we don't have a callback at this point.
241 if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
242 LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
247 // TODO : This can be much smarter, using separate locks for separate global data
248 static mutex_t global_lock;
250 // Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
251 IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
252 auto iv_it = dev_data->imageViewMap.find(image_view);
253 if (iv_it == dev_data->imageViewMap.end()) {
256 return iv_it->second.get();
258 // Return sampler node ptr for specified sampler or else NULL
259 SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
260 auto sampler_it = dev_data->samplerMap.find(sampler);
261 if (sampler_it == dev_data->samplerMap.end()) {
264 return sampler_it->second.get();
266 // Return image state ptr for specified image or else NULL
267 IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
268 auto img_it = dev_data->imageMap.find(image);
269 if (img_it == dev_data->imageMap.end()) {
272 return img_it->second.get();
274 // Return buffer state ptr for specified buffer or else NULL
275 BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
276 auto buff_it = dev_data->bufferMap.find(buffer);
277 if (buff_it == dev_data->bufferMap.end()) {
280 return buff_it->second.get();
282 // Return swapchain node for specified swapchain or else NULL
283 SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
284 auto swp_it = dev_data->swapchainMap.find(swapchain);
285 if (swp_it == dev_data->swapchainMap.end()) {
288 return swp_it->second.get();
290 // Return buffer node ptr for specified buffer or else NULL
291 BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
292 auto bv_it = dev_data->bufferViewMap.find(buffer_view);
293 if (bv_it == dev_data->bufferViewMap.end()) {
296 return bv_it->second.get();
299 FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
300 auto it = dev_data->fenceMap.find(fence);
301 if (it == dev_data->fenceMap.end()) {
307 EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
308 auto it = dev_data->eventMap.find(event);
309 if (it == dev_data->eventMap.end()) {
315 QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
316 auto it = dev_data->queryPoolMap.find(query_pool);
317 if (it == dev_data->queryPoolMap.end()) {
323 QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
324 auto it = dev_data->queueMap.find(queue);
325 if (it == dev_data->queueMap.end()) {
331 SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
332 auto it = dev_data->semaphoreMap.find(semaphore);
333 if (it == dev_data->semaphoreMap.end()) {
339 COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
340 auto it = dev_data->commandPoolMap.find(pool);
341 if (it == dev_data->commandPoolMap.end()) {
347 PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
348 auto it = instance_data->physical_device_map.find(phys);
349 if (it == instance_data->physical_device_map.end()) {
355 SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
356 auto it = instance_data->surface_map.find(surface);
357 if (it == instance_data->surface_map.end()) {
363 DeviceExtensions const *GetEnabledExtensions(layer_data const *dev_data) { return &dev_data->extensions; }
365 // Return ptr to memory binding for given handle of specified type
366 static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
368 case kVulkanObjectTypeImage:
369 return GetImageState(dev_data, VkImage(handle));
370 case kVulkanObjectTypeBuffer:
371 return GetBufferState(dev_data, VkBuffer(handle));
378 GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
380 // Return ptr to info in map container containing mem, or NULL if not found
381 // Calls to this function should be wrapped in mutex
382 DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
383 auto mem_it = dev_data->memObjMap.find(mem);
384 if (mem_it == dev_data->memObjMap.end()) {
387 return mem_it->second.get();
390 static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
391 const VkMemoryAllocateInfo *pAllocateInfo) {
392 assert(object != NULL);
394 auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
395 dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(mem_info);
397 // TODO: If the number of things we search for goes much higher, need a map...
398 mem_info->global_valid = nullptr != lvl_find_in_chain<VkImportMemoryFdInfoKHR>(pAllocateInfo->pNext);
399 #ifdef VK_USE_PLATFORM_WIN32_KHR
400 mem_info->global_valid |= nullptr != lvl_find_in_chain<VkImportMemoryWin32HandleInfoKHR>(pAllocateInfo->pNext);
403 auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
405 mem_info->is_dedicated = true;
406 mem_info->dedicated_buffer = dedicated->buffer;
407 mem_info->dedicated_image = dedicated->image;
411 // For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
412 static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle, VulkanObjectType type,
413 const char *functionName) {
414 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
416 if (!mem_info->bound_ranges[bound_object_handle].valid) {
417 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
418 HandleToUint64(mem), MEMTRACK_INVALID_MEM_REGION,
419 "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
420 ", please fill the memory before using.",
421 functionName, HandleToUint64(mem), object_string[type], bound_object_handle);
426 // For given image_state
427 // If mem is special swapchain key, then verify that image_state valid member is true
428 // Else verify that the image's bound memory range is valid
429 bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
430 if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
431 if (!image_state->valid) {
432 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
433 HandleToUint64(image_state->binding.mem), MEMTRACK_INVALID_MEM_REGION,
434 "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
435 functionName, HandleToUint64(image_state->image));
438 return ValidateMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), kVulkanObjectTypeImage,
443 // For given buffer_state, verify that the range it's bound to is valid
444 bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
445 return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer,
448 // For the given memory allocation, set the range bound by the given handle object to the valid param value
449 static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
450 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
452 mem_info->bound_ranges[handle].valid = valid;
455 // For given image node
456 // If mem is special swapchain key, then set entire image_state to valid param value
457 // Else set the image's bound memory range to valid param value
458 void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
459 if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
460 image_state->valid = valid;
462 SetMemoryValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), valid);
465 // For given buffer node set the buffer's bound memory range to valid param value
466 void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
467 SetMemoryValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), valid);
470 // Create binding link between given sampler and command buffer node
471 void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
472 sampler_state->cb_bindings.insert(cb_node);
473 cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
476 // Create binding link between given image node and command buffer node
477 void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
478 // Skip validation if this image was created through WSI
479 if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
480 // First update CB binding in MemObj mini CB list
481 for (auto mem_binding : image_state->GetBoundMemory()) {
482 DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
484 pMemInfo->cb_bindings.insert(cb_node);
485 // Now update CBInfo's Mem reference list
486 cb_node->memObjs.insert(mem_binding);
489 // Now update cb binding for image
490 cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
491 image_state->cb_bindings.insert(cb_node);
495 // Create binding link between given image view node and its image with command buffer node
496 void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
497 // First add bindings for imageView
498 view_state->cb_bindings.insert(cb_node);
499 cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
500 auto image_state = GetImageState(dev_data, view_state->create_info.image);
501 // Add bindings for image within imageView
503 AddCommandBufferBindingImage(dev_data, cb_node, image_state);
507 // Create binding link between given buffer node and command buffer node
508 void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
509 // First update CB binding in MemObj mini CB list
510 for (auto mem_binding : buffer_state->GetBoundMemory()) {
511 DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
513 pMemInfo->cb_bindings.insert(cb_node);
514 // Now update CBInfo's Mem reference list
515 cb_node->memObjs.insert(mem_binding);
518 // Now update cb binding for buffer
519 cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
520 buffer_state->cb_bindings.insert(cb_node);
523 // Create binding link between given buffer view node and its buffer with command buffer node
524 void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
525 // First add bindings for bufferView
526 view_state->cb_bindings.insert(cb_node);
527 cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
528 auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
529 // Add bindings for buffer within bufferView
531 AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
535 // For every mem obj bound to particular CB, free bindings related to that CB
536 static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
538 if (cb_node->memObjs.size() > 0) {
539 for (auto mem : cb_node->memObjs) {
540 DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
542 pInfo->cb_bindings.erase(cb_node);
545 cb_node->memObjs.clear();
550 // Clear a single object binding from given memory object, or report error if binding is missing
551 static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
552 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
553 // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
555 mem_info->obj_bindings.erase({handle, type});
560 // ClearMemoryObjectBindings clears the binding of objects to memory
561 // For the given object it pulls the memory bindings and makes sure that the bindings
562 // no longer refer to the object being cleared. This occurs when objects are destroyed.
563 bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
565 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
567 if (!mem_binding->sparse) {
568 skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
569 } else { // Sparse, clear all bindings
570 for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
571 skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
578 // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
579 bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
580 const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
582 if (VK_NULL_HANDLE == mem) {
584 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
585 "%s: Vk%s object 0x%" PRIx64 " used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
586 api_name, type_name, handle, type_name);
587 } else if (MEMORY_UNBOUND == mem) {
589 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
590 "%s: Vk%s object 0x%" PRIx64
591 " used with no memory bound and previously bound memory was freed. Memory must not be freed prior to this "
593 api_name, type_name, handle);
598 // Check to see if memory was ever bound to this image
599 bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
600 UNIQUE_VALIDATION_ERROR_CODE error_code) {
602 if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
603 result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
609 // Check to see if memory was bound to this buffer
610 bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
611 UNIQUE_VALIDATION_ERROR_CODE error_code) {
613 if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
614 result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
615 "Buffer", error_code);
620 // SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
621 // Corresponding valid usage checks are in ValidateSetMemBinding().
622 static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
623 uint64_t handle, VulkanObjectType type, const char *apiName) {
625 mem_binding->binding.mem = mem;
626 mem_binding->UpdateBoundMemorySet(); // force recreation of cached set
627 mem_binding->binding.offset = memory_offset;
628 mem_binding->binding.size = mem_binding->requirements.size;
630 if (mem != VK_NULL_HANDLE) {
631 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
633 mem_info->obj_bindings.insert({handle, type});
634 // For image objects, make sure default memory state is correctly set
635 // TODO : What's the best/correct way to handle this?
636 if (kVulkanObjectTypeImage == type) {
637 auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
639 VkImageCreateInfo ici = image_state->createInfo;
640 if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
641 // TODO:: More memory state transition stuff.
649 // Valid usage checks for a call to SetMemBinding().
650 // For NULL mem case, output warning
651 // Make sure given object is in global object map
652 // IF a previous binding existed, output validation error
653 // Otherwise, add reference from objectInfo to memoryInfo
654 // Add reference off of objInfo
655 // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
656 static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
657 const char *apiName) {
659 // It's an error to bind an object to NULL memory
660 if (mem != VK_NULL_HANDLE) {
661 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
663 if (mem_binding->sparse) {
664 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
665 const char *handle_type = "IMAGE";
666 if (type == kVulkanObjectTypeBuffer) {
667 error_code = VALIDATION_ERROR_1700080c;
668 handle_type = "BUFFER";
670 assert(type == kVulkanObjectTypeImage);
672 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
673 HandleToUint64(mem), error_code,
674 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
675 ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT).",
676 apiName, HandleToUint64(mem), handle, handle_type);
678 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
680 DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
682 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
683 if (type == kVulkanObjectTypeBuffer) {
684 error_code = VALIDATION_ERROR_1700080a;
686 assert(type == kVulkanObjectTypeImage);
688 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
689 HandleToUint64(mem), error_code,
690 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
691 ") which has already been bound to mem object 0x%" PRIx64 ".",
692 apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem));
693 } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
694 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
695 HandleToUint64(mem), MEMTRACK_REBIND_OBJECT,
696 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
697 ") which was previous bound to memory that has since been freed. Memory bindings are immutable in "
698 "Vulkan so this attempt to bind to new memory is not allowed.",
699 apiName, HandleToUint64(mem), handle);
706 // For NULL mem case, clear any previous binding Else...
707 // Make sure given object is in its object map
708 // IF a previous binding existed, update binding
709 // Add reference from objectInfo to memoryInfo
710 // Add reference off of object's binding info
711 // Return VK_TRUE if addition is successful, VK_FALSE otherwise
712 static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
713 bool skip = VK_FALSE;
714 // Handle NULL case separately, just clear previous binding & decrement reference
715 if (binding.mem == VK_NULL_HANDLE) {
716 // TODO : This should cause the range of the resource to be unbound according to spec
718 BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
720 assert(mem_binding->sparse);
721 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
723 mem_info->obj_bindings.insert({handle, type});
724 // Need to set mem binding for this object
725 mem_binding->sparse_bindings.insert(binding);
726 mem_binding->UpdateBoundMemorySet();
732 // Check object status for selected flag state
733 static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
734 const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
735 if (!(pNode->status & status_mask)) {
736 return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
737 HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object 0x%" PRIx64 ": %s..",
738 HandleToUint64(pNode->commandBuffer), fail_msg);
743 // Retrieve pipeline node ptr for given pipeline object
744 static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
745 auto it = dev_data->pipelineMap.find(pipeline);
746 if (it == dev_data->pipelineMap.end()) {
749 return it->second.get();
752 RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
753 auto it = dev_data->renderPassMap.find(renderpass);
754 if (it == dev_data->renderPassMap.end()) {
757 return it->second.get();
760 std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
761 auto it = dev_data->renderPassMap.find(renderpass);
762 if (it == dev_data->renderPassMap.end()) {
768 FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
769 auto it = dev_data->frameBufferMap.find(framebuffer);
770 if (it == dev_data->frameBufferMap.end()) {
773 return it->second.get();
776 std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
777 VkDescriptorSetLayout dsLayout) {
778 auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
779 if (it == dev_data->descriptorSetLayoutMap.end()) {
785 static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
786 auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
787 if (it == dev_data->pipelineLayoutMap.end()) {
793 shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
794 auto it = dev_data->shaderModuleMap.find(module);
795 if (it == dev_data->shaderModuleMap.end()) {
798 return it->second.get();
801 // Return true if for a given PSO, the given state enum is dynamic, else return false
802 static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
803 if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
804 for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
805 if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
811 // Validate state stored as flags at time of draw call
// For each piece of state the bound pipeline leaves dynamic, verify the command
// buffer has actually recorded the corresponding vkCmdSet* call (tracked as
// CBSTATUS_* bits on the CB).  Returns true if any required state is missing.
// NOTE(review): the "bool result = false;" declaration, the closing braces of each
// if-block, the indexed-draw guard, and the final return are elided in this view.
812 static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
813 UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
// Line width only matters for line-topology pipelines.
815 if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
816 pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
817 result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
818 "Dynamic line width state not set for this command buffer", msg_code);
// Depth bias must be set only when the pipeline enables depth biasing.
820 if (pPipe->graphicsPipelineCI.pRasterizationState &&
821 (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
822 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
823 "Dynamic depth bias state not set for this command buffer", msg_code);
// Blend constants are required when any attachment uses a *_CONSTANT_* blend factor
// (precomputed into pPipe->blendConstantsEnabled at pipeline-create time).
825 if (pPipe->blendConstantsEnabled) {
826 result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
827 "Dynamic blend constants state not set for this command buffer", msg_code);
// Depth-bounds values required only when the depth-bounds test is enabled.
829 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
830 (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
831 result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
832 "Dynamic depth bounds state not set for this command buffer", msg_code);
// When stencil testing is on, all three stencil dynamic values must be set.
834 if (pPipe->graphicsPipelineCI.pDepthStencilState &&
835 (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
836 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
837 "Dynamic stencil read mask state not set for this command buffer", msg_code);
838 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
839 "Dynamic stencil write mask state not set for this command buffer", msg_code);
840 result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
841 "Dynamic stencil reference state not set for this command buffer", msg_code);
// Indexed draws additionally require a bound index buffer (guard condition elided
// in this view -- presumably "if (indexed)").
844 result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
845 "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
// Emit the standard "RenderPasses incompatible" error for a mismatched attachment
// pair.  type1/2_string name the roles (e.g. "active render pass" vs "pipeline
// state object"); msg describes the specific incompatibility.  Returns the
// log_msg result (true if the message should abort the call chain).
851 static bool logInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
852 const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
853 uint32_t secondary_attach, const char *msg, const char *caller,
854 UNIQUE_VALIDATION_ERROR_CODE error_code) {
855 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
856 HandleToUint64(rp1_state->renderPass), error_code,
857 "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64 " and %s w/ renderPass 0x%" PRIx64
858 " Attachment %u is not compatible with %u: %s.",
859 caller, type1_string, HandleToUint64(rp1_state->renderPass), type2_string, HandleToUint64(rp2_state->renderPass),
860 primary_attach, secondary_attach, msg);
// Check one attachment reference pair (by index into each pass's pAttachments)
// for render-pass compatibility: both unused is OK; one unused is an error;
// otherwise format, sample count, and flags must all match (per the Vulkan spec's
// render pass compatibility rules).  Returns true if any incompatibility logged.
// NOTE(review): "bool skip = false;", early returns, closing braces, and the
// final "return skip;" are elided in this view.
863 static bool validateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
864 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
865 const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
866 const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
868 const auto &primaryPassCI = rp1_state->createInfo;
869 const auto &secondaryPassCI = rp2_state->createInfo;
// Out-of-range indices are treated as VK_ATTACHMENT_UNUSED rather than read OOB.
870 if (primaryPassCI.attachmentCount <= primary_attach) {
871 primary_attach = VK_ATTACHMENT_UNUSED;
873 if (secondaryPassCI.attachmentCount <= secondary_attach) {
874 secondary_attach = VK_ATTACHMENT_UNUSED;
// Both unused: trivially compatible (early return elided in this view).
876 if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
// Exactly one unused: incompatible.
879 if (primary_attach == VK_ATTACHMENT_UNUSED) {
880 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
881 secondary_attach, "The first is unused while the second is not.", caller, error_code);
884 if (secondary_attach == VK_ATTACHMENT_UNUSED) {
885 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
886 secondary_attach, "The second is unused while the first is not.", caller, error_code);
// Both used: compare the attachment descriptions field by field.
889 if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
890 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
891 secondary_attach, "They have different formats.", caller, error_code);
893 if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
894 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
895 secondary_attach, "They have different samples.", caller, error_code);
897 if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
898 skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
899 secondary_attach, "They have different flags.", caller, error_code);
// Check compatibility of one subpass (same index in both passes): every input,
// color, resolve, and depth/stencil attachment reference must be pairwise
// compatible.  Indices past either pass's count are compared as
// VK_ATTACHMENT_UNUSED so differing counts still get a full comparison.
// NOTE(review): "bool skip = false;", closing braces, and the final return are
// elided in this view.
905 static bool validateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
906 const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
907 const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
909 const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
910 const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
// Input attachments: iterate to the larger of the two counts.
911 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
912 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
913 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
914 if (i < primary_desc.inputAttachmentCount) {
915 primary_input_attach = primary_desc.pInputAttachments[i].attachment;
917 if (i < secondary_desc.inputAttachmentCount) {
918 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
920 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
921 secondary_input_attach, caller, error_code);
// Color attachments -- and, in the same loop, the per-color resolve attachments
// (pResolveAttachments is optional and may be null on either side).
923 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
924 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
925 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
926 if (i < primary_desc.colorAttachmentCount) {
927 primary_color_attach = primary_desc.pColorAttachments[i].attachment;
929 if (i < secondary_desc.colorAttachmentCount) {
930 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
932 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
933 secondary_color_attach, caller, error_code);
934 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
935 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
936 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
938 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
939 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
941 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
942 secondary_resolve_attach, caller, error_code);
// Depth/stencil attachment: at most one per subpass, optional on either side.
944 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
945 if (primary_desc.pDepthStencilAttachment) {
946 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
948 if (secondary_desc.pDepthStencilAttachment) {
949 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
951 skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
952 secondary_depthstencil_attach, caller, error_code);
956 // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
957 // This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
958 // will then feed into this function
// Top-level compatibility check: subpass counts must match, then each subpass
// pair is checked via validateSubpassCompatibility.  Returns true on any
// incompatibility.  NOTE(review): "bool skip = false;", closing braces, and the
// final return are elided in this view.
959 static bool validateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
960 const RENDER_PASS_STATE *rp1_state, const char *type2_string,
961 const RENDER_PASS_STATE *rp2_state, const char *caller,
962 UNIQUE_VALIDATION_ERROR_CODE error_code) {
965 if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
966 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
967 HandleToUint64(rp1_state->renderPass), error_code,
968 "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64
969 " with a subpassCount of %u and %s w/ renderPass 0x%" PRIx64 " with a subpassCount of %u.",
970 caller, type1_string, HandleToUint64(rp1_state->renderPass), rp1_state->createInfo.subpassCount,
971 type2_string, HandleToUint64(rp2_state->renderPass), rp2_state->createInfo.subpassCount);
// Per-subpass check iterates rp1's count; if counts differ the mismatch was
// already reported above.
973 for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
974 skip |= validateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
980 // Return Set node ptr for specified set or else NULL
// Look up the descriptor-set tracking object for a VkDescriptorSet handle.
// NOTE(review): the not-found branch body (presumably "return nullptr;") is
// elided in this view.
981 cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
982 auto set_it = dev_data->setMap.find(set);
983 if (set_it == dev_data->setMap.end()) {
986 return set_it->second;
989 // For given pipeline, return number of MSAA samples, or one if MSAA disabled
// The sType check guards against an uninitialized/absent multisample struct;
// falls back to 1 sample when no valid multisample state is present.
990 static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
991 if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
992 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
993 return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
995 return VK_SAMPLE_COUNT_1_BIT;
// Stream a human-readable list of the set bit indices in 'bits' (used to report
// missing viewport/scissor indices).  Loop exits early once all set bits are
// consumed.  NOTE(review): the body that writes each index (and any separator
// handling) is elided in this view.
998 static void list_bits(std::ostream &s, uint32_t bits) {
999 for (int i = 0; i < 32 && bits; i++) {
1000 if (bits & (1 << i)) {
1010 // Validate draw-time state related to the PSO
// Draw-time checks tied to the currently bound graphics pipeline: vertex-buffer
// bindings, dynamic viewport/scissor coverage, MSAA sample-count agreement with
// the active subpass, and render-pass/subpass compatibility.  Returns true if
// any check fails.  NOTE(review): this view elides "bool skip = false;", the
// switch statement header, break statements, many closing braces, and the final
// return -- annotations below describe only the visible logic.
1011 static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
1012 CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
1015 // Verify vertex binding
// Every binding the PSO declares must have a buffer bound via
// vkCmdBindVertexBuffers (tracked in pCB->currentDrawData.buffers).
1016 if (pPipeline->vertexBindingDescriptions.size() > 0) {
1017 for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
1018 auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
1019 if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
1020 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
1022 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1023 HandleToUint64(pCB->commandBuffer), DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
1024 "The Pipeline State Object (0x%" PRIx64
1025 ") expects that this Command Buffer's vertex binding Index %u should be set via "
1026 "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
1027 "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
1028 HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
// Converse case: buffers bound but the PSO consumes no vertex input -- only a
// performance warning, not an error.
1032 if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
1033 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
1034 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
1035 DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
1036 "Vertex buffers are bound to command buffer (0x%" PRIx64
1037 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIx64 ").",
1038 HandleToUint64(pCB->commandBuffer), HandleToUint64(state.pipeline_state->pipeline));
1041 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
1042 // Skip check if rasterization is disabled or there is no viewport.
1043 if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
1044 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
1045 pPipeline->graphicsPipelineCI.pViewportState) {
1046 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
1047 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
// Build a bitmask of viewport indices the PSO requires and subtract the ones
// actually set on the CB; any remainder is an error.
// NOTE(review): the "if (dynViewport)" guard is elided in this view.
1050 auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
1051 auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
1052 if (missingViewportMask) {
1053 std::stringstream ss;
1054 ss << "Dynamic viewport(s) ";
1055 list_bits(ss, missingViewportMask);
1056 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
1057 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1058 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
// Same mask check for scissors (guard "if (dynScissor)" elided in this view).
1063 auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
1064 auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
1065 if (missingScissorMask) {
1066 std::stringstream ss;
1067 ss << "Dynamic scissor(s) ";
1068 list_bits(ss, missingScissorMask);
1069 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
1070 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1071 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
1076 // Verify that any MSAA request in PSO matches sample# in bound FB
1077 // Skip the check if rasterization is disabled.
1078 if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
1079 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
1080 VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
1081 if (pCB->activeRenderPass) {
1082 auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
1083 const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
// OR together the sample counts of all used attachments in the active subpass.
1085 unsigned subpass_num_samples = 0;
1087 for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
1088 auto attachment = subpass_desc->pColorAttachments[i].attachment;
1089 if (attachment != VK_ATTACHMENT_UNUSED)
1090 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1093 if (subpass_desc->pDepthStencilAttachment &&
1094 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1095 auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
1096 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
// Mismatch is an error unless VK_AMD_mixed_attachment_samples relaxes the rule.
1099 if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
1100 ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
1101 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1102 HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1103 "Num samples mismatch! At draw-time in Pipeline (0x%" PRIx64
1104 ") with %u samples while current RenderPass (0x%" PRIx64 ") w/ %u samples!",
1105 HandleToUint64(pPipeline->pipeline), pso_num_samples,
1106 HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
// else-branch (no active render pass at draw time) -- "} else {" elided in view.
1109 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1110 HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1111 "No active render pass found at draw-time in Pipeline (0x%" PRIx64 ")!",
1112 HandleToUint64(pPipeline->pipeline));
1115 // Verify that PSO creation renderPass is compatible with active renderPass
1116 if (pCB->activeRenderPass) {
1117 // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
1118 // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
1119 // Error codes for renderpass and subpass mismatches
// Defaults correspond to CMD_DRAW; the switch below (header elided in this
// view) overrides per draw command type.
1120 auto rp_error = VALIDATION_ERROR_1a200366, sp_error = VALIDATION_ERROR_1a200368;
1122 case CMD_DRAWINDEXED:
1123 rp_error = VALIDATION_ERROR_1a40038c;
1124 sp_error = VALIDATION_ERROR_1a40038e;
1126 case CMD_DRAWINDIRECT:
1127 rp_error = VALIDATION_ERROR_1aa003be;
1128 sp_error = VALIDATION_ERROR_1aa003c0;
1130 case CMD_DRAWINDIRECTCOUNTAMD:
1131 rp_error = VALIDATION_ERROR_1ac003f6;
1132 sp_error = VALIDATION_ERROR_1ac003f8;
1134 case CMD_DRAWINDEXEDINDIRECT:
1135 rp_error = VALIDATION_ERROR_1a600426;
1136 sp_error = VALIDATION_ERROR_1a600428;
1138 case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
1139 rp_error = VALIDATION_ERROR_1a800460;
1140 sp_error = VALIDATION_ERROR_1a800462;
// default case: only CMD_DRAW should reach here with the default error codes.
1143 assert(CMD_DRAW == cmd_type);
1146 std::string err_string;
1147 if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
1148 // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
1149 skip |= validateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
1150 pPipeline->rp_state.get(), caller, rp_error);
1152 if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
1154 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1155 HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
1156 pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
1163 // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
1164 // pipelineLayout[layoutIndex]
// Returns true when compatible; on failure fills errorMsg (the out-param string
// reference on the elided line 1167) with a human-readable explanation.
1165 static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
1166 PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
// Index past the pipeline layout's set count is an immediate incompatibility.
1168 auto num_sets = pipeline_layout->set_layouts.size();
1169 if (layoutIndex >= num_sets) {
1170 stringstream errorStr;
1171 errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
1172 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
1174 errorMsg = errorStr.str();
// "return false;" for the error path is elided in this view.
// Push descriptor sets are always treated as compatible here.
1177 if (descriptor_set->IsPushDescriptor()) return true;
1178 auto layout_node = pipeline_layout->set_layouts[layoutIndex];
1179 return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
1182 // Validate overall state at the time of a draw call
// Top-level per-draw validation: requires a bound pipeline, checks dynamic-state
// flags (graphics only), verifies every descriptor set the pipeline uses is
// bound / layout-compatible / updated, then runs pipeline draw-time checks.
// Returns true if any error was logged.  NOTE(review): several lines (the
// "result |= log_msg(" call head, "std::string errorString;", closing braces)
// are elided in this view.
1183 static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
1184 const VkPipelineBindPoint bind_point, const char *function,
1185 UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1186 bool result = false;
1187 auto const &state = cb_node->lastBound[bind_point];
1188 PIPELINE_STATE *pPipe = state.pipeline_state;
1189 if (nullptr == pPipe) {
1191 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1192 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_PIPELINE,
1193 "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1194 // Early return as any further checks below will be busted w/o a pipeline
1195 if (result) return true;
1197 // First check flag states
1198 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1199 result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1201 // Now complete other state checks
1203 auto const &pipeline_layout = pPipe->pipeline_layout;
// Walk only the set indices the pipeline actually uses (active_slots).
1205 for (const auto &set_binding_pair : pPipe->active_slots) {
1206 uint32_t setIndex = set_binding_pair.first;
1207 // If valid set is not bound throw an error
1208 if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1209 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1210 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND,
1211 "VkPipeline 0x%" PRIx64 " uses set #%u but that set is not bound.", HandleToUint64(pPipe->pipeline),
1213 } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) {
1214 // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1215 VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1216 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1217 HandleToUint64(setHandle), DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE,
1218 "VkDescriptorSet (0x%" PRIx64
1219 ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIx64 " due to: %s",
1220 HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1221 } else { // Valid set is bound and layout compatible, validate that it's updated
1222 // Pull the set node
1223 cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1224 // Validate the draw-time state for this descriptor set
1225 std::string err_str;
1226 if (!descriptor_set->IsPushDescriptor()) {
1227 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
1228 // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
1229 // Here, the currently bound pipeline determines whether an image validation check is redundant...
1230 // for images are the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
1231 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
1233 const auto &binding_req_map = reduced_map.Map();
1235 if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
1237 auto set = descriptor_set->GetSet();
1238 result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1239 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(set),
1240 DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
1241 "Descriptor set 0x%" PRIx64 " encountered the following validation error at %s time: %s",
1242 HandleToUint64(set), function, err_str.c_str());
1248 // Check general pipeline state that needs to be validated at drawtime
1249 if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1250 result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);
// State-update side of a draw: bind each active descriptor set's resources to
// the command buffer, record storage-image/buffer updates, and mark the vertex
// buffer as consumed when the PSO declares vertex bindings.
// NOTE(review): pPipe is dereferenced without a null check inside the
// pipeline_layout guard -- appears to rely on callers validating the bound
// pipeline first (see ValidateDrawState); confirm against the full file.
1255 static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1256 auto const &state = cb_state->lastBound[bind_point];
1257 PIPELINE_STATE *pPipe = state.pipeline_state;
1258 if (VK_NULL_HANDLE != state.pipeline_layout) {
1259 for (const auto &set_binding_pair : pPipe->active_slots) {
1260 uint32_t setIndex = set_binding_pair.first;
1261 // Pull the set node
1262 cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1263 if (!descriptor_set->IsPushDescriptor()) {
1264 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
1265 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
1266 const auto &binding_req_map = reduced_map.Map();
1268 // Bind this set and its active descriptor resources to the command buffer
1269 descriptor_set->BindCommandBuffer(cb_state, binding_req_map);
1270 // For given active slots record updated images & buffers
1271 descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
// Record that this CB's bound vertex buffers are actually consumed by a PSO.
1275 if (pPipe->vertexBindingDescriptions.size() > 0) {
1276 cb_state->vertex_buffer_used = true;
// Pipeline-create-time validation that must run under the layer lock (it may
// look up other pipelines via getPipelineState).  Checks derivative-pipeline
// rules: exactly one of basePipelineHandle / basePipelineIndex must be given,
// a base-by-index must precede the derivative in the create array, and the
// base must have been created with ALLOW_DERIVATIVES.  Returns true on error.
// NOTE(review): "bool skip = false;", closing braces, and the final return are
// elided in this view.
1280 static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1281 int pipelineIndex) {
1284 PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1286 // If create derivative bit is set, check that we've specified a base
1287 // pipeline correctly, and that the base pipeline was created to allow
1289 if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1290 PIPELINE_STATE *pBasePipeline = nullptr;
// XOR: exactly one of (handle != VK_NULL_HANDLE) / (index != -1) must hold.
1291 if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1292 (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1293 // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1294 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1295 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1296 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1297 } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1298 if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1299 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1300 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_208005a0,
1301 "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
// else-branch of the index bounds check ("} else {") elided in this view.
1303 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
1305 } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
// Base given by handle: resolve via the device map (this is why the lock is
// required for this function).
1306 pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1309 if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1310 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1311 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1312 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1319 // UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
1320 static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1321 int pipelineIndex) {
1324 PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1326 // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
1327 // produces nonsense errors that confuse users. Other layers should already
1328 // emit errors for renderpass being invalid.
1329 auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
1330 if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
1331 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1332 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ee,
1333 "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
1334 pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
1335 subpass_desc = nullptr;
1338 if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1339 const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1340 if (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
1342 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1343 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005d4,
1344 "vkCreateGraphicsPipelines(): Render pass (0x%" PRIx64
1345 ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u.",
1346 HandleToUint64(pPipeline->rp_state->renderPass), pPipeline->graphicsPipelineCI.subpass,
1347 subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
1349 if (!dev_data->enabled_features.independentBlend) {
1350 if (pPipeline->attachments.size() > 1) {
1351 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1352 for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1353 // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1354 // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1355 // only attachment state, so memcmp is best suited for the comparison
1356 if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1357 sizeof(pAttachments[0]))) {
1359 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1360 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004ba,
1361 "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
1362 "pAttachments must be identical.");
1368 if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1370 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1371 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004bc,
1372 "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
1376 if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1379 // Each shader's stage must be unique
1380 if (pPipeline->duplicate_shaders) {
1381 for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1382 if (pPipeline->duplicate_shaders & stage) {
1383 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1384 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1385 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1386 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1391 if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1392 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1393 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ae,
1394 "Invalid Pipeline CreateInfo State: Vertex Shader required.");
1396 // Either both or neither TC/TE shaders should be defined
1397 bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1398 bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1399 if (has_control && !has_eval) {
1400 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1401 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b2,
1402 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1404 if (!has_control && has_eval) {
1405 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1406 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b4,
1407 "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1409 // Compute shaders should be specified independent of Gfx shaders
1410 if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1411 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1412 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b0,
1413 "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
1415 // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1416 // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1417 if (has_control && has_eval &&
1418 (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1419 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1420 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1421 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c0,
1422 "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
1423 "tessellation pipelines.");
1425 if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1426 pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1427 if (!has_control || !has_eval) {
1428 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1429 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c2,
1430 "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
1431 "for tessellation pipelines.");
1435 // If a rasterization state is provided...
1436 if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1437 if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1438 (!dev_data->enabled_features.depthClamp)) {
1439 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1440 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_1020061c,
1441 "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
1442 "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
1445 if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1446 (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1447 (!dev_data->enabled_features.depthBiasClamp)) {
1448 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1449 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_FEATURE,
1450 "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
1451 "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1452 "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1455 // If rasterization is enabled...
1456 if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1457 if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
1458 (!dev_data->enabled_features.alphaToOne)) {
1459 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1460 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_10000622,
1461 "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1462 "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
1465 // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1466 if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1467 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1468 if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1469 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1470 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e0,
1471 "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
1472 "and subpass uses a depth/stencil attachment.");
1474 } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1475 (!dev_data->enabled_features.depthBounds)) {
1476 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1477 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f6004ac,
1478 "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
1479 "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
1480 "set to VK_FALSE.");
1484 // If subpass uses color attachments, pColorBlendState must be valid pointer
1486 uint32_t color_attachment_count = 0;
1487 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1488 if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1489 ++color_attachment_count;
1492 if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1493 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1494 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e2,
1495 "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
1496 "subpass uses color attachments.");
1502 auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
1504 for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
1505 VkFormat format = vi->pVertexAttributeDescriptions[j].format;
1506 // Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
1507 VkFormatProperties properties;
1508 dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
1510 if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
1512 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1513 VALIDATION_ERROR_14a004de,
1514 "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
1515 "(%s) is not a supported vertex buffer format.",
1516 pipelineIndex, j, string_VkFormat(format));
1521 if (dev_data->extensions.vk_amd_mixed_attachment_samples) {
1522 VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
1523 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1524 if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1526 std::max(max_sample_count,
1527 pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
1530 if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1532 std::max(max_sample_count,
1533 pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
1535 if (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count) {
1536 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1537 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_09600bc2,
1538 "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
1539 "attachment samples (%s) used in subpass %u.",
1541 string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
1542 string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
1549 // Block of code at start here specifically for managing/tracking DSs
1551 // Return Pool node ptr for specified pool or else NULL
// Return the layer's tracked DESCRIPTOR_POOL_STATE for the given pool handle.
// NOTE(review): the not-found branch body is elided in this listing (lines 1555-1556);
// presumably it returns null — confirm against the full file.
1552 DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1553 auto pool_it = dev_data->descriptorPoolMap.find(pool);
1554 if (pool_it == dev_data->descriptorPoolMap.end()) {
// Pool is known: hand back its state object.
1557 return pool_it->second;
1560 // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
1561 // func_str is the name of the calling function
1562 // Return false if no errors occur
1563 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
// Validate that 'set' is a known descriptor set and is not still referenced by an
// in-flight command buffer. Returns true if a validation error was flagged (per the
// comment block above this function in the file). func_str names the calling API.
1564 static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
// This check can be disabled via the instance's disabled-checks configuration.
1565 if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1567 auto set_node = dev_data->setMap.find(set);
// Unknown handle: either never allocated or already destroyed.
1568 if (set_node == dev_data->setMap.end()) {
1569 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1570 HandleToUint64(set), DRAWSTATE_DOUBLE_DESTROY,
1571 "Cannot call %s() on descriptor set 0x%" PRIx64 " that has not been allocated.", func_str.c_str(),
1572 HandleToUint64(set));
1574 // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
// Set is known: error if a command buffer is still using it (in_use is an atomic counter).
1575 if (set_node->second->in_use.load()) {
1576 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1577 HandleToUint64(set), VALIDATION_ERROR_2860026a,
1578 "Cannot call %s() on descriptor set 0x%" PRIx64 " that is in use by a command buffer.",
1579 func_str.c_str(), HandleToUint64(set));
1585 // Remove set from setMap and delete the set
// Remove the set's entry from the layer's setMap and destroy the tracking object.
// Does not touch the owning pool's 'sets' list — callers erase/clear that themselves.
1586 static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1587 dev_data->setMap.erase(descriptor_set->GetSet());
1588 delete descriptor_set;
1590 // Free all DS Pools including their Sets & related sub-structs
1591 // NOTE : Calls to this function should be wrapped in mutex
// Destroy all tracked descriptor pools: free every set in each pool, then erase the
// pool entry itself. Per the NOTE above, callers must hold the global lock.
1592 static void deletePools(layer_data *dev_data) {
1593 for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1594 // Remove this pools' sets from setMap and delete them
1595 for (auto ds : ii->second->sets) {
1596 freeDescriptorSet(dev_data, ds);
1598 ii->second->sets.clear();
// erase() returns the next iterator, keeping the loop valid while mutating the map.
1600 ii = dev_data->descriptorPoolMap.erase(ii);
// State update for vkResetDescriptorPool: free all sets allocated from 'pool' and
// restore the pool's per-type and total-set capacity counters to their maximums.
// 'device' and 'flags' are currently unused here (see TODO below re: flags).
1604 static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1605 VkDescriptorPoolResetFlags flags) {
1606 DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1607 // TODO: validate flags
1608 // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1609 for (auto ds : pPool->sets) {
1610 freeDescriptorSet(dev_data, ds);
1612 pPool->sets.clear();
1613 // Reset available count for each type and available sets for this pool
1614 for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1615 pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1617 pPool->availableSets = pPool->maxSets;
1620 // For given CB object, fetch associated CB Node from map
// Look up the tracked GLOBAL_CB_NODE for a command buffer handle.
// NOTE(review): the not-found branch and the success return are elided in this
// listing (lines 1624-1628); presumably returns null when absent — confirm.
1621 GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1622 auto it = dev_data->commandBufferMap.find(cb);
1623 if (it == dev_data->commandBufferMap.end()) {
1629 // If a renderpass is active, verify that the given command type is appropriate for current subpass state
// When a render pass is active, check that 'cmd_type' is legal for the current
// subpass contents setting. Returns false immediately when no render pass is active.
1630 bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1631 if (!pCB->activeRenderPass) return false;
// SECONDARY_COMMAND_BUFFERS subpasses only admit vkCmdExecuteCommands plus the
// subpass/renderpass transition commands.
1633 if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1634 (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1635 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1636 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1637 "Commands cannot be called in a subpass using secondary command buffers.");
// Conversely, INLINE subpasses may not execute secondary command buffers.
1638 } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1639 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1640 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1641 "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
// Check that the queue family of the pool 'cb_node' was allocated from supports at
// least one of 'required_flags'. On mismatch, logs 'error_code' with a human-readable
// list of the acceptable queue capabilities and returns the log_msg result.
1646 bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
1647 VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
1648 auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1650 VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1651 if (!(required_flags & queue_flags)) {
// Build "A or B or C" text from whichever of the three capability bits were required.
1652 string required_flags_string;
1653 for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1654 if (flag & required_flags) {
1655 if (required_flags_string.size()) {
1656 required_flags_string += " or ";
1658 required_flags_string += string_VkQueueFlagBits(flag);
1661 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1662 HandleToUint64(cb_node->commandBuffer), error_code,
1663 "Cannot call %s on a command buffer allocated from a pool without %s capabilities..", caller_name,
1664 required_flags_string.c_str());
// Map an object type to the phrase describing how that binding could have invalidated
// a command buffer (used in ReportInvalidCommandBuffer's message below).
1670 static char const *GetCauseStr(VK_OBJECT obj) {
1671 if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
1672 if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
// Emit one error per broken binding explaining why 'cb_state' is invalid: the bound
// object was destroyed/updated/rerecorded after being recorded into this buffer.
// 'call_source' names the API call being attempted on the invalid buffer.
1676 static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
1678 for (auto obj : cb_state->broken_bindings) {
1679 const char *type_str = object_string[obj.type];
1680 const char *cause_str = GetCauseStr(obj);
1681 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1682 HandleToUint64(cb_state->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1683 "You are adding %s to command buffer 0x%" PRIx64 " that is invalid because bound %s 0x%" PRIx64 " was %s.",
1684 call_source, HandleToUint64(cb_state->commandBuffer), type_str, obj.handle, cause_str);
1689 // 'commandBuffer must be in the recording state' valid usage error code for each command
1690 // Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
1691 // Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
// Table mapping each vkCmd* command type to its "commandBuffer must be in the
// recording state" validation error code; CMD_NONE maps to the undefined-VUID
// fallback used by ValidateCmd when a command type is missing from this table.
1692 using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
1693 static const std::unordered_map<CmdTypeHashType, UNIQUE_VALIDATION_ERROR_CODE> must_be_recording_map = {
1694 {CMD_NONE, VALIDATION_ERROR_UNDEFINED}, // UNMATCHED
1695 {CMD_BEGINQUERY, VALIDATION_ERROR_17802413},
1696 {CMD_BEGINRENDERPASS, VALIDATION_ERROR_17a02413},
1697 {CMD_BINDDESCRIPTORSETS, VALIDATION_ERROR_17c02413},
1698 {CMD_BINDINDEXBUFFER, VALIDATION_ERROR_17e02413},
1699 {CMD_BINDPIPELINE, VALIDATION_ERROR_18002413},
1700 {CMD_BINDVERTEXBUFFERS, VALIDATION_ERROR_18202413},
1701 {CMD_BLITIMAGE, VALIDATION_ERROR_18402413},
1702 {CMD_CLEARATTACHMENTS, VALIDATION_ERROR_18602413},
1703 {CMD_CLEARCOLORIMAGE, VALIDATION_ERROR_18802413},
1704 {CMD_CLEARDEPTHSTENCILIMAGE, VALIDATION_ERROR_18a02413},
1705 {CMD_COPYBUFFER, VALIDATION_ERROR_18c02413},
1706 {CMD_COPYBUFFERTOIMAGE, VALIDATION_ERROR_18e02413},
1707 {CMD_COPYIMAGE, VALIDATION_ERROR_19002413},
1708 {CMD_COPYIMAGETOBUFFER, VALIDATION_ERROR_19202413},
1709 {CMD_COPYQUERYPOOLRESULTS, VALIDATION_ERROR_19402413},
1710 {CMD_DEBUGMARKERBEGINEXT, VALIDATION_ERROR_19602413},
1711 {CMD_DEBUGMARKERENDEXT, VALIDATION_ERROR_19802413},
1712 {CMD_DEBUGMARKERINSERTEXT, VALIDATION_ERROR_19a02413},
1713 {CMD_DISPATCH, VALIDATION_ERROR_19c02413},
1714 // Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, VALIDATION_ERROR_19e02413 },
1715 {CMD_DISPATCHINDIRECT, VALIDATION_ERROR_1a002413},
1716 {CMD_DRAW, VALIDATION_ERROR_1a202413},
1717 {CMD_DRAWINDEXED, VALIDATION_ERROR_1a402413},
1718 {CMD_DRAWINDEXEDINDIRECT, VALIDATION_ERROR_1a602413},
1719 // Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD, VALIDATION_ERROR_1a802413 },
1720 {CMD_DRAWINDIRECT, VALIDATION_ERROR_1aa02413},
1721 // Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD, VALIDATION_ERROR_1ac02413 },
1722 {CMD_ENDCOMMANDBUFFER, VALIDATION_ERROR_27400076},
1723 {CMD_ENDQUERY, VALIDATION_ERROR_1ae02413},
1724 {CMD_ENDRENDERPASS, VALIDATION_ERROR_1b002413},
1725 {CMD_EXECUTECOMMANDS, VALIDATION_ERROR_1b202413},
1726 {CMD_FILLBUFFER, VALIDATION_ERROR_1b402413},
1727 {CMD_NEXTSUBPASS, VALIDATION_ERROR_1b602413},
1728 {CMD_PIPELINEBARRIER, VALIDATION_ERROR_1b802413},
1729 // Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, VALIDATION_ERROR_1ba02413 },
1730 {CMD_PUSHCONSTANTS, VALIDATION_ERROR_1bc02413},
1731 {CMD_PUSHDESCRIPTORSETKHR, VALIDATION_ERROR_1be02413},
1732 {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, VALIDATION_ERROR_1c002413},
1733 // Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX, VALIDATION_ERROR_1c202413 },
1734 {CMD_RESETEVENT, VALIDATION_ERROR_1c402413},
1735 {CMD_RESETQUERYPOOL, VALIDATION_ERROR_1c602413},
1736 {CMD_RESOLVEIMAGE, VALIDATION_ERROR_1c802413},
1737 {CMD_SETBLENDCONSTANTS, VALIDATION_ERROR_1ca02413},
1738 {CMD_SETDEPTHBIAS, VALIDATION_ERROR_1cc02413},
1739 {CMD_SETDEPTHBOUNDS, VALIDATION_ERROR_1ce02413},
1740 // Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, VALIDATION_ERROR_1d002413 },
1741 {CMD_SETDISCARDRECTANGLEEXT, VALIDATION_ERROR_1d202413},
1742 {CMD_SETEVENT, VALIDATION_ERROR_1d402413},
1743 {CMD_SETLINEWIDTH, VALIDATION_ERROR_1d602413},
1744 {CMD_SETSAMPLELOCATIONSEXT, VALIDATION_ERROR_3e202413},
1745 {CMD_SETSCISSOR, VALIDATION_ERROR_1d802413},
1746 {CMD_SETSTENCILCOMPAREMASK, VALIDATION_ERROR_1da02413},
1747 {CMD_SETSTENCILREFERENCE, VALIDATION_ERROR_1dc02413},
1748 {CMD_SETSTENCILWRITEMASK, VALIDATION_ERROR_1de02413},
1749 {CMD_SETVIEWPORT, VALIDATION_ERROR_1e002413},
1750 // Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV, VALIDATION_ERROR_1e202413 },
1751 {CMD_UPDATEBUFFER, VALIDATION_ERROR_1e402413},
1752 {CMD_WAITEVENTS, VALIDATION_ERROR_1e602413},
1753 {CMD_WRITETIMESTAMP, VALIDATION_ERROR_1e802413},
1756 // Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1757 // there's an issue with the Cmd ordering
// Dispatch on the command buffer's lifecycle state: recording buffers get subpass
// checks, invalidated buffers get broken-binding reports, and anything else
// (not yet begun / already ended) gets the per-command "must be recording" VUID.
1758 bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1759 switch (cb_state->state) {
1761 return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1763 case CB_INVALID_COMPLETE:
1764 case CB_INVALID_INCOMPLETE:
1765 return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1768 auto error_it = must_be_recording_map.find(cmd);
1769 // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
1770 assert(error_it != must_be_recording_map.cend());
1771 if (error_it == must_be_recording_map.cend()) {
1772 error_it = must_be_recording_map.find(CMD_NONE); // But we'll handle the asserting case, in case of a test gap
1774 const auto error = error_it->second;
1775 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1776 HandleToUint64(cb_state->commandBuffer), error,
1777 "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
1781 // For given object struct return a ptr of BASE_NODE type for its wrapping struct
// Given a type-tagged VK_OBJECT, fetch the layer's tracking struct for that handle
// as a BASE_NODE*. Returns the null-initialized pointer for unhandled types (see
// TODO at the bottom). Each case reinterprets the stored uint64 handle back to the
// strongly-typed Vulkan handle expected by the per-type getter.
1782 BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1783 BASE_NODE *base_ptr = nullptr;
1784 switch (object_struct.type) {
1785 case kVulkanObjectTypeDescriptorSet: {
1786 base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1789 case kVulkanObjectTypeSampler: {
1790 base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1793 case kVulkanObjectTypeQueryPool: {
1794 base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1797 case kVulkanObjectTypePipeline: {
1798 base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1801 case kVulkanObjectTypeBuffer: {
1802 base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1805 case kVulkanObjectTypeBufferView: {
1806 base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1809 case kVulkanObjectTypeImage: {
1810 base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1813 case kVulkanObjectTypeImageView: {
1814 base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1817 case kVulkanObjectTypeEvent: {
1818 base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1821 case kVulkanObjectTypeDescriptorPool: {
1822 base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1825 case kVulkanObjectTypeCommandPool: {
1826 base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1829 case kVulkanObjectTypeFramebuffer: {
1830 base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1833 case kVulkanObjectTypeRenderPass: {
1834 base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1837 case kVulkanObjectTypeDeviceMemory: {
1838 base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1842 // TODO : Any other objects to be handled here?
1849 // Tie the VK_OBJECT to the cmd buffer which includes:
1850 // Add object_binding to cmd buffer
1851 // Add cb_binding to object
// Record a two-way link: the object's cb_bindings set gains this command buffer,
// and the command buffer's object_bindings set gains the object (per the comment
// block above this function in the file).
1852 static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1853 cb_bindings->insert(cb_node);
1854 cb_node->object_bindings.insert(obj);
1856 // For a given object, if cb_node is in that objects cb_bindings, remove cb_node
// Drop the reverse link from 'object' back to 'cb_node' (inverse of
// addCommandBufferBinding's first insert); no-op if the object is no longer tracked.
1857 static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
1858 BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1859 if (base_obj) base_obj->cb_bindings.erase(cb_node);
1861 // Reset the command buffer state
1862 // Maintain the createInfo and set state to CB_NEW, but clear all other state
// Reset all tracked state for command buffer 'cb' back to CB_NEW, preserving only
// createInfo (see comment block above this function). Also severs every cross-object
// link the buffer accumulated: linked secondaries, object bindings, framebuffer refs,
// and memory references.
1863 static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
1864 GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1866 pCB->in_use.store(0);
1867 // Reset CB state (note that createInfo is not cleared)
1868 pCB->commandBuffer = cb;
1869 memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1870 memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1871 pCB->hasDrawCmd = false;
1872 pCB->state = CB_NEW;
1873 pCB->submitCount = 0;
1874 pCB->image_layout_change_count = 1; // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
// Dynamic-state tracking masks.
1876 pCB->static_status = 0;
1877 pCB->viewportMask = 0;
1878 pCB->scissorMask = 0;
// Reset per-bind-point (graphics/compute) binding state.
1880 for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1881 pCB->lastBound[i].reset();
1884 memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1885 pCB->activeRenderPass = nullptr;
1886 pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1887 pCB->activeSubpass = 0;
1888 pCB->broken_bindings.clear();
1889 pCB->waitedEvents.clear();
1890 pCB->events.clear();
1891 pCB->writeEventsBeforeWait.clear();
1892 pCB->waitedEventsBeforeQueryReset.clear();
1893 pCB->queryToStateMap.clear();
1894 pCB->activeQueries.clear();
1895 pCB->startedQueries.clear();
1896 pCB->imageLayoutMap.clear();
1897 pCB->eventToStageMap.clear();
1898 pCB->drawData.clear();
1899 pCB->currentDrawData.buffers.clear();
1900 pCB->vertex_buffer_used = false;
1901 pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1902 // If secondary, invalidate any primary command buffer that may call us.
1903 if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1904 invalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1907 // Remove reverse command buffer links.
1908 for (auto pSubCB : pCB->linkedCommandBuffers) {
1909 pSubCB->linkedCommandBuffers.erase(pCB);
1911 pCB->linkedCommandBuffers.clear();
1912 pCB->updateImages.clear();
1913 pCB->updateBuffers.clear();
1914 clear_cmd_buf_and_mem_references(dev_data, pCB);
1915 pCB->queue_submit_functions.clear();
1916 pCB->cmd_execute_commands_functions.clear();
1917 pCB->eventUpdates.clear();
1918 pCB->queryUpdates.clear();
1920 // Remove object bindings
1921 for (auto obj : pCB->object_bindings) {
1922 removeCommandBufferBinding(dev_data, &obj, pCB);
1924 pCB->object_bindings.clear();
1925 // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1926 for (auto framebuffer : pCB->framebuffers) {
1927 auto fb_state = GetFramebufferState(dev_data, framebuffer);
1928 if (fb_state) fb_state->cb_bindings.erase(pCB);
1930 pCB->framebuffers.clear();
1931 pCB->activeFramebuffer = VK_NULL_HANDLE;
// Build the mask of pipeline state that is *static* (baked into the pipeline):
// start from all-set and clear the bit for each state listed as dynamic in 'ds'.
// NOTE(review): the null-'ds' guard, if any, is elided in this listing — confirm
// callers handle ds == nullptr before trusting the dereference below.
1935 CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
1936 // initially assume everything is static state
1937 CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;
1940 for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
1941 switch (ds->pDynamicStates[i]) {
1942 case VK_DYNAMIC_STATE_LINE_WIDTH:
1943 flags &= ~CBSTATUS_LINE_WIDTH_SET;
1945 case VK_DYNAMIC_STATE_DEPTH_BIAS:
1946 flags &= ~CBSTATUS_DEPTH_BIAS_SET;
1948 case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1949 flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1951 case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1952 flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1954 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1955 flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1957 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1958 flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1960 case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1961 flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1963 case VK_DYNAMIC_STATE_SCISSOR:
1964 flags &= ~CBSTATUS_SCISSOR_SET;
1966 case VK_DYNAMIC_STATE_VIEWPORT:
1967 flags &= ~CBSTATUS_VIEWPORT_SET;
1978 // Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// Flag 'msgCode' if 'apiName' was issued while a render pass is active on pCB
// (the call is only legal outside a render pass — see comment above this function).
// Returns the log_msg result, i.e. whether the call should be skipped.
1980 bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
1981 UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1982 bool inside = false;
1983 if (pCB->activeRenderPass) {
1984 inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1985 HandleToUint64(pCB->commandBuffer), msgCode,
1986 "%s: It is invalid to issue this call inside an active render pass (0x%" PRIx64 ").", apiName,
1987 HandleToUint64(pCB->activeRenderPass->renderPass));
1992 // Flags validation error if the associated call is made outside a render pass. The apiName
1993 // routine should ONLY be called inside a render pass.
// Flag 'msgCode' if 'apiName' was issued outside a render pass. A secondary buffer
// begun with RENDER_PASS_CONTINUE_BIT counts as "inside" even without its own
// active render pass, hence the extra flags check on the secondary branch.
1994 bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1995 bool outside = false;
1996 if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
1997 ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
1998 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
1999 outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2000 HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
// Wire up this layer's debug-report and debug-messenger output actions from the
// "lunarg_core_validation" settings (log destinations, severities, etc.).
2006 static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
2007 layer_debug_report_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
2008 layer_debug_messenger_actions(instance_data->report_data, instance_data->logging_messenger, pAllocator,
2009 "lunarg_core_validation");
2012 // For the given ValidationCheck enum, set all relevant instance disabled flags to true
// Apply a VK_EXT_validation_flags structure: translate each disabled-check enum
// into the corresponding instance 'disabled' flag(s).
2013 void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
2014 for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
2015 switch (val_flags_struct->pDisabledValidationChecks[i]) {
2016 case VK_VALIDATION_CHECK_SHADERS_EXT:
2017 instance_data->disabled.shader_validation = true;
2019 case VK_VALIDATION_CHECK_ALL_EXT:
2020 // Set all disabled flags to true
2021 instance_data->disabled.SetAll(true);
// Layer intercept for vkCreateInstance: advance the loader's layer chain, call the
// next CreateInstance down the chain, then on success initialize this layer's
// per-instance data (dispatch table, debug reporting, extension/API-version state)
// and honor any VK_EXT_validation_flags found on the pNext chain.
2029 VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
2030 VkInstance *pInstance) {
2031 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2033 assert(chain_info->u.pLayerInfo);
2034 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2035 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
2036 if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
2038 // Advance the link info for the next element on the chain
2039 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2041 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
2042 if (result != VK_SUCCESS) return result;
// Downstream creation succeeded: set up this layer's bookkeeping for the new instance.
2044 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
2045 instance_data->instance = *pInstance;
2046 layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
2047 instance_data->report_data = debug_utils_create_instance(
2048 &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
// Record the effective API version and which instance extensions were enabled.
2050 instance_data->api_version = instance_data->extensions.InitFromInstanceCreateInfo(
2051 (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0), pCreateInfo);
2052 init_core_validation(instance_data, pAllocator);
2054 ValidateLayerOrdering(*pCreateInfo);
2055 // Parse any pNext chains
2056 const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
2057 if (validation_flags_ext) {
2058 SetDisabledFlags(instance_data, validation_flags_ext);
2064 // Hook DestroyInstance to remove tableInstanceMap entry
// Layer intercept for vkDestroyInstance: forward the destroy down the chain, then
// under the global lock tear down this layer's logging callbacks/messengers and
// free the per-instance layer data keyed by the instance's dispatch key.
2065 VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
2066 // TODOSC : Shouldn't need any customization here
2067 dispatch_key key = get_dispatch_key(instance);
2068 // TBD: Need any locking this early, in case this function is called at the
2069 // same time by more than one thread?
2070 instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
2071 instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
2073 lock_guard_t lock(global_lock);
2074 // Clean up logging callback, if any
2075 while (instance_data->logging_messenger.size() > 0) {
2076 VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
2077 layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
2078 instance_data->logging_messenger.pop_back();
2080 while (instance_data->logging_callback.size() > 0) {
2081 VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
2082 layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
2083 instance_data->logging_callback.pop_back();
2086 layer_debug_utils_destroy_instance(instance_data->report_data);
2087 FreeLayerDataPtr(key, instance_layer_data_map);
// Check that 'requested_queue_family' is within the queue-family count previously
// obtained for this physical device; if not, log 'err_code' with a message that
// names the offending variable ('queue_family_var_name') in 'cmd_name' and notes
// whether the count was ever queried at all.
2090 static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2091 uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
2092 const char *queue_family_var_name) {
// Mention the ...Properties2[KHR] variant in the message only when that extension is enabled.
2095 const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
2096 ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
2099 std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
2100 ? "the pQueueFamilyPropertyCount was never obtained"
2101 : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
2103 if (requested_queue_family >= pd_state->queue_family_count) {
2104 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2105 HandleToUint64(pd_state->phys_device), err_code,
2106 "%s: %s (= %" PRIu32
2107 ") is not less than any previously obtained pQueueFamilyPropertyCount from "
2108 "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
2109 cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
2114 // Verify VkDeviceQueueCreateInfos
// Validate each VkDeviceQueueCreateInfo passed to vkCreateDevice: the
// queueFamilyIndex must be a previously-observed valid family, and queueCount
// must not exceed the queueCount reported for that family. Returns true if any
// validation message was logged.
2115 static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2116 uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
2119 for (uint32_t i = 0; i < info_count; ++i) {
2120 const auto requested_queue_family = infos[i].queueFamilyIndex;
2122 // Verify that requested queue family is known to be valid at this point in time
2123 std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
2124 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
2125 "vkCreateDevice", queue_family_var_name.c_str());
2127 // Verify that requested queue count of queue family is known to be valid at this point in time
// Only check queueCount when the family index itself is in range (guards the
// vector accesses below).
2128 if (requested_queue_family < pd_state->queue_family_count) {
2129 const auto requested_queue_count = infos[i].queueCount;
2130 const auto queue_family_props_count = pd_state->queue_family_properties.size();
// The app may have obtained the count without fetching the properties array;
// distinguish that case in the message text.
2131 const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
2132 const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
2133 ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
2135 std::string count_note =
2136 !queue_family_has_props
2137 ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
2138 : "i.e. is not less than or equal to " +
2139 std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
// Flag both "properties never fetched" and "count exceeds available queues".
2141 if (!queue_family_has_props ||
2142 requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
2144 instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2145 HandleToUint64(pd_state->phys_device), VALIDATION_ERROR_06c002fc,
2146 "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
2147 ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
2148 "].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
2149 i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
2157 // Verify that features have been queried and that they are available
// Verify every feature the app enables in VkPhysicalDeviceFeatures is actually
// supported by the physical device (per the state cached in pd_state->features).
// Also warns if features were requested without ever calling
// vkGetPhysicalDeviceFeatures(). Returns true if any message was logged.
2158 static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2159 const VkPhysicalDeviceFeatures *requested_features) {
// Treat both structs as flat arrays of VkBool32 so all members can be compared
// in one loop; relies on VkPhysicalDeviceFeatures being all-VkBool32 members.
2162 const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
2163 const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
2164 // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
2165 // Need to provide the struct member name with the issue. To do that seems like we'll
2166 // have to loop through each struct member which should be done w/ codegen to keep in synch.
2167 uint32_t errors = 0;
2168 uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2169 for (uint32_t i = 0; i < total_bools; i++) {
// requested[i] > actual[i] means requested VK_TRUE where device reports VK_FALSE.
2170 if (requested[i] > actual[i]) {
2171 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2172 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2173 "While calling vkCreateDevice(), requesting feature '%s' in VkPhysicalDeviceFeatures struct, which is "
2174 "not available on this device.",
2175 GetPhysDevFeatureString(i));
// Extra nudge when unsupported features were requested and the app never queried
// feature support at all.
2179 if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
2180 // If user didn't request features, notify them that they should
2181 // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2182 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2183 0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2184 "You requested features that are unavailable on this device. You should first query feature availability "
2185 "by calling vkGetPhysicalDeviceFeatures().");
// Layer intercept for vkCreateDevice. Pre-validates the create info (queue
// families, requested features), then calls down the chain, and on success
// initializes this layer's per-device state (dispatch table, cached physical
// device properties/limits, enabled features, extension-specific limits).
2190 VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2191 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2193 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2195 unique_lock_t lock(global_lock);
2196 auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2198 // TODO: object_tracker should perhaps do this instead
2199 // and it does not seem to currently work anyway -- the loader just crashes before this point
// Catch devices created from a physical device that was never enumerated.
2200 if (!GetPhysicalDeviceState(instance_data, gpu)) {
2202 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
2203 DEVLIMITS_MUST_QUERY_COUNT, "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
2206 // Check that any requested features are available
2207 // The enabled features can come from either pEnabledFeatures, or from the pNext chain
2208 const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
2209 if (nullptr == enabled_features_found) {
// Fall back to VkPhysicalDeviceFeatures2KHR chained via pNext.
2210 const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
2212 enabled_features_found = &(features2->features);
2216 if (enabled_features_found) {
2217 skip |= ValidateRequestedFeatures(instance_data, pd_state, enabled_features_found);
2221 ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
2223 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
// Standard layer plumbing: locate the next layer's entry points via the
// VK_LAYER_LINK_INFO chain element, then advance it for layers below us.
2225 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2227 assert(chain_info->u.pLayerInfo);
2228 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2229 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2230 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2231 if (fpCreateDevice == NULL) {
2232 return VK_ERROR_INITIALIZATION_FAILED;
2235 // Advance the link info for the next element on the chain
2236 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2240 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2241 if (result != VK_SUCCESS) {
// Creation succeeded: set up this layer's per-device bookkeeping.
2246 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2248 device_data->instance_data = instance_data;
2249 // Setup device dispatch table
2250 layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2251 device_data->device = *pDevice;
2252 // Save PhysicalDevice handle
2253 device_data->physical_device = gpu;
2255 device_data->report_data = layer_debug_utils_create_device(instance_data->report_data, *pDevice);
2257 // Get physical device limits for this device
2258 instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
// Record which device extensions/API version are in effect for later checks.
2260 device_data->api_version = device_data->extensions.InitFromDeviceCreateInfo(
2261 &instance_data->extensions, device_data->phys_dev_properties.properties.apiVersion, pCreateInfo);
// Cache queue family properties (two-call count/fetch idiom).
2264 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2265 device_data->phys_dev_properties.queue_family_properties.resize(count);
2266 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2267 gpu, &count, &device_data->phys_dev_properties.queue_family_properties[0]);
2268 // TODO: device limits should make sure these are compatible
// Snapshot the enabled features (zeroed if the app enabled none).
2269 if (enabled_features_found) {
2270 device_data->enabled_features = *enabled_features_found;
2272 memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2274 // Store physical device properties and physical device mem limits into device layer_data structs
2275 instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2276 instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2278 if (device_data->extensions.vk_khr_push_descriptor) {
2279 // Get the needed push_descriptor limits
2280 auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
2281 auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
2282 instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
2283 device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
2288 ValidateLayerOrdering(*pCreateInfo);
// Layer intercept for vkDestroyDevice: frees all of this layer's per-device
// tracking state (pipelines, render passes, command buffers, descriptor pools,
// image/buffer maps, queues), then forwards the destroy down the chain and
// releases the layer_data entry for this device.
2294 VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2295 // TODOSC : Shouldn't need any customization here
2296 dispatch_key key = get_dispatch_key(device);
2297 layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2298 // Free all the memory
2299 unique_lock_t lock(global_lock);
2300 dev_data->pipelineMap.clear();
2301 dev_data->renderPassMap.clear();
// commandBufferMap values are heap-allocated GLOBAL_CB_NODEs; delete explicitly.
2302 for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2303 delete (*ii).second;
2305 dev_data->commandBufferMap.clear();
2306 // This will also delete all sets in the pool & remove them from setMap
2307 deletePools(dev_data);
2308 // All sets should be removed
2309 assert(dev_data->setMap.empty());
2310 dev_data->descriptorSetLayoutMap.clear();
2311 dev_data->imageViewMap.clear();
2312 dev_data->imageMap.clear();
2313 dev_data->imageSubresourceMap.clear();
2314 dev_data->imageLayoutMap.clear();
2315 dev_data->bufferViewMap.clear();
2316 dev_data->bufferMap.clear();
2317 // Queues persist until device is destroyed
2318 dev_data->queueMap.clear();
2319 // Report any memory leaks
2320 layer_debug_utils_destroy_device(device);
2323 #if DISPATCH_MAP_DEBUG
2324 fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
// Forward to the next layer/ICD, then drop this device's layer-data entry.
2327 dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2328 FreeLayerDataPtr(key, layer_data_map);
// Instance-level extension(s) this layer itself exposes (debug_report only).
2331 static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2333 // For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
2334 // and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id
// Check a VkPipelineStageFlags mask against the device's enabled features:
// geometry-shader stage bit requires the geometryShader feature, and the
// tessellation control/eval stage bits require tessellationShader. Logs the
// supplied error codes on violation; returns true if anything was logged.
2335 static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2336 UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2338 if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2340 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
2341 "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
2342 "geometryShader feature enabled.",
// One check covers both tessellation stage bits.
2345 if (!dev_data->enabled_features.tessellationShader &&
2346 (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2348 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
2349 "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
2350 "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
2351 "tessellationShader feature enabled.",
2357 // Loop through bound objects and increment their in_use counts.
// Bump the atomic in_use count on every object bound to this command buffer,
// marking them in-flight while the buffer is submitted.
2358 static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2359 for (auto obj : cb_node->object_bindings) {
2360 auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2362 base_obj->in_use.fetch_add(1);
2366 // Track which resources are in-flight by atomically incrementing their "in_use" count
// Track which resources are in-flight by atomically incrementing their "in_use" count
// Called on submit: counts the command buffer itself, all generically bound
// objects, draw-time vertex/index buffers, and write-before-wait events.
2367 static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2368 cb_node->submitCount++;
2369 cb_node->in_use.fetch_add(1);
2371 // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2372 IncrementBoundObjects(dev_data, cb_node);
2373 // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2374 // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2375 // should then be flagged prior to calling this function
2376 for (auto drawDataElement : cb_node->drawData) {
2377 for (auto buffer : drawDataElement.buffers) {
2378 auto buffer_state = GetBufferState(dev_data, buffer);
2380 buffer_state->in_use.fetch_add(1);
// Events written before a wait get a separate write_in_use counter (not atomic
// in_use), incremented per submission.
2384 for (auto event : cb_node->writeEventsBeforeWait) {
2385 auto event_state = GetEventNode(dev_data, event);
2386 if (event_state) event_state->write_in_use++;
2390 // Note: This function assumes that the global lock is held by the calling thread.
2391 // For the given queue, verify the queue state up to the given seq number.
2392 // Currently the only check is to make sure that if there are events to be waited on prior to
2393 // a QueryReset, make sure that all such events have been signalled.
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
// Implementation: a worklist-based traversal. Cross-queue semaphore waits pull
// other queues into the worklist so their submissions (up to the waited seq)
// are validated too. Returns true if any validation message was logged.
2394 static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2397 // sequence number we want to validate up to, per queue
2398 std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
2399 // sequence number we've completed validation for, per queue
2400 std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2401 std::vector<QUEUE_STATE *> worklist{initial_queue};
2403 while (worklist.size()) {
2404 auto queue = worklist.back();
2405 worklist.pop_back();
2407 auto target_seq = target_seqs[queue];
// Start from whichever is further along: prior validation or already-retired work.
2408 auto seq = std::max(done_seqs[queue], queue->seq);
2409 auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
2411 for (; seq < target_seq; ++sub_it, ++seq) {
2412 for (auto &wait : sub_it->waitSemaphores) {
2413 auto other_queue = GetQueueState(dev_data, wait.queue);
2415 if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
2417 auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2418 auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2420 // if this wait is for another queue, and covers new sequence
2421 // numbers beyond what we've already validated, mark the new
2422 // target seq and (possibly-re)add the queue to the worklist.
2423 if (other_done_seq < other_target_seq) {
2424 target_seqs[other_queue] = other_target_seq;
2425 worklist.push_back(other_queue);
// Check each command buffer in the submission for queries guarded by events
// that are still unsignaled.
2429 for (auto cb : sub_it->cbs) {
2430 auto cb_node = GetCBNode(dev_data, cb);
2432 for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2433 for (auto event : queryEventsPair.second) {
2434 if (dev_data->eventMap[event].needsSignaled) {
2435 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2436 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
2437 "Cannot get query results on queryPool 0x%" PRIx64
2438 " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2439 HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2440 HandleToUint64(event));
2448 // finally mark the point we've now validated this queue to.
2449 done_seqs[queue] = seq;
2455 // When the given fence is retired, verify outstanding queue operations through the point of the fence
// When the given fence is retired, verify outstanding queue operations through the point of the fence
// Only applies to internally-scoped fences with a known signaling queue;
// delegates to VerifyQueueStateToSeq at the fence's recorded (queue, seq) point.
2456 static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2457 auto fence_state = GetFenceNode(dev_data, fence);
2458 if (fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
2459 return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2464 // Decrement in-use count for objects bound to command buffer
// Decrement in-use count for objects bound to command buffer
// Mirror of IncrementBoundObjects, run when submitted work retires.
2465 static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2466 BASE_NODE *base_obj = nullptr;
2467 for (auto obj : cb_node->object_bindings) {
2468 base_obj = GetStateStructPtrFromObject(dev_data, obj);
2470 base_obj->in_use.fetch_sub(1);
// Retire (mark complete) all submissions on 'pQueue' with sequence number below
// 'seq': release semaphore/buffer/command-buffer in_use counts, propagate
// query/event state back to the device maps, mark fences retired, and then
// recursively roll forward any other queues that were waited on.
2475 static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
// Highest waited-on seq seen per other queue; those queues are retired last.
2476 std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2478 // Roll this queue forward, one submission at a time.
2479 while (pQueue->seq < seq) {
2480 auto &submission = pQueue->submissions.front();
2482 for (auto &wait : submission.waitSemaphores) {
2483 auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2485 pSemaphore->in_use.fetch_sub(1);
// Remember the furthest point each waited-on queue must have reached.
2487 auto &lastSeq = otherQueueSeqs[wait.queue];
2488 lastSeq = std::max(lastSeq, wait.seq);
2491 for (auto &semaphore : submission.signalSemaphores) {
2492 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2494 pSemaphore->in_use.fetch_sub(1);
2498 for (auto &semaphore : submission.externalSemaphores) {
2499 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2501 pSemaphore->in_use.fetch_sub(1);
2505 for (auto cb : submission.cbs) {
2506 auto cb_node = GetCBNode(dev_data, cb);
2510 // First perform decrement on general case bound objects
2511 DecrementBoundResources(dev_data, cb_node);
2512 for (auto drawDataElement : cb_node->drawData) {
2513 for (auto buffer : drawDataElement.buffers) {
2514 auto buffer_state = GetBufferState(dev_data, buffer);
2516 buffer_state->in_use.fetch_sub(1);
2520 for (auto event : cb_node->writeEventsBeforeWait) {
2521 auto eventNode = dev_data->eventMap.find(event);
2522 if (eventNode != dev_data->eventMap.end()) {
2523 eventNode->second.write_in_use--;
// Commit the command buffer's recorded query/event state to the device-wide maps
// now that the work has completed.
2526 for (auto queryStatePair : cb_node->queryToStateMap) {
2527 dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2529 for (auto eventStagePair : cb_node->eventToStageMap) {
2530 dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2533 cb_node->in_use.fetch_sub(1);
2536 auto pFence = GetFenceNode(dev_data, submission.fence);
2537 if (pFence && pFence->scope == kSyncScopeInternal) {
2538 pFence->state = FENCE_RETIRED;
2541 pQueue->submissions.pop_front();
2545 // Roll other queues forward to the highest seq we saw a wait for
2546 for (auto qs : otherQueueSeqs) {
2547 RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2551 // Submit a fence to a queue, delimiting previous fences and previous untracked
// Mark 'pFence' in-flight and record which (queue, sequence) will signal it:
// the seq is the queue's current position plus pending submissions plus the
// number of submits in this vkQueueSubmit call.
2553 static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2554 pFence->state = FENCE_INFLIGHT;
2555 pFence->signaler.first = pQueue->queue;
2556 pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
// Error if a command buffer is already in flight (or appears more than once in
// this submit) without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT. Returns
// true if a message was logged.
2559 static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2561 if ((pCB->in_use.load() || current_submit_count > 1) &&
2562 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2563 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2564 VALIDATION_ERROR_31a0008e,
2565 "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
2566 HandleToUint64(pCB->commandBuffer));
// Validate a command buffer's lifecycle state before use: flags repeated
// submission of ONE_TIME_SUBMIT buffers, invalidated buffers, never-recorded
// buffers, and buffers missing vkEndCommandBuffer. 'call_source' names the
// calling API for the message text. Returns true if anything was logged.
// Can be disabled via instance_data->disabled.command_buffer_state.
2571 static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2572 int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
2574 if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2575 // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2576 if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2577 (cb_state->submitCount + current_submit_count > 1)) {
2578 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2579 DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION,
2580 "Commandbuffer 0x%" PRIx64
2581 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
2583 HandleToUint64(cb_state->commandBuffer), cb_state->submitCount + current_submit_count);
2586 // Validate that cmd buffers have been updated
2587 switch (cb_state->state) {
2588 case CB_INVALID_INCOMPLETE:
2589 case CB_INVALID_COMPLETE:
2590 skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
// Never-recorded case: buffer contains no commands at all.
2594 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2595 (uint64_t)(cb_state->commandBuffer), vu_id,
2596 "Command buffer 0x%" PRIx64 " used in the call to %s is unrecorded and contains no commands.",
2597 HandleToUint64(cb_state->commandBuffer), call_source);
// Recording-not-ended case: vkEndCommandBuffer was never called.
2601 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2602 HandleToUint64(cb_state->commandBuffer), DRAWSTATE_NO_END_COMMAND_BUFFER,
2603 "You must call vkEndCommandBuffer() on command buffer 0x%" PRIx64 " before this call to %s!",
2604 HandleToUint64(cb_state->commandBuffer), call_source);
2607 default: /* recorded */
// Verify that buffers referenced by the command buffer's draw data still exist
// (i.e. were not destroyed after recording). Returns true if anything logged.
2613 static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2616 // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2617 // all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2618 // should then be flagged prior to calling this function
2619 for (auto drawDataElement : cb_node->drawData) {
2620 for (auto buffer : drawDataElement.buffers) {
2621 auto buffer_state = GetBufferState(dev_data, buffer);
// A non-null handle with no tracked state means the buffer was deleted.
2622 if (buffer != VK_NULL_HANDLE && !buffer_state) {
2623 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2624 HandleToUint64(buffer), DRAWSTATE_INVALID_BUFFER,
2625 "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2632 // Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
// Used for CONCURRENT-sharing-mode images/buffers: the submitting queue's family
// must appear in the resource's queue-family index list. Returns true if logged.
2633 bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2634 const uint32_t *indices) {
2637 auto queue_state = GetQueueState(dev_data, queue);
2639 for (uint32_t i = 0; i < count; i++) {
2640 if (indices[i] == queue_state->queueFamilyIndex) {
// No match found in the list — report the concurrent-access violation.
2647 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
2648 object->handle, DRAWSTATE_INVALID_QUEUE_FAMILY,
2649 "vkQueueSubmit: Command buffer 0x%" PRIx64 " contains %s 0x%" PRIx64
2650 " which was not created allowing concurrent access to this queue family %d.",
2651 HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle,
2652 queue_state->queueFamilyIndex);
2658 // Validate that queueFamilyIndices of primary command buffers match this queue
2659 // Secondary command buffers were previously validated in vkCmdExecuteCommands().
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
// Also checks that every CONCURRENT-mode image/buffer bound to the command
// buffer permits access from the submitting queue's family. Returns true if
// any validation message was logged.
2660 static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2662 auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2663 auto queue_state = GetQueueState(dev_data, queue);
2665 if (pPool && queue_state) {
// A command buffer may only be submitted on queues from the family its pool
// was created for.
2666 if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2667 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2668 HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_31a00094,
2669 "vkQueueSubmit: Primary command buffer 0x%" PRIx64
2670 " created in queue family %d is being submitted on queue 0x%" PRIx64 " from queue family %d.",
2671 HandleToUint64(pCB->commandBuffer), pPool->queueFamilyIndex, HandleToUint64(queue),
2672 queue_state->queueFamilyIndex);
2675 // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2676 for (auto object : pCB->object_bindings) {
2677 if (object.type == kVulkanObjectTypeImage) {
2678 auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2679 if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2680 skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2681 image_state->createInfo.pQueueFamilyIndices);
2683 } else if (object.type == kVulkanObjectTypeBuffer) {
2684 auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2685 if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2686 skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2687 buffer_state->createInfo.pQueueFamilyIndices);
// Full pre-submit validation for a primary command buffer: simultaneous-use
// rules, resource liveness (for the primary and all linked secondaries),
// secondary re-binding rules, and overall lifecycle state. Returns true if any
// validation message was logged.
2696 static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2697 // Track in-use for resources off of primary and any secondary CBs
2700 // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2702 skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2704 skip |= validateResources(dev_data, pCB);
2706 for (auto pSubCB : pCB->linkedCommandBuffers) {
2707 skip |= validateResources(dev_data, pSubCB);
2708 // TODO: replace with invalidateCommandBuffers() at recording.
// A secondary that was later re-recorded into a *different* primary is only
// legal if it allows simultaneous use.
2709 if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2710 !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2711 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2712 VALIDATION_ERROR_31a00092,
2713 "Commandbuffer 0x%" PRIx64 " was submitted with secondary buffer 0x%" PRIx64
2714 " but that buffer has subsequently been bound to primary cmd buffer 0x%" PRIx64
2715 " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
2716 HandleToUint64(pCB->commandBuffer), HandleToUint64(pSubCB->commandBuffer),
2717 HandleToUint64(pSubCB->primaryCommandBuffer));
2721 skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
// Verify a fence handed to a queue-submit style call is usable: an internally
// scoped fence must be neither already in flight nor already signaled
// (un-reset). Returns true if a validation message was logged.
2726 static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2729 if (pFence && pFence->scope == kSyncScopeInternal) {
2730 if (pFence->state == FENCE_INFLIGHT) {
2731 // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2732 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2733 HandleToUint64(pFence->fence), DRAWSTATE_INVALID_FENCE,
2734 "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2737 else if (pFence->state == FENCE_RETIRED) {
2738 // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2739 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2740 HandleToUint64(pFence->fence), MEMTRACK_INVALID_FENCE_STATE,
2741 "Fence 0x%" PRIx64 " submitted in SIGNALED state. Fences must be reset before being submitted",
2742 HandleToUint64(pFence->fence));
// Post-call state update for vkQueueSubmit: records each submit batch on the
// queue's submission list (command buffers incl. secondaries, semaphore waits,
// semaphore signals, external semaphores, and the fence on the final batch),
// updates semaphore/fence signaling state and in-use counts, and — for
// externally scoped fences/semaphores — retires work early since the matching
// wait will never be observed by this layer. Warns once per device when
// external sync objects disable lifecycle validation.
2749 static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2751 uint64_t early_retire_seq = 0;
2752 auto pQueue = GetQueueState(dev_data, queue);
2753 auto pFence = GetFenceNode(dev_data, fence);
2756 if (pFence->scope == kSyncScopeInternal) {
2757 // Mark fence in use
// std::max(1u, submitCount) covers the fence-only submit recorded below.
2758 SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2760 // If no submissions, but just dropping a fence on the end of the queue,
2761 // record an empty submission with just the fence, so we can determine
2763 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
2764 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
2767 // Retire work up until this fence early, we will not see the wait that corresponds to this signal
2768 early_retire_seq = pQueue->seq + pQueue->submissions.size();
2769 if (!dev_data->external_sync_warning) {
2770 dev_data->external_sync_warning = true;
2771 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2772 HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2773 "vkQueueSubmit(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
2774 " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
2776 HandleToUint64(fence), HandleToUint64(queue));
2781 // Now process each individual submit
2782 for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2783 std::vector<VkCommandBuffer> cbs;
2784 const VkSubmitInfo *submit = &pSubmits[submit_idx];
2785 vector<SEMAPHORE_WAIT> semaphore_waits;
2786 vector<VkSemaphore> semaphore_signals;
2787 vector<VkSemaphore> semaphore_externals;
2788 for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2789 VkSemaphore semaphore = submit->pWaitSemaphores[i];
2790 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2792 if (pSemaphore->scope == kSyncScopeInternal) {
// Record the (queue, seq) this wait depends on, then consume the signal.
2793 if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2794 semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2795 pSemaphore->in_use.fetch_add(1);
2797 pSemaphore->signaler.first = VK_NULL_HANDLE;
2798 pSemaphore->signaled = false;
// External-scope wait: track separately; a temporary-external semaphore reverts
// to internal scope after this wait.
2800 semaphore_externals.push_back(semaphore);
2801 pSemaphore->in_use.fetch_add(1);
2802 if (pSemaphore->scope == kSyncScopeExternalTemporary) {
2803 pSemaphore->scope = kSyncScopeInternal;
2808 for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2809 VkSemaphore semaphore = submit->pSignalSemaphores[i];
2810 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2812 if (pSemaphore->scope == kSyncScopeInternal) {
// This submission (seq = current + pending + 1) becomes the signaler.
2813 pSemaphore->signaler.first = queue;
2814 pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2815 pSemaphore->signaled = true;
2816 pSemaphore->in_use.fetch_add(1);
2817 semaphore_signals.push_back(semaphore);
2819 // Retire work up until this submit early, we will not see the wait that corresponds to this signal
2820 early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
2821 if (!dev_data->external_sync_warning) {
2822 dev_data->external_sync_warning = true;
2823 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2824 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2825 "vkQueueSubmit(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
2826 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
2827 "associated objects.",
2828 HandleToUint64(semaphore), HandleToUint64(queue));
// Collect primary + linked secondary command buffers and mark their resources
// in-flight; image layouts recorded in the CBs are committed to device state.
2833 for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2834 auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2836 cbs.push_back(submit->pCommandBuffers[i]);
2837 for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2838 cbs.push_back(secondaryCmdBuffer->commandBuffer);
2839 UpdateCmdBufImageLayouts(dev_data, secondaryCmdBuffer);
2840 incrementResources(dev_data, secondaryCmdBuffer);
2842 UpdateCmdBufImageLayouts(dev_data, cb_node);
2843 incrementResources(dev_data, cb_node);
// The fence is attached only to the last batch of this submit call.
2846 pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
2847 submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2850 if (early_retire_seq) {
2851 RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
// Validate a vkQueueSubmit call before passing it to the driver:
//  - fence must be legal to submit with (unsignaled, not already queued)
//  - every waited semaphore must have a signal it can consume
//  - every signaled semaphore must not already be pending-signaled
//  - each command buffer's image layouts, queue family and primary-CB state must be valid
// Returns true if any validation error was logged.
// NOTE(review): several interior lines (signature continuation, some braces/else arms) are not
// visible in this chunk; code tokens below are unchanged from the original.
static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
    auto pFence = GetFenceNode(dev_data, fence);
    bool skip = ValidateFenceForSubmit(dev_data, pFence);
    // Semaphore state is tracked across all batches of this submit so that a signal in batch N
    // can satisfy a wait in batch N+1.
    unordered_set<VkSemaphore> signaled_semaphores;
    unordered_set<VkSemaphore> unsignaled_semaphores;
    unordered_set<VkSemaphore> internal_semaphores;
    vector<VkCommandBuffer> current_cmds;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            // Geometry/tessellation stage bits in pWaitDstStageMask require the matching device features.
            skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
                                                 VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                // Waiting on a semaphore with no pending or prior signal can never make progress.
                if (unsignaled_semaphores.count(semaphore) ||
                    (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                    HandleToUint64(queue), HandleToUint64(semaphore));
                    // Consuming the wait flips the tracked state back to unsignaled.
                    signaled_semaphores.erase(semaphore);
                    unsignaled_semaphores.insert(semaphore);
            // A temporarily-external semaphore reverts to internal scope once waited on.
            if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
                internal_semaphores.insert(semaphore);
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
            if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                // Signaling an already-signaled semaphore (with no intervening wait) is an error.
                if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                    HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
                                    "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                    " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                    HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
                    unsignaled_semaphores.erase(semaphore);
                    signaled_semaphores.insert(semaphore);
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
            skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
            current_cmds.push_back(submit->pCommandBuffers[i]);
            // The count of duplicate submissions matters for SIMULTANEOUS_USE validation.
            skip |= validatePrimaryCommandBufferState(
                dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
            skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
            // Potential early exit here as bad object state may crash in delayed function calls
            // Call submit-time functions to validate/update state
            for (auto &function : cb_node->queue_submit_functions) {
            for (auto &function : cb_node->eventUpdates) {
                skip |= function(queue);
            for (auto &function : cb_node->queryUpdates) {
                skip |= function(queue);
// vkQueueSubmit entry point: validate under the global lock, return
// VK_ERROR_VALIDATION_FAILED_EXT if validation failed, otherwise call down the dispatch chain
// and record the submission state for later retirement.
VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    // Lock handling around the down-chain call is not visible in this chunk.
    VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
    PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
// Flag an error if another allocation would meet or exceed the device's
// maxMemoryAllocationCount limit (memObjMap holds one entry per live allocation).
static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_UNDEFINED,
                        "Number of currently valid memory objects is not less than the maximum allowed (%u).",
                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount);
// Record a successfully allocated VkDeviceMemory in the layer's memory-object map.
static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
// vkAllocateMemory: check the allocation-count limit, call down the chain, and track the
// new memory object only when the driver reports success.
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateAllocateMemory(dev_data);
    result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // Only track allocations the driver actually created.
    if (VK_SUCCESS == result) {
        PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
// For given obj node, if it is in use, flag a validation error and return callback result, else return false
// Report a validation error (identified by error_code) if obj_node's in-use refcount is
// non-zero, i.e. a command buffer still references it. Returns the log callback result,
// or false when the object is idle or the check is disabled.
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
    if (dev_data->instance_data->disabled.object_in_use) return false;
    if (obj_node->in_use.load()) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
                error_code, "Cannot call %s on %s 0x%" PRIx64 " that is currently in use by a command buffer.", caller_name,
                object_string[obj_struct.type], obj_struct.handle);
// vkFreeMemory validation: look up the memory object's state (returned through the out-params
// for the post-call step) and ensure it is not still in use by a command buffer.
static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
    *mem_info = GetMemObjInfo(dev_data, mem);
    *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
    if (dev_data->instance_data->disabled.free_memory) return false;
    skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, "vkFreeMemory", VALIDATION_ERROR_2880054a);
// Clean up layer state after vkFreeMemory: warn about any objects still bound to the memory,
// mark those bindings unbound, invalidate command buffers that reference the memory, and
// drop the memory object from the tracking map.
static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
    // Clear mem binding for any bound objects
    for (auto obj : mem_info->obj_bindings) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
                MEMTRACK_FREED_MEM_REF, "VK Object 0x%" PRIx64 " still has a reference to mem obj 0x%" PRIx64,
                HandleToUint64(obj.handle), HandleToUint64(mem_info->mem));
        BINDABLE *bindable_state = nullptr;
        // Switch header and break statements are not visible in this chunk.
            case kVulkanObjectTypeImage:
                bindable_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            case kVulkanObjectTypeBuffer:
                bindable_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
                // Should only have buffer or image objects bound to memory
        assert(bindable_state);
        // MEMORY_UNBOUND (vs. VK_NULL_HANDLE) distinguishes "freed out from under" from "never bound".
        bindable_state->binding.mem = MEMORY_UNBOUND;
        bindable_state->UpdateBoundMemorySet();
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
    dev_data->memObjMap.erase(mem);
// vkFreeMemory: validate the memory object is idle, call down the chain, then (for non-null
// handles) clear all layer bookkeeping for the freed memory.
VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    DEVICE_MEM_INFO *mem_info = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
    dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
    // Freeing VK_NULL_HANDLE is a no-op; only record state for real handles.
    if (mem != VK_NULL_HANDLE) {
        PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Greater than zero
// 2. Within the size of the memory allocation
// Check a vkMapMemory request: size must be non-zero, the memory must not already be mapped,
// and [offset, offset+size) (or [offset, end) for VK_WHOLE_SIZE) must lie inside the allocation.
// Returns true if any validation error was logged.
static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    // The size==0 guard preceding this log call is not visible in this chunk.
    skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                   HandleToUint64(mem), MEMTRACK_INVALID_MAP, "VkMapMemory: Attempting to map memory range of size zero");
    auto mem_element = dev_data->memObjMap.find(mem);
    if (mem_element != dev_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        // (a non-zero tracked range size means a prior map is still active).
        if (mem_info->mem_range.size != 0) {
            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           HandleToUint64(mem), MEMTRACK_INVALID_MAP,
                           "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIx64, HandleToUint64(mem));
        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            // VK_WHOLE_SIZE maps from offset to the end, so offset itself must be in range.
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), MEMTRACK_INVALID_MAP,
                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                               " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                               offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            // Explicit size: the end of the mapped range must stay within the allocation.
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), VALIDATION_ERROR_31200552,
                               "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
                               offset, size + offset, mem_info->alloc_info.allocationSize);
// Remember the currently-mapped range of a memory object; a non-zero size later marks the
// object as "mapped" for ValidateMapMemRange/deleteMemRanges.
static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = GetMemObjInfo(dev_data, mem);
    mem_info->mem_range.offset = offset;
    mem_info->mem_range.size = size;
// On vkUnmapMemory: error if the memory was never mapped, then clear the tracked range and
// release any shadow copy allocated for non-coherent-memory guard-band checking.
// Returns true if a validation error was logged.
static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
    auto mem_info = GetMemObjInfo(dev_data, mem);
    if (!mem_info->mem_range.size) {
        // Valid Usage: memory must currently be mapped
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                       HandleToUint64(mem), VALIDATION_ERROR_33600562,
                       "Unmapping Memory without memory being mapped: mem obj 0x%" PRIx64 ".", HandleToUint64(mem));
    mem_info->mem_range.size = 0;
    if (mem_info->shadow_copy) {
        // shadow_copy_base is the raw malloc'd pointer; shadow_copy is the aligned view into it.
        free(mem_info->shadow_copy_base);
        mem_info->shadow_copy_base = 0;
        mem_info->shadow_copy = 0;
// Guard value for pad data: written into the shadow-copy guard bands around non-coherent
// mapped memory so that over- and under-writes can be detected on flush/unmap.
static char NoncoherentMemoryFillValue = 0xb;
// After a successful vkMapMemory, record the driver's mapped pointer. For non-coherent memory
// types, substitute a malloc'd "shadow copy" surrounded by guard bands (filled with
// NoncoherentMemoryFillValue) and aligned per minMemoryMapAlignment, so writes outside the
// mapped range can be detected; *ppData is redirected into the shadow buffer.
static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
    auto mem_info = GetMemObjInfo(dev_data, mem);
    // Keep the real driver pointer so data can be copied back/compared later.
    mem_info->p_driver_data = *ppData;
    uint32_t index = mem_info->alloc_info.memoryTypeIndex;
    if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
        // Coherent memory needs no guard-band shadowing.
        mem_info->shadow_copy = 0;
        if (size == VK_WHOLE_SIZE) {
            size = mem_info->alloc_info.allocationSize - offset;
        mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
        assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) ==
        // Ensure start of mapped region reflects hardware alignment constraints
        uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
        // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
        uint64_t start_offset = offset % map_alignment;
        // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
        mem_info->shadow_copy_base =
            malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
        // Round the base up to map_alignment, then offset so the user pointer mirrors the
        // original misalignment (start_offset) within an aligned page.
        mem_info->shadow_copy =
            reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                     ~(map_alignment - 1)) +
        assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                          map_alignment) == 0);
        // Pre-fill guard bands (and body) with the sentinel, then hand out the interior pointer.
        memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
        *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
3168 // Verify that state for fence being waited on is appropriate. That is,
3169 // a fence being waited on should not already be signaled and
3170 // it should have been submitted on a queue or during acquire next image
// Warn (not error) when apiCall waits on an internal-scope fence that was never submitted to a
// queue or used by an acquire operation — such a wait can only time out.
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence && pFence->scope == kSyncScopeInternal) {
        // FENCE_UNSIGNALED here means the fence has no signal operation pending anywhere.
        if (pFence->state == FENCE_UNSIGNALED) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                    HandleToUint64(fence), MEMTRACK_INVALID_FENCE_STATE,
                    "%s called for fence 0x%" PRIx64 " which has not been submitted on a Queue or during acquire next image.",
                    apiCall, HandleToUint64(fence));
// Mark a signaled fence's work as complete. If a queue signaled it, retire that queue's
// submissions up to the fence's sequence number (which also retires the fence itself);
// otherwise the signaler was a WSI operation and the fence is retired directly.
static void RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = GetFenceNode(dev_data, fence);
    if (pFence->scope == kSyncScopeInternal) {
        if (pFence->signaler.first != VK_NULL_HANDLE) {
            // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
            RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
            // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
            // the fence as retired.
            pFence->state = FENCE_RETIRED;
// vkWaitForFences validation: each fence must have a pending signal, and the queue state
// backing that signal must be consistent.
static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
    if (dev_data->instance_data->disabled.wait_for_fences) return false;
    for (uint32_t i = 0; i < fence_count; i++) {
        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
// After a successful vkWaitForFences, retire fences whose completion is guaranteed: either
// waitAll was set, or there was only one fence (so "any" == "all").
static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
    // When we know that all fences are complete we can clean/remove their CBs
    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
        for (uint32_t i = 0; i < fence_count; i++) {
            RetireFence(dev_data, fences[i]);
    // NOTE : Alternate case not handled here is when some fences have completed. In
    // this case for app to guarantee which fences completed it will have to call
    // vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
// vkWaitForFences: validate fence states, call down the chain, and on success retire the
// completed fences' work.
VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Verify fence status of submitted fences
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
    // VK_TIMEOUT is not success — only record retirement on VK_SUCCESS.
    if (result == VK_SUCCESS) {
        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
// vkGetFenceStatus validation: warn if the fence has no pending signal operation.
static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
    if (dev_data->instance_data->disabled.get_fence_state) return false;
    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
3247 static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
// vkGetFenceStatus: validate, call down the chain, and retire the fence's work only when the
// driver reports it as signaled (VK_SUCCESS).
VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
    // VK_NOT_READY means unsignaled — record nothing in that case.
    if (result == VK_SUCCESS) {
        PostCallRecordGetFenceStatus(dev_data, fence);
// Begin tracking a queue handle returned by the driver. The emplace into the set is used as
// the first-seen test so repeated retrievals of the same queue don't reset its state.
static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(queue);
    if (result.second == true) {
        // operator[] creates the QUEUE_STATE entry; initialize it for a fresh queue.
        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
        queue_state->queue = queue;
        queue_state->queueFamilyIndex = q_family_index;
        queue_state->seq = 0;
// vkGetDeviceQueue: call down the chain, then record the returned queue under the lock.
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    lock_guard_t lock(global_lock);
    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
// vkGetDeviceQueue2: like GetDeviceQueue, but this variant may legally return
// VK_NULL_HANDLE (e.g. no matching protected-capable queue), so only track real handles.
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
    lock_guard_t lock(global_lock);
    if (*pQueue != VK_NULL_HANDLE) {
        PostCallRecordGetDeviceQueue(dev_data, pQueueInfo->queueFamilyIndex, *pQueue);
// vkQueueWaitIdle validation: fetch the queue state (out-param for the post-call step) and
// verify every pending submission up to the end of the queue's submission list.
static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
    *queue_state = GetQueueState(dev_data, queue);
    if (dev_data->instance_data->disabled.queue_wait_idle) return false;
    // seq + submissions.size() is the sequence number after all currently queued work.
    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
// After the queue is idle, retire every submission currently tracked on it.
static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
// vkQueueWaitIdle: validate queued work, call down the chain, and on success retire all of
// the queue's pending submissions.
VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    QUEUE_STATE *queue_state = nullptr;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
    if (VK_SUCCESS == result) {
        PostCallRecordQueueWaitIdle(dev_data, queue_state);
// vkDeviceWaitIdle validation: verify the pending submissions of every queue on the device.
static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
    if (dev_data->instance_data->disabled.device_wait_idle) return false;
    for (auto &queue : dev_data->queueMap) {
        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
// After the device is idle, retire all tracked submissions on every queue.
static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
    for (auto &queue : dev_data->queueMap) {
        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
// vkDeviceWaitIdle: validate all queues' pending work, call down the chain, and on success
// retire every queue's submissions.
VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
    if (VK_SUCCESS == result) {
        PostCallRecordDeviceWaitIdle(dev_data);
// vkDestroyFence validation: look up the fence state (out-params for the post-call step) and
// error if an internal-scope fence is still in flight on a queue.
static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
    *fence_node = GetFenceNode(dev_data, fence);
    *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
    if (dev_data->instance_data->disabled.destroy_fence) return false;
    if ((*fence_node)->scope == kSyncScopeInternal && (*fence_node)->state == FENCE_INFLIGHT) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                HandleToUint64(fence), VALIDATION_ERROR_24e008c0, "Fence 0x%" PRIx64 " is in use.", HandleToUint64(fence));
3365 static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
// vkDestroyFence: validate the fence is not in flight, call down the chain, then remove the
// fence from layer tracking.
VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    FENCE_NODE *fence_node = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
    dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
    PostCallRecordDestroyFence(dev_data, fence);
// vkDestroySemaphore validation: look up the semaphore state (out-params for the post-call
// step) and error if it is still referenced by pending command-buffer work.
static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
                                            VK_OBJECT *obj_struct) {
    *sema_node = GetSemaphoreNode(dev_data, semaphore);
    *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
    if (dev_data->instance_data->disabled.destroy_semaphore) return false;
    skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, "vkDestroySemaphore", VALIDATION_ERROR_268008e2);
3395 static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
// vkDestroySemaphore: validate the semaphore is idle, call down the chain, then remove it
// from layer tracking.
VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    SEMAPHORE_NODE *sema_node;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
    dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
    PostCallRecordDestroySemaphore(dev_data, semaphore);
// vkDestroyEvent validation: look up the event state (out-params for the post-call step) and
// error if it is still referenced by pending command-buffer work.
static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
    *event_state = GetEventNode(dev_data, event);
    *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
    if (dev_data->instance_data->disabled.destroy_event) return false;
    skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, "vkDestroyEvent", VALIDATION_ERROR_24c008f2);
// After vkDestroyEvent: invalidate command buffers that recorded the event, then drop it
// from layer tracking.
static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
    dev_data->eventMap.erase(event);
// vkDestroyEvent: validate the event is idle, call down the chain, then (for non-null
// handles) clean up layer state.
VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    EVENT_STATE *event_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
    dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
    // Destroying VK_NULL_HANDLE is a no-op; only record for real handles.
    if (event != VK_NULL_HANDLE) {
        PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
// vkDestroyQueryPool validation: look up the pool state (out-params for the post-call step)
// and error if it is still referenced by pending command-buffer work.
static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
                                            VK_OBJECT *obj_struct) {
    *qp_state = GetQueryPoolNode(dev_data, query_pool);
    *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
    if (dev_data->instance_data->disabled.destroy_query_pool) return false;
    skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, "vkDestroyQueryPool", VALIDATION_ERROR_26200632);
// After vkDestroyQueryPool: invalidate command buffers that reference the pool, then drop it
// from layer tracking.
static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
                                           VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
    dev_data->queryPoolMap.erase(query_pool);
// vkDestroyQueryPool: validate the pool is idle, call down the chain, then (for non-null
// handles) clean up layer state.
VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    QUERY_POOL_NODE *qp_state = nullptr;
    VK_OBJECT obj_struct;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
    dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
    if (queryPool != VK_NULL_HANDLE) {
        PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
// vkGetQueryPoolResults validation:
//  - VK_QUERY_RESULT_PARTIAL_BIT is illegal for timestamp pools
//  - builds queries_in_flight (query -> command buffers currently executing it) for the caller
//  - each requested query must be available, or in flight only under the event-reset pattern;
//    unavailable or never-written queries are errors.
static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                               uint32_t query_count, VkQueryResultFlags flags,
                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    auto query_pool_state = dev_data->queryPoolMap.find(query_pool);
    if (query_pool_state != dev_data->queryPoolMap.end()) {
        // Timestamp queries are written atomically; PARTIAL_BIT makes no sense for them.
        if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                    VALIDATION_ERROR_2fa00664,
                    "QueryPool 0x%" PRIx64
                    " was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
                    HandleToUint64(query_pool));
    // TODO: clean this up, it's insanely wasteful.
    // Scan every in-use command buffer to discover which queries are currently in flight.
    for (auto cmd_buffer : dev_data->commandBufferMap) {
        if (cmd_buffer.second->in_use.load()) {
            for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
                (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
    if (dev_data->instance_data->disabled.get_query_pool_results) return false;
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end()) {
                if (query_state_pair->second) {
                    for (auto cmd_buffer : qif_pair->second) {
                        auto cb = GetCBNode(dev_data, cmd_buffer);
                        // An in-flight query is only legal if its reset is gated on a waited event.
                        auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                        if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
                                            "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                            HandleToUint64(query_pool), first_query + i);
            } else if (!query_state_pair->second) { // Unavailable and Not in flight
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                DRAWSTATE_INVALID_QUERY,
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                HandleToUint64(query_pool), first_query + i);
        } else { // Uninitialized
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            DRAWSTATE_INVALID_QUERY,
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %d as data has not been collected for this index.",
                            HandleToUint64(query_pool), first_query + i);
// After vkGetQueryPoolResults: for queries that were available but still in flight under the
// event-reset pattern, re-arm the gating events (needsSignaled) so the reset path validates
// correctly next time.
static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                              uint32_t query_count,
                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
                query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = GetCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                        for (auto event : query_event_pair->second) {
                            dev_data->eventMap[event].needsSignaled = true;
// vkGetQueryPoolResults: validate query availability/flags, call down the chain, then record
// event re-arming for in-flight queries.
VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3579 // Return true if given ranges intersect, else false
3580 // Prereq : For both ranges, range->end - range->start > 0. This case should have already resulted
3581 // in an error so not checking that here
3582 // pad_ranges bool indicates a linear and non-linear comparison which requires padding
3583 // In the case where padding is required, if an alias is encountered then a validation error is reported and skip
3584 // may be set by the callback function so caller should merge in skip value if padding case is possible.
3585 // This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
// Test whether two memory ranges overlap. When one range is linear and the other non-linear,
// both ranges are padded out to bufferImageGranularity before comparing, and (unless
// skip_checks suppresses it) an aliasing warning is logged into *skip.
// Returns true if the (possibly padded) ranges intersect.
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    // Mixed linear/non-linear resources must be compared at bufferImageGranularity granularity.
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    // Masking with ~(pad_align - 1) rounds both endpoints down to the padding boundary.
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
    if (!skip_checks && (range1->linear != range2->linear)) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, MEMTRACK_INVALID_ALIASING,
            "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
            " which may indicate a bug. For further info refer to the Buffer-Image Granularity section of the Vulkan "
            "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
            r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3618 // Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
// Returns true if range1 overlaps [offset, end]. No warnings are emitted
// (skip_checks=true), so the skip output of the full overload is discarded.
3619 bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3620 // Create a local MEMORY_RANGE struct to wrap offset/size
3621 MEMORY_RANGE range_wrap;
3622 // Synch linear with range1 to avoid padding and potential validation error case
3623 range_wrap.linear = range1->linear;
3624 range_wrap.start = offset;
3625 range_wrap.end = end;
3627 return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3629 // For given mem_info, set all ranges valid that intersect [offset-end] range
3630 // TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
3631 static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
3632 bool tmp_bool = false;
// The written span is modeled as a linear range (e.g. a host memory map).
3633 MEMORY_RANGE map_range = {};
3634 map_range.linear = true;
3635 map_range.start = offset;
3636 map_range.end = end;
// Mark every bound range that overlaps the span as having valid data.
3637 for (auto &handle_range_pair : mem_info->bound_ranges) {
3638 if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool, false)) {
3639 // TODO : WARN here if tmp_bool true?
3640 handle_range_pair.second.valid = true;
// Validation-phase twin of InsertMemoryRange: build the candidate MEMORY_RANGE
// for binding (handle @ memoryOffset, sized by memRequirements) and check it
// against existing bound ranges for illegal aliasing, and against the
// allocation size. Returns true when an error callback requests skipping.
3645 static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3646 VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3647 bool is_linear, const char *api_name) {
3651 range.image = is_image;
3652 range.handle = handle;
3653 range.linear = is_linear;
3654 range.valid = mem_info->global_valid;
3655 range.memory = mem_info->mem;
3656 range.start = memoryOffset;
3657 range.size = memRequirements.size;
// end is inclusive: last byte covered by the binding.
3658 range.end = memoryOffset + memRequirements.size - 1;
3659 range.aliases.clear();
3661 // Check for aliasing problems.
3662 for (auto &obj_range_pair : mem_info->bound_ranges) {
3663 auto check_range = &obj_range_pair.second;
3664 bool intersection_error = false;
// skip_checks=false: mixed linear/non-linear overlap reports a warning here.
3665 if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3666 skip |= intersection_error;
3667 range.aliases.insert(check_range);
// The offset itself must lie inside the allocation.
3671 if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3672 UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
// NOTE(review): plain assignment here overwrites any skip value accumulated by
// the aliasing loop above — confirm whether |= was intended.
3673 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3674 HandleToUint64(mem_info->mem), error_code,
3675 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64 "), memoryOffset=0x%" PRIxLEAST64
3676 " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
3677 api_name, HandleToUint64(mem_info->mem), HandleToUint64(handle), memoryOffset,
3678 mem_info->alloc_info.allocationSize);
3684 // Object with given handle is being bound to memory w/ given mem_info struct.
3685 // Track the newly bound memory range with given memoryOffset
3686 // Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
3687 // and non-linear range incorrectly overlap.
3688 // Return true if an error is flagged and the user callback returns "true", otherwise false
3689 // is_image indicates an image object, otherwise handle is for a buffer
3690 // is_linear indicates a buffer or linear image
3691 static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3692 VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3695 range.image = is_image;
3696 range.handle = handle;
3697 range.linear = is_linear;
3698 range.valid = mem_info->global_valid;
3699 range.memory = mem_info->mem;
3700 range.start = memoryOffset;
3701 range.size = memRequirements.size;
// end is inclusive: last byte covered by the binding.
3702 range.end = memoryOffset + memRequirements.size - 1;
3703 range.aliases.clear();
3704 // Update Memory aliasing
3705 // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
3706 // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
3707 std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
// skip_checks=true: any aliasing warnings were already reported in the
// Validate phase, so the record phase stays silent.
3708 for (auto &obj_range_pair : mem_info->bound_ranges) {
3709 auto check_range = &obj_range_pair.second;
3710 bool intersection_error = false;
3711 if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3712 range.aliases.insert(check_range);
3713 tmp_alias_ranges.insert(check_range);
// Commit the new range, then back-link every aliased range to it.
3716 mem_info->bound_ranges[handle] = std::move(range);
3717 for (auto tmp_range : tmp_alias_ranges) {
3718 tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
3721 mem_info->bound_images.insert(handle);
3723 mem_info->bound_buffers.insert(handle);
3726 static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3727 VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3728 const char *api_name) {
3729 return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
3731 static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3732 VkMemoryRequirements mem_reqs, bool is_linear) {
3733 InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
3736 static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3737 VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3738 return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
3740 static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3741 VkMemoryRequirements mem_reqs) {
3742 InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3745 // Remove MEMORY_RANGE struct for give handle from bound_ranges of mem_info
3746 // is_image indicates if handle is for image or buffer
3747 // This function will also remove the handle-to-index mapping from the appropriate
3748 // map and clean up any aliases for range being removed.
3749 static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
// NOTE(review): operator[] default-constructs an entry if handle is unbound —
// presumably callers only pass bound handles; confirm.
3750 auto erase_range = &mem_info->bound_ranges[handle];
// Unlink this range from every range that recorded it as an alias.
3751 for (auto alias_range : erase_range->aliases) {
3752 alias_range->aliases.erase(erase_range);
3754 erase_range->aliases.clear();
3755 mem_info->bound_ranges.erase(handle);
3757 mem_info->bound_images.erase(handle);
3759 mem_info->bound_buffers.erase(handle);
3763 void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3765 void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
// Intercept for vkDestroyBuffer: validate (e.g. not in use), dispatch the
// destroy, then retire the layer's buffer state tracking.
3767 VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3768 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3769 BUFFER_STATE *buffer_state = nullptr;
3770 VK_OBJECT obj_struct;
3771 unique_lock_t lock(global_lock);
// Pre-call also fetches buffer_state/obj_struct for the post-call record step.
3772 bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3775 dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
// Destroying VK_NULL_HANDLE is a no-op; only record real handles.
3777 if (buffer != VK_NULL_HANDLE) {
3778 PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
// Intercept for vkDestroyBufferView: validate, dispatch, then retire tracking.
3783 VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3784 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3785 // Common data objects used pre & post call
3786 BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3787 VK_OBJECT obj_struct;
3788 unique_lock_t lock(global_lock);
3789 // Validate state before calling down chain, update common data if we'll be calling down chain
3790 bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3793 dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
// Destroying VK_NULL_HANDLE is a no-op; only record real handles.
3795 if (bufferView != VK_NULL_HANDLE) {
3796 PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
// Intercept for vkDestroyImage: validate, dispatch, then retire image tracking.
3801 VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3802 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3803 IMAGE_STATE *image_state = nullptr;
3804 VK_OBJECT obj_struct;
3805 unique_lock_t lock(global_lock);
// Pre-call also fetches image_state/obj_struct for the post-call record step.
3806 bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3809 dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
// Destroying VK_NULL_HANDLE is a no-op; only record real handles.
3811 if (image != VK_NULL_HANDLE) {
3812 PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
// Check that the memory object's memoryTypeIndex is one of the types allowed by
// the object's VkMemoryRequirements::memoryTypeBits. Emits msgCode on mismatch;
// returns true when the error callback requests skipping the call.
3817 static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3818 const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
// memoryTypeBits is a bitmask indexed by memory type; test the bit for the
// allocation's type index.
3820 if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3821 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3822 HandleToUint64(mem_info->mem), msgCode,
3823 "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3824 "type (0x%X) of this memory object 0x%" PRIx64 ".",
3825 funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem));
// Validate a vkBindBufferMemory(2) request: binding legality, memory range
// aliasing, memory type compatibility, alignment/size requirements, dedicated
// allocation rules, and device-limit offset alignments. api_name is used in
// messages so the same path serves BindBufferMemory and BindBufferMemory2.
// Returns true when validation requests skipping the call.
3830 static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3831 VkDeviceSize memoryOffset, const char *api_name) {
3834 unique_lock_t lock(global_lock);
3835 // Track objects tied to memory
3836 uint64_t buffer_handle = HandleToUint64(buffer);
3837 skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
3838 if (!buffer_state->memory_requirements_checked) {
3839 // There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
3840 // BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
3841 // vkGetBufferMemoryRequirements()
3842 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3843 buffer_handle, DRAWSTATE_INVALID_BUFFER,
3844 "%s: Binding memory to buffer 0x%" PRIx64
3845 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
// NOTE(review): buffer_handle is already a uint64_t, so the HandleToUint64
// wrapper here is redundant (harmless, but inconsistent with nearby calls).
3846 api_name, HandleToUint64(buffer_handle));
3847 // Make the call for them so we can verify the state
3849 dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3853 // Validate bound memory range information
3854 const auto mem_info = GetMemObjInfo(dev_data, mem);
3856 skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
3857 skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
3858 VALIDATION_ERROR_17000816);
3861 // Validate memory requirements alignment
// SafeModulo guards against a zero alignment divisor.
3862 if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3863 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3864 buffer_handle, VALIDATION_ERROR_17000818,
3865 "%s: memoryOffset is 0x%" PRIxLEAST64
3866 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3867 ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3868 api_name, memoryOffset, buffer_state->requirements.alignment);
3872 // Validate memory requirements size
3873 if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
3874 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3875 buffer_handle, VALIDATION_ERROR_1700081a,
3876 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
3877 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
3878 ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3879 api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
3882 // Validate dedicated allocation
// Dedicated allocations must bind exactly the buffer they were created for,
// and only at offset zero.
3883 if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
3884 // TODO: Add vkBindBufferMemory2KHR error message when added to spec.
3885 auto validation_error = VALIDATION_ERROR_UNDEFINED;
3886 if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
3887 validation_error = VALIDATION_ERROR_17000bc8;
3890 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3891 buffer_handle, validation_error,
3892 "%s: for dedicated memory allocation 0x%" PRIxLEAST64
3893 ", VkMemoryDedicatedAllocateInfoKHR::buffer 0x%" PRIXLEAST64 " must be equal to buffer 0x%" PRIxLEAST64
3894 " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
3895 api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_buffer), buffer_handle, memoryOffset);
3899 // Validate device limits alignments
3900 static const VkBufferUsageFlagBits usage_list[3] = {
3901 static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3902 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3903 static const char *memory_type[3] = {"texel", "uniform", "storage"};
3904 static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3905 "minStorageBufferOffsetAlignment"};
3907 // TODO: vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3909 static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3910 VALIDATION_ERROR_17000814 };
3913 // Keep this one fresh!
3914 const VkDeviceSize offset_requirement[3] = {
3915 dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3916 dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3917 dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
// NOTE(review): this re-looks-up the buffer in bufferMap; buffer_state is
// already in hand and presumably holds the same createInfo — confirm and
// consider using buffer_state->createInfo.usage.
3918 VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
// Each usage class carries its own device-limit offset alignment.
3920 for (int i = 0; i < 3; i++) {
3921 if (usage & usage_list[i]) {
3922 if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3923 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3924 buffer_handle, msgCode[i],
3925 "%s: %s memoryOffset is 0x%" PRIxLEAST64
3926 " but must be a multiple of device limit %s 0x%" PRIxLEAST64 ".",
3927 api_name, memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
// Record a successful buffer binding: insert the bound memory range into the
// allocation's tracking, then bind the buffer object to the memory object.
3935 static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3936 VkDeviceSize memoryOffset, const char *api_name) {
3938 unique_lock_t lock(global_lock);
3939 // Track bound memory range information
3940 auto mem_info = GetMemObjInfo(dev_data, mem);
3942 InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3945 // Track objects tied to memory
3946 uint64_t buffer_handle = HandleToUint64(buffer);
3947 SetMemBinding(dev_data, mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer, api_name);
// Intercept for vkBindBufferMemory: validate the binding, dispatch, and record
// state only on VK_SUCCESS. Skipped calls report VK_ERROR_VALIDATION_FAILED_EXT.
3951 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3952 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3953 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3954 BUFFER_STATE *buffer_state;
3956 unique_lock_t lock(global_lock);
3957 buffer_state = GetBufferState(dev_data, buffer);
3959 bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
3961 result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3962 if (result == VK_SUCCESS) {
3963 PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
// Batched variant: resolve each pBindInfos[i].buffer to its state (output via
// buffer_state for the post-call record), then validate each binding through
// the single-bind path with an indexed api_name for message context.
3969 static bool PreCallValidateBindBufferMemory2(layer_data *dev_data, std::vector<BUFFER_STATE *> *buffer_state,
3970 uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
3972 unique_lock_t lock(global_lock);
3973 for (uint32_t i = 0; i < bindInfoCount; i++) {
3974 (*buffer_state)[i] = GetBufferState(dev_data, pBindInfos[i].buffer);
3979 for (uint32_t i = 0; i < bindInfoCount; i++) {
// NOTE(review): sprintf into a fixed api_name buffer declared on an elided
// line — confirm the buffer is large enough for the formatted string.
3980 sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i);
3981 skip |= PreCallValidateBindBufferMemory(dev_data, pBindInfos[i].buffer, (*buffer_state)[i], pBindInfos[i].memory,
3982 pBindInfos[i].memoryOffset, api_name);
// Record every binding in the batch through the single-bind record path.
3987 static void PostCallRecordBindBufferMemory2(layer_data *dev_data, const std::vector<BUFFER_STATE *> &buffer_state,
3988 uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
3989 for (uint32_t i = 0; i < bindInfoCount; i++) {
3990 PostCallRecordBindBufferMemory(dev_data, pBindInfos[i].buffer, buffer_state[i], pBindInfos[i].memory,
3991 pBindInfos[i].memoryOffset, "vkBindBufferMemory2()");
// Intercept for vkBindBufferMemory2 (core promoted form): validate the batch,
// dispatch, and record on VK_SUCCESS.
3995 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
3996 const VkBindBufferMemoryInfoKHR *pBindInfos) {
3997 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3998 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3999 std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4000 if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4001 result = dev_data->dispatch_table.BindBufferMemory2(device, bindInfoCount, pBindInfos);
4002 if (result == VK_SUCCESS) {
4003 PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
// KHR-suffixed alias of BindBufferMemory2; identical logic, KHR dispatch entry.
4009 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
4010 const VkBindBufferMemoryInfoKHR *pBindInfos) {
4011 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4012 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4013 std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4014 if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4015 result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
4016 if (result == VK_SUCCESS) {
4017 PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
// Cache the queried memory requirements on the buffer's state tracker so later
// bind-time validation can check offset/size/type without re-querying.
4023 static void PostCallRecordGetBufferMemoryRequirements(layer_data *dev_data, VkBuffer buffer,
4024 VkMemoryRequirements *pMemoryRequirements) {
4025 BUFFER_STATE *buffer_state;
4027 unique_lock_t lock(global_lock);
4028 buffer_state = GetBufferState(dev_data, buffer);
4031 buffer_state->requirements = *pMemoryRequirements;
// Remember that the app queried requirements (suppresses the bind-time warning).
4032 buffer_state->memory_requirements_checked = true;
4036 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
4037 VkMemoryRequirements *pMemoryRequirements) {
4038 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4039 dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
4040 PostCallRecordGetBufferMemoryRequirements(dev_data, buffer, pMemoryRequirements);
4043 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4044 VkMemoryRequirements2KHR *pMemoryRequirements) {
4045 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4046 dev_data->dispatch_table.GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
4047 PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
4050 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4051 VkMemoryRequirements2KHR *pMemoryRequirements) {
4052 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4053 dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4054 PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
// Cache the queried memory requirements on the image's state tracker, mirroring
// the buffer variant above.
4057 static void PostCallRecordGetImageMemoryRequirements(layer_data *dev_data, VkImage image,
4058 VkMemoryRequirements *pMemoryRequirements) {
4059 IMAGE_STATE *image_state;
4061 unique_lock_t lock(global_lock);
4062 image_state = GetImageState(dev_data, image);
4065 image_state->requirements = *pMemoryRequirements;
// Remember that the app queried requirements (suppresses the bind-time warning).
4066 image_state->memory_requirements_checked = true;
4070 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
4071 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4072 dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
4073 PostCallRecordGetImageMemoryRequirements(dev_data, image, pMemoryRequirements);
4076 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4077 VkMemoryRequirements2KHR *pMemoryRequirements) {
4078 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4079 dev_data->dispatch_table.GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
4080 PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
4083 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4084 VkMemoryRequirements2KHR *pMemoryRequirements) {
4085 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4086 dev_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4087 PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
// Cache sparse memory requirements on the image state and note whether any
// aspect requires a metadata binding.
4090 static void PostCallRecordGetImageSparseMemoryRequirements(IMAGE_STATE *image_state, uint32_t req_count,
4091 VkSparseImageMemoryRequirements *reqs) {
4092 image_state->get_sparse_reqs_called = true;
4093 image_state->sparse_requirements.resize(req_count);
4095 std::copy(reqs, reqs + req_count, image_state->sparse_requirements.begin());
// Metadata aspect present in any requirement means a metadata bind is required
// before the image can be used.
4097 for (const auto &req : image_state->sparse_requirements) {
4098 if (req.formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
4099 image_state->sparse_metadata_required = true;
4104 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
4105 VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
4106 // TODO : Implement tracking here, just passthrough initially
4107 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4108 dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
4109 pSparseMemoryRequirements);
4110 unique_lock_t lock(global_lock);
4111 auto image_state = GetImageState(dev_data, image);
4112 PostCallRecordGetImageSparseMemoryRequirements(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4115 static void PostCallRecordGetImageSparseMemoryRequirements2(IMAGE_STATE *image_state, uint32_t req_count,
4116 VkSparseImageMemoryRequirements2KHR *reqs) {
4117 std::vector<VkSparseImageMemoryRequirements> sparse_reqs(req_count);
4118 // Migrate to old struct type for common handling with GetImageSparseMemoryRequirements()
4119 for (uint32_t i = 0; i < req_count; ++i) {
4120 assert(!reqs[i].pNext); // TODO: If an extension is ever added here we need to handle it
4121 sparse_reqs[i] = reqs[i].memoryRequirements;
4123 PostCallRecordGetImageSparseMemoryRequirements(image_state, req_count, sparse_reqs.data());
4126 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4127 uint32_t *pSparseMemoryRequirementCount,
4128 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4129 // TODO : Implement tracking here, just passthrough initially
4130 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4131 dev_data->dispatch_table.GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount,
4132 pSparseMemoryRequirements);
4133 unique_lock_t lock(global_lock);
4134 auto image_state = GetImageState(dev_data, pInfo->image);
4135 PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4138 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
4139 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4140 uint32_t *pSparseMemoryRequirementCount,
4141 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4142 // TODO : Implement tracking here, just passthrough initially
4143 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4144 dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
4145 pSparseMemoryRequirements);
4146 unique_lock_t lock(global_lock);
4147 auto image_state = GetImageState(dev_data, pInfo->image);
4148 PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4151 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
4152 VkImageType type, VkSampleCountFlagBits samples,
4153 VkImageUsageFlags usage, VkImageTiling tiling,
4154 uint32_t *pPropertyCount,
4155 VkSparseImageFormatProperties *pProperties) {
4156 // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4157 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4158 instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling,
4159 pPropertyCount, pProperties);
// Pure passthrough intercept for vkGetPhysicalDeviceSparseImageFormatProperties2.
4162 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(
4163 VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4164 VkSparseImageFormatProperties2KHR *pProperties) {
4165 // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4166 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4167 instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount,
// KHR alias of the *2 passthrough above.
4171 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
4172 VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4173 VkSparseImageFormatProperties2KHR *pProperties) {
4174 // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4175 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4176 instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount,
// Intercept for vkDestroyImageView: validate, dispatch, then retire tracking.
4180 VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
4181 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4182 // Common data objects used pre & post call
4183 IMAGE_VIEW_STATE *image_view_state = nullptr;
4184 VK_OBJECT obj_struct;
4185 unique_lock_t lock(global_lock);
4186 bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
4189 dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
// Destroying VK_NULL_HANDLE is a no-op; only record real handles.
4191 if (imageView != VK_NULL_HANDLE) {
4192 PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
// Intercept for vkDestroyShaderModule: drop the layer's shader-module state,
// then dispatch. No pre-call validation is performed for this handle type.
4197 VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
4198 const VkAllocationCallbacks *pAllocator) {
4199 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4201 unique_lock_t lock(global_lock);
4202 dev_data->shaderModuleMap.erase(shaderModule);
4205 dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
// Pre-call validation for vkDestroyPipeline: resolve the pipeline's state and
// object wrapper (outputs for the post-call record), then verify the pipeline
// is not referenced by any in-flight command buffer.
4208 static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
4209 VK_OBJECT *obj_struct) {
4210 *pipeline_state = getPipelineState(dev_data, pipeline);
4211 *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
// Honor the layer setting that disables this specific check.
4212 if (dev_data->instance_data->disabled.destroy_pipeline) return false;
4214 if (*pipeline_state) {
4215 skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, "vkDestroyPipeline", VALIDATION_ERROR_25c005fa);
4220 static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
4221 VK_OBJECT obj_struct) {
4222 // Any bound cmd buffers are now invalid
4223 invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
4224 dev_data->pipelineMap.erase(pipeline);
// Intercept for vkDestroyPipeline: validate not-in-use, dispatch, then retire
// the pipeline's state tracking.
4227 VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
4228 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4229 PIPELINE_STATE *pipeline_state = nullptr;
4230 VK_OBJECT obj_struct;
4231 unique_lock_t lock(global_lock);
4232 bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
4235 dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
// Destroying VK_NULL_HANDLE is a no-op; only record real handles.
4237 if (pipeline != VK_NULL_HANDLE) {
4238 PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
// Intercept for vkDestroyPipelineLayout: drop the layer's layout state, then
// dispatch. No pre-call validation is performed for this handle type.
4243 VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
4244 const VkAllocationCallbacks *pAllocator) {
4245 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4246 unique_lock_t lock(global_lock);
4247 dev_data->pipelineLayoutMap.erase(pipelineLayout);
4250 dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
4253 static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
4254 VK_OBJECT *obj_struct) {
4255 *sampler_state = GetSamplerState(dev_data, sampler);
4256 *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
4257 if (dev_data->instance_data->disabled.destroy_sampler) return false;
4259 if (*sampler_state) {
4260 skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, "vkDestroySampler", VALIDATION_ERROR_26600874);
4265 static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
4266 VK_OBJECT obj_struct) {
4267 // Any bound cmd buffers are now invalid
4268 if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
4269 dev_data->samplerMap.erase(sampler);
4272 VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
4273 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4274 SAMPLER_STATE *sampler_state = nullptr;
4275 VK_OBJECT obj_struct;
4276 unique_lock_t lock(global_lock);
4277 bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
4280 dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
4282 if (sampler != VK_NULL_HANDLE) {
4283 PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
4288 static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
4289 auto layout_it = dev_data->descriptorSetLayoutMap.find(ds_layout);
4290 if (layout_it != dev_data->descriptorSetLayoutMap.end()) {
4291 layout_it->second.get()->MarkDestroyed();
4292 dev_data->descriptorSetLayoutMap.erase(layout_it);
4296 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
4297 const VkAllocationCallbacks *pAllocator) {
4298 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4299 dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
4300 unique_lock_t lock(global_lock);
4301 PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
4304 static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
4305 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
4306 *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
4307 *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
4308 if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
4310 if (*desc_pool_state) {
4312 ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, "vkDestroyDescriptorPool", VALIDATION_ERROR_2440025e);
4317 static void PreCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
4318 DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
4319 if (desc_pool_state) {
4320 // Any bound cmd buffers are now invalid
4321 invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
4322 // Free sets that were in this pool
4323 for (auto ds : desc_pool_state->sets) {
4324 freeDescriptorSet(dev_data, ds);
4326 dev_data->descriptorPoolMap.erase(descriptorPool);
4327 delete desc_pool_state;
4331 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
4332 const VkAllocationCallbacks *pAllocator) {
4333 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4334 DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
4335 VK_OBJECT obj_struct;
4336 unique_lock_t lock(global_lock);
4337 bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
4339 PreCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
4341 dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
4345 // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
4346 // If this is a secondary command buffer, then make sure its primary is also in-flight
4347 // If primary is not in-flight, then remove secondary from global in-flight set
4348 // This function is only valid at a point when cmdBuffer is being reset or freed
4349 static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
4350 UNIQUE_VALIDATION_ERROR_CODE error_code) {
4352 if (cb_node->in_use.load()) {
4353 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4354 HandleToUint64(cb_node->commandBuffer), error_code,
4355 "Attempt to %s command buffer (0x%" PRIx64 ") which is in use.", action,
4356 HandleToUint64(cb_node->commandBuffer));
4361 // Iterate over all cmdBuffers in given commandPool and verify that each is not in use
4362 static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
4363 UNIQUE_VALIDATION_ERROR_CODE error_code) {
4365 for (auto cmd_buffer : pPool->commandBuffers) {
4366 skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
4371 // Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
4372 static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
4373 const VkCommandBuffer *command_buffers) {
4374 for (uint32_t i = 0; i < command_buffer_count; i++) {
4375 auto cb_state = GetCBNode(dev_data, command_buffers[i]);
4376 // Remove references to command buffer's state and delete
4378 // reset prior to delete, removing various references to it.
4379 // TODO: fix this, it's insane.
4380 ResetCommandBufferState(dev_data, cb_state->commandBuffer);
4381 // Remove the cb_state's references from layer_data and COMMAND_POOL_NODE
4382 dev_data->commandBufferMap.erase(cb_state->commandBuffer);
4383 pool_state->commandBuffers.erase(command_buffers[i]);
4389 VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
4390 const VkCommandBuffer *pCommandBuffers) {
4391 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4393 unique_lock_t lock(global_lock);
4395 for (uint32_t i = 0; i < commandBufferCount; i++) {
4396 auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
4397 // Delete CB information structure, and remove from commandBufferMap
4399 skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
4405 auto pPool = GetCommandPoolNode(dev_data, commandPool);
4406 FreeCommandBufferStates(dev_data, pPool, commandBufferCount, pCommandBuffers);
4409 dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
4412 VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
4413 const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
4414 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4416 VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
4418 if (VK_SUCCESS == result) {
4419 lock_guard_t lock(global_lock);
4420 dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
4421 dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
4426 VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
4427 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
4428 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4430 if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
4431 if (!dev_data->enabled_features.pipelineStatisticsQuery) {
4432 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
4433 VALIDATION_ERROR_11c0062e,
4434 "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
4435 "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
4439 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4441 result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
4443 if (result == VK_SUCCESS) {
4444 lock_guard_t lock(global_lock);
4445 QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
4446 qp_node->createInfo = *pCreateInfo;
4451 static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4452 COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4453 if (dev_data->instance_data->disabled.destroy_command_pool) return false;
4456 // Verify that command buffers in pool are complete (not in-flight)
4457 skip |= checkCommandBuffersInFlight(dev_data, cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
4462 static void PreCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4463 COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4464 // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
4465 // "When a pool is destroyed, all command buffers allocated from the pool are freed."
4467 // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
4468 std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
4469 FreeCommandBufferStates(dev_data, cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
4470 dev_data->commandPoolMap.erase(pool);
4474 // Destroy commandPool along with all of the commandBuffers allocated from that pool
4475 VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4476 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4477 unique_lock_t lock(global_lock);
4478 bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool);
4480 PreCallRecordDestroyCommandPool(dev_data, commandPool);
4482 dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
4486 VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
4487 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4490 unique_lock_t lock(global_lock);
4491 auto pPool = GetCommandPoolNode(dev_data, commandPool);
4492 skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
4495 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4497 VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
4499 // Reset all of the CBs allocated from this pool
4500 if (VK_SUCCESS == result) {
4502 for (auto cmdBuffer : pPool->commandBuffers) {
4503 ResetCommandBufferState(dev_data, cmdBuffer);
4510 VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
4511 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4513 unique_lock_t lock(global_lock);
4514 for (uint32_t i = 0; i < fenceCount; ++i) {
4515 auto pFence = GetFenceNode(dev_data, pFences[i]);
4516 if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
4517 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4518 HandleToUint64(pFences[i]), VALIDATION_ERROR_32e008c6, "Fence 0x%" PRIx64 " is in use.",
4519 HandleToUint64(pFences[i]));
4524 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4526 VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
4528 if (result == VK_SUCCESS) {
4530 for (uint32_t i = 0; i < fenceCount; ++i) {
4531 auto pFence = GetFenceNode(dev_data, pFences[i]);
4533 if (pFence->scope == kSyncScopeInternal) {
4534 pFence->state = FENCE_UNSIGNALED;
4535 } else if (pFence->scope == kSyncScopeExternalTemporary) {
4536 pFence->scope = kSyncScopeInternal;
4546 // For given cb_nodes, invalidate them and track object causing invalidation
4547 void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
4548 for (auto cb_node : cb_nodes) {
4549 if (cb_node->state == CB_RECORDING) {
4550 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4551 HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
4552 "Invalidating a command buffer that's currently being recorded: 0x%" PRIx64 ".",
4553 HandleToUint64(cb_node->commandBuffer));
4554 cb_node->state = CB_INVALID_INCOMPLETE;
4555 } else if (cb_node->state == CB_RECORDED) {
4556 cb_node->state = CB_INVALID_COMPLETE;
4558 cb_node->broken_bindings.push_back(obj);
4560 // if secondary, then propagate the invalidation to the primaries that will call us.
4561 if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
4562 invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
4567 static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
4568 FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
4569 *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
4570 *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
4571 if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
4573 if (*framebuffer_state) {
4575 ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, "vkDestroyFramebuffer", VALIDATION_ERROR_250006f8);
4580 static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4581 VK_OBJECT obj_struct) {
4582 invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4583 dev_data->frameBufferMap.erase(framebuffer);
4586 VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4587 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4588 FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4589 VK_OBJECT obj_struct;
4590 unique_lock_t lock(global_lock);
4591 bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4594 dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4596 if (framebuffer != VK_NULL_HANDLE) {
4597 PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
4602 static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
4603 VK_OBJECT *obj_struct) {
4604 *rp_state = GetRenderPassState(dev_data, render_pass);
4605 *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
4606 if (dev_data->instance_data->disabled.destroy_renderpass) return false;
4609 skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, "vkDestroyRenderPass", VALIDATION_ERROR_264006d2);
4614 static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
4615 VK_OBJECT obj_struct) {
4616 invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
4617 dev_data->renderPassMap.erase(render_pass);
4620 VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
4621 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4622 RENDER_PASS_STATE *rp_state = nullptr;
4623 VK_OBJECT obj_struct;
4624 unique_lock_t lock(global_lock);
4625 bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
4628 dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
4630 if (renderPass != VK_NULL_HANDLE) {
4631 PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
4636 VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
4637 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
4638 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4639 unique_lock_t lock(global_lock);
4640 bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
4643 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4644 VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
4646 if (VK_SUCCESS == result) {
4648 PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
4654 VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
4655 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
4656 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4657 unique_lock_t lock(global_lock);
4658 bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
4660 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4661 VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
4662 if (VK_SUCCESS == result) {
4664 PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
4670 // Access helper functions for external modules
4671 VkFormatProperties GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
4672 VkFormatProperties format_properties;
4673 instance_layer_data *instance_data =
4674 GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4675 instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &format_properties);
4676 return format_properties;
4679 VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
4680 VkImageFormatProperties *pImageFormatProperties) {
4681 instance_layer_data *instance_data =
4682 GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4683 return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(
4684 device_data->physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags,
4685 pImageFormatProperties);
4688 const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
4690 const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
4691 return &device_data->phys_dev_props;
4694 const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
4696 std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
4697 return &device_data->imageMap;
4700 std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
4701 return &device_data->imageSubresourceMap;
4704 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
4705 return &device_data->imageLayoutMap;
4708 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
4709 return &device_data->imageLayoutMap;
4712 std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
4713 return &device_data->bufferMap;
4716 std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
4717 return &device_data->bufferViewMap;
4720 std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
4721 return &device_data->imageViewMap;
4724 const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) { return &device_data->phys_dev_properties; }
4726 const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
4728 const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
4730 VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
4731 const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
4732 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4733 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4734 bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
4736 result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
4738 if (VK_SUCCESS == result) {
4739 lock_guard_t lock(global_lock);
4740 PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
4745 VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
4746 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
4747 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4748 unique_lock_t lock(global_lock);
4749 bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
4751 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4752 VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
4753 if (VK_SUCCESS == result) {
4755 PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
4762 VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
4763 const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
4764 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4765 VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
4766 if (VK_SUCCESS == result) {
4767 lock_guard_t lock(global_lock);
4768 auto &fence_node = dev_data->fenceMap[*pFence];
4769 fence_node.fence = *pFence;
4770 fence_node.createInfo = *pCreateInfo;
4771 fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
4776 // TODO handle pipeline caches
4777 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
4778 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
4779 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4780 VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
4784 VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
4785 const VkAllocationCallbacks *pAllocator) {
4786 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4787 dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
4790 VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
4792 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4793 VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
4797 VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
4798 const VkPipelineCache *pSrcCaches) {
4799 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4800 VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
4804 // Validation cache:
4805 // CV is the bottommost implementor of this extension. Don't pass calls down.
4806 VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
4807 const VkAllocationCallbacks *pAllocator,
4808 VkValidationCacheEXT *pValidationCache) {
4809 *pValidationCache = ValidationCache::Create(pCreateInfo);
4810 return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
4813 VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
4814 const VkAllocationCallbacks *pAllocator) {
4815 delete (ValidationCache *)validationCache;
4818 VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
4820 size_t inSize = *pDataSize;
4821 ((ValidationCache *)validationCache)->Write(pDataSize, pData);
4822 return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
4825 VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
4826 const VkValidationCacheEXT *pSrcCaches) {
4827 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4829 auto dst = (ValidationCache *)dstCache;
4830 auto src = (ValidationCache const *const *)pSrcCaches;
4831 VkResult result = VK_SUCCESS;
4832 for (uint32_t i = 0; i < srcCacheCount; i++) {
4833 if (src[i] == dst) {
4834 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
4835 0, VALIDATION_ERROR_3e600c00,
4836 "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
4837 HandleToUint64(dstCache));
4838 result = VK_ERROR_VALIDATION_FAILED_EXT;
4848 // utility function to set collective state for pipeline
4849 void set_pipeline_state(PIPELINE_STATE *pPipe) {
4850 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
4851 if (pPipe->graphicsPipelineCI.pColorBlendState) {
4852 for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
4853 if (VK_TRUE == pPipe->attachments[i].blendEnable) {
4854 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4855 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4856 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4857 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4858 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4859 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4860 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4861 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
4862 pPipe->blendConstantsEnabled = true;
4869 bool validate_dual_src_blend_feature(layer_data *device_data, PIPELINE_STATE *pipe_state) {
4871 if (pipe_state->graphicsPipelineCI.pColorBlendState) {
4872 for (size_t i = 0; i < pipe_state->attachments.size(); ++i) {
4873 if (!device_data->enabled_features.dualSrcBlend) {
4874 if ((pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4875 (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4876 (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4877 (pipe_state->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA) ||
4878 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
4879 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
4880 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
4881 (pipe_state->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
4883 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
4884 HandleToUint64(pipe_state->pipeline), DRAWSTATE_INVALID_FEATURE,
4885 "CmdBindPipeline: vkPipeline (0x%" PRIx64 ") attachment[" PRINTF_SIZE_T_SPECIFIER
4886 "] has a dual-source blend factor but this device feature is not enabled.",
4887 HandleToUint64(pipe_state->pipeline), i);
4895 VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4896 const VkGraphicsPipelineCreateInfo *pCreateInfos,
4897 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4898 // The order of operations here is a little convoluted but gets the job done
4899 // 1. Pipeline create state is first shadowed into PIPELINE_STATE struct
4900 // 2. Create state is then validated (which uses flags setup during shadowing)
4901 // 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
4903 vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
4904 pipe_state.reserve(count);
4905 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4908 unique_lock_t lock(global_lock);
4910 for (i = 0; i < count; i++) {
4911 pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
4912 pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i], GetRenderPassStateSharedPtr(dev_data, pCreateInfos[i].renderPass));
4913 pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4916 for (i = 0; i < count; i++) {
4917 skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
4922 for (i = 0; i < count; i++) {
4923 skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
4927 for (i = 0; i < count; i++) {
4928 pPipelines[i] = VK_NULL_HANDLE;
4930 return VK_ERROR_VALIDATION_FAILED_EXT;
4934 dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4936 for (i = 0; i < count; i++) {
4937 if (pPipelines[i] != VK_NULL_HANDLE) {
4938 pipe_state[i]->pipeline = pPipelines[i];
4939 dev_data->pipelineMap[pPipelines[i]] = std::move(pipe_state[i]);
4946 VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4947 const VkComputePipelineCreateInfo *pCreateInfos,
4948 const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4951 vector<std::unique_ptr<PIPELINE_STATE>> pPipeState;
4952 pPipeState.reserve(count);
4953 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4956 unique_lock_t lock(global_lock);
4957 for (i = 0; i < count; i++) {
4958 // Create and initialize internal tracking data structure
4959 pPipeState.push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
4960 pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
4961 pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4963 // TODO: Add Compute Pipeline Verification
4964 skip |= validate_compute_pipeline(dev_data, pPipeState[i].get());
4968 for (i = 0; i < count; i++) {
4969 pPipelines[i] = VK_NULL_HANDLE;
4971 return VK_ERROR_VALIDATION_FAILED_EXT;
4976 dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4978 for (i = 0; i < count; i++) {
4979 if (pPipelines[i] != VK_NULL_HANDLE) {
4980 pPipeState[i]->pipeline = pPipelines[i];
4981 dev_data->pipelineMap[pPipelines[i]] = std::move(pPipeState[i]);
// vkCreateSampler: call down the chain and, on success, record tracking state
// for the new sampler in samplerMap.
VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        lock_guard_t lock(global_lock);  // Guard samplerMap against concurrent access
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
4999 static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
5000 if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
5001 return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info,
5002 dev_data->extensions.vk_khr_push_descriptor,
5003 dev_data->phys_dev_ext_props.max_push_descriptors);
5006 static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
5007 VkDescriptorSetLayout set_layout) {
5008 dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
// vkCreateDescriptorSetLayout: validate the create info, call down the chain,
// then record tracking state for the new layout.
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;  // Returned as-is when validation fails
    unique_lock_t lock(global_lock);  // Guards state read by validate/record steps
    bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
    result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
// Checks [offset, offset+size) against maxPushConstantsSize, that size is a
// non-zero multiple of 4, and that offset is a multiple of 4. Error messages
// (and validation error IDs) are selected per caller via caller_name, which
// must be exactly "vkCreatePipelineLayout()" or "vkCmdPushConstants()".
// Returns true (skip) if any check failed.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    if (dev_data->instance_data->disabled.push_constant_range) return false;
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            if (offset >= maxPushConstantsSize) {
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    VALIDATION_ERROR_11a0024c,
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
                    caller_name, index, offset, maxPushConstantsSize);
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                VALIDATION_ERROR_11a00254,
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantSize of %u.",
                                caller_name, index, offset, size, maxPushConstantsSize);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            if (offset >= maxPushConstantsSize) {
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    VALIDATION_ERROR_1bc002e4,
                    "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
                    caller_name, index, offset, maxPushConstantsSize);
            if (size > maxPushConstantsSize - offset) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                VALIDATION_ERROR_1bc002e6,
                                "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
                                "maxPushConstantSize of %u.",
                                caller_name, index, offset, size, maxPushConstantsSize);
        // Unrecognized caller_name is an internal (layer) error, not an app error.
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            VALIDATION_ERROR_11a00250,
                            "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            VALIDATION_ERROR_11a00252,
                            "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            VALIDATION_ERROR_1bc2c21b,
                            "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            VALIDATION_ERROR_1bc002e2,
                            "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
        // Unrecognized caller_name: internal layer error.
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            VALIDATION_ERROR_11a0024e,
                            "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            VALIDATION_ERROR_1bc002e0, "%s call has push constants with offset %u. Offset must be a multiple of 4.",
                            caller_name, offset);
        // Unrecognized caller_name: internal layer error.
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
// Descriptor categories used to group VkDescriptorType values when checking
// per-stage descriptor limits (see GetDescriptorCountMaxPerStage). Values index
// into valarrays sized DSL_NUM_DESCRIPTOR_GROUPS.
enum DSL_DESCRIPTOR_GROUPS {
    DSL_TYPE_SAMPLERS = 0,
    DSL_TYPE_UNIFORM_BUFFERS,
    DSL_TYPE_STORAGE_BUFFERS,
    DSL_TYPE_SAMPLED_IMAGES,
    DSL_TYPE_STORAGE_IMAGES,
    DSL_TYPE_INPUT_ATTACHMENTS,
    DSL_NUM_DESCRIPTOR_GROUPS  // Number of groups; used only to size arrays
5137 // Used by PreCallValiateCreatePipelineLayout.
5138 // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
5139 std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
5140 const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts) {
5141 // Identify active pipeline stages
5142 std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
5143 VK_SHADER_STAGE_COMPUTE_BIT};
5144 if (dev_data->enabled_features.geometryShader) {
5145 stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
5147 if (dev_data->enabled_features.tessellationShader) {
5148 stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
5149 stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
5152 // Allow iteration over enum values
5153 std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS,
5154 DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS};
5156 // Sum by layouts per stage, then pick max of stages per type
5157 std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
5158 for (auto stage : stage_flags) {
5159 std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
5160 for (auto dsl : set_layouts) {
5161 for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5162 const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5163 if (0 != (stage & binding->stageFlags)) {
5164 switch (binding->descriptorType) {
5165 case VK_DESCRIPTOR_TYPE_SAMPLER:
5166 stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5168 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
5169 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
5170 stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
5172 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
5173 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
5174 stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
5176 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
5177 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
5178 stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5180 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
5181 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
5182 stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
5184 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
5185 stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5186 stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5188 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
5189 stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
5197 for (auto type : dsl_groups) {
5198 max_sum[type] = std::max(stage_sum[type], max_sum[type]);
5204 // Used by PreCallValidateCreatePipelineLayout.
5205 // Returns an array of size VK_DESCRIPTOR_TYPE_RANGE_SIZE of the summed descriptors by type.
5206 // Note: descriptors only count against the limit once even if used by multiple stages.
5207 std::valarray<uint32_t> GetDescriptorSum(
5208 const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts) {
5209 std::valarray<uint32_t> sum_by_type(0U, VK_DESCRIPTOR_TYPE_RANGE_SIZE);
5210 for (auto dsl : set_layouts) {
5211 for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5212 const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5213 sum_by_type[binding->descriptorType] += binding->descriptorCount;
// Pre-call validation for vkCreatePipelineLayout: checks set layout count, push
// constant ranges, push descriptor usage, and every per-stage and all-stage
// descriptor limit from VkPhysicalDeviceLimits. Returns true (skip) on any failure.
// NOTE(review): the function name is misspelled ("Valiate"); callers use this
// spelling, so renaming requires updating the call sites as well.
static bool PreCallValiateCreatePipelineLayout(const layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo) {
    // Validate layout count against device physical limit
    if (pCreateInfo->setLayoutCount > dev_data->phys_dev_props.limits.maxBoundDescriptorSets) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe0023c,
                        "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
                        pCreateInfo->setLayoutCount, dev_data->phys_dev_props.limits.maxBoundDescriptorSets);
    // Validate Push Constant ranges
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                          pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            VALIDATION_ERROR_11a2dc03, "vkCreatePipelineLayout() call has no stageFlags set.");
    // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
    // O(n^2) pairwise scan; pushConstantRangeCount is small in practice.
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                VALIDATION_ERROR_0fe00248,
                                "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
    // Early out to avoid the heavier per-descriptor accounting below.
    if (skip) return skip;
    std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
    unsigned int push_descriptor_set_count = 0;
    unique_lock_t lock(global_lock);  // Lock while accessing global state
    for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        // NOTE(review): set_layouts[i] is dereferenced without a null check;
        // presumably the handle is guaranteed tracked at this point -- confirm.
        if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
    if (push_descriptor_set_count > 1) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe0024a, "vkCreatePipelineLayout() Multiple push descriptor sets found.");
    // Max descriptors by type, within a single pipeline stage
    std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(dev_data, set_layouts);
    // Samplers
    if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                VALIDATION_ERROR_0fe0023e,
                "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
                "maxPerStageDescriptorSamplers limit (%d).",
                max_descriptors_per_stage[DSL_TYPE_SAMPLERS], dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers);
    // Uniform buffers
    if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00240,
                        "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
                        "maxPerStageDescriptorUniformBuffers limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
                        dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
    // Storage buffers
    if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00242,
                        "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
                        "maxPerStageDescriptorStorageBuffers limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
                        dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
    // Sampled images
    if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00244,
                        "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
                        "maxPerStageDescriptorSampledImages limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
                        dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages);
    // Storage images
    if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00246,
                        "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
                        "maxPerStageDescriptorStorageImages limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
                        dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages);
    // Input attachments
    if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
        dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00d18,
                        "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
                        "maxPerStageDescriptorInputAttachments limit (%d).",
                        max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
                        dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
    // Total descriptors by type
    std::valarray<uint32_t> sum_all_stages = GetDescriptorSum(dev_data, set_layouts);
    // Samplers: SAMPLER and COMBINED_IMAGE_SAMPLER both count
    if ((sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]) >
        dev_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                VALIDATION_ERROR_0fe00d1a,
                "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
                "maxDescriptorSetSamplers limit (%d).",
                sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER],
                dev_data->phys_dev_props.limits.maxDescriptorSetSamplers);
    // Uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00d1c,
                        "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetUniformBuffers limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
                        dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers);
    // Dynamic uniform buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
        dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00d1e,
                        "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetUniformBuffersDynamic limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
                        dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
    // Storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00d20,
                        "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageBuffers limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
                        dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers);
    // Dynamic storage buffers
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
        dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00d22,
                        "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageBuffersDynamic limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
                        dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
    // Sampled images: SAMPLED_IMAGE, COMBINED_IMAGE_SAMPLER, and UNIFORM_TEXEL_BUFFER all count
    if ((sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]) > dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                VALIDATION_ERROR_0fe00d24,
                "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
                "maxDescriptorSetSampledImages limit (%d).",
                sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
                    sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER],
                dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages);
    // Storage images: STORAGE_IMAGE and STORAGE_TEXEL_BUFFER both count
    if ((sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]) >
        dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00d26,
                        "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetStorageImages limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER],
                        dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages);
    // Input attachments
    if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_0fe00d28,
                        "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
                        "maxDescriptorSetInputAttachments limit (%d).",
                        sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
                        dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments);
5426 // For repeatable sorting, not very useful for "memory in range" search
5427 struct PushConstantRangeCompare {
5428 bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
5429 if (lhs->offset == rhs->offset) {
5430 if (lhs->size == rhs->size) {
5431 // The comparison is arbitrary, but avoids false aliasing by comparing all fields.
5432 return lhs->stageFlags < rhs->stageFlags;
5434 // If the offsets are the same then sorting by the end of range is useful for validation
5435 return lhs->size < rhs->size;
5437 return lhs->offset < rhs->offset;
// File-scope dictionary interning canonical push-constant-range sets so that
// equivalent sets share a single id.
static PushConstantRangesDict push_constant_ranges_dict;
5443 PushConstantRangesId get_canonical_id(const VkPipelineLayoutCreateInfo *info) {
5444 if (!info->pPushConstantRanges) {
5445 // Hand back the empty entry (creating as needed)...
5446 return push_constant_ranges_dict.look_up(PushConstantRanges());
5449 // Sort the input ranges to ensure equivalent ranges map to the same id
5450 std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
5451 for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
5452 sorted.insert(info->pPushConstantRanges + i);
5455 PushConstantRanges ranges(sorted.size());
5456 for (const auto range : sorted) {
5457 ranges.emplace_back(*range);
5459 return push_constant_ranges_dict.look_up(std::move(ranges));
// Dictionary of canonical form of the pipeline set layout of descriptor set layouts
static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;

// Dictionary of canonical form of the "compatible for set" records
static PipelineLayoutCompatDict pipeline_layout_compat_dict;
// Intern a "compatible for set N" record (set index + push constant ranges id +
// set layouts id) and return its canonical id for cheap compatibility checks.
static PipelineLayoutCompatId get_canonical_id(const uint32_t set_index, const PushConstantRangesId pcr_id,
                                               const PipelineLayoutSetLayoutsId set_layouts_id) {
    return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
// Post-call record for vkCreatePipelineLayout: populate the layout's tracking
// node with its set layouts and the canonical-form ids used for "compatible for
// set N" checks during draw-time validation.
static void PostCallRecordCreatePipelineLayout(layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                               const VkPipelineLayout *pPipelineLayout) {
    unique_lock_t lock(global_lock);  // Lock while accessing state
    PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
    plNode.layout = *pPipelineLayout;
    plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
    PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
    for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        set_layouts[i] = plNode.set_layouts[i]->get_layout_id();
    // Get canonical form IDs for the "compatible for set" contents
    plNode.push_constant_ranges = get_canonical_id(pCreateInfo);
    auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
    plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount);
    // Create table of "compatible for set N" canonical forms for trivial accept validation
    for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
        plNode.compat_for_set.emplace_back(get_canonical_id(i, plNode.push_constant_ranges, set_layouts_id));
5499 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5500 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5501 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5503 bool skip = PreCallValiateCreatePipelineLayout(dev_data, pCreateInfo);
5504 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5506 VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5508 if (VK_SUCCESS == result) {
5509 PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
// vkCreateDescriptorPool: call down the chain and, on success, allocate and
// record the layer's tracking state for the new pool.
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        // NOTE(review): plain `new` throws std::bad_alloc rather than returning
        // NULL, so this out-of-memory branch appears unreachable -- confirm.
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(*pDescriptorPool), DRAWSTATE_OUT_OF_MEMORY,
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        lock_guard_t lock(global_lock);  // Guard descriptorPoolMap insertion
        dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
    // Need to do anything if pool create fails?
5535 VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
5536 VkDescriptorPoolResetFlags flags) {
5537 // TODO : Add checks for VALIDATION_ERROR_32a00272
5538 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5539 VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
5540 if (VK_SUCCESS == result) {
5541 lock_guard_t lock(global_lock);
5542 clearDescriptorPool(dev_data, device, descriptorPool, flags);
5546 // Ensure the pool contains enough descriptors and descriptor sets to satisfy
5547 // an allocation request. Fills common_data with the total number of descriptors of each type required,
5548 // as well as DescriptorSetLayout ptrs used for later update.
5549 static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5550 cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5551 // Always update common data
5552 cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
5553 if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
5554 // All state checks for AllocateDescriptorSets is done in single function
5555 return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
5557 // Allocation state was good and call down chain was made so update state based on allocating descriptor sets
5558 static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5559 VkDescriptorSet *pDescriptorSets,
5560 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5561 // All the updates are contained in a single cvdescriptorset function
5562 cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
5563 &dev_data->setMap, dev_data);
// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
// vkAllocateDescriptorSets: validate pool capacity, call down the chain, then
// record tracking state for the new sets.
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    // common_data is filled by the pre-call step and consumed by the post-call record.
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
// Verify state before freeing DescriptorSets: each set must be idle (not in
// flight on any command buffer) and the pool must allow individual frees.
// Returns true (skip) if any check failed.
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i) {
        // VK_NULL_HANDLE entries are legal in pDescriptorSets and are skipped.
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        HandleToUint64(pool), VALIDATION_ERROR_28600270,
                        "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                        "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
    // NOTE(review): pool_state is dereferenced below without a null check, unlike
    // the pre-call validator which guards it -- presumably the pool is guaranteed
    // tracked by the time the free succeeded; confirm.
    // Update available descriptor sets in pool
    pool_state->availableSets += count;
    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        // VK_NULL_HANDLE entries are legal in pDescriptorSets and are ignored.
        if (descriptor_sets[i] != VK_NULL_HANDLE) {
            auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
            uint32_t type_index = 0, descriptor_count = 0;
            for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
                type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
                descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
                // Return this binding's descriptors to the pool's available counts
                pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
            freeDescriptorSet(dev_data, descriptor_set);
            pool_state->sets.erase(descriptor_set);
// vkFreeDescriptorSets: validate the sets are idle and the pool permits frees,
// call down the chain, then release the layer's tracking state on success.
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                  const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
5648 // TODO : This is a Proof-of-concept for core validation architecture
5649 // Really we'll want to break out these functions to separate files but
5650 // keeping it all together here to prove out design
5651 // PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
// Returns true ("skip") when the proposed writes/copies are invalid. Pure
// validation: no layer state is modified here.
5652 static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5653 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5654 const VkCopyDescriptorSet *pDescriptorCopies) {
// Honor the per-instance disable switch for this family of checks.
5655 if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
5656 // First thing to do is perform map look-ups.
5657 // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
5658 // so we can't just do a single map look-up up-front, but do them individually in functions below
5660 // Now make call(s) that validate state, but don't perform state updates in this function
5661 // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
5662 // namespace which will parse params and make calls into specific class instances
5663 return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
5664 descriptorCopyCount, pDescriptorCopies);
5666 // PreCallRecord* handles recording state updates prior to the call down chain to UpdateDescriptorSets()
// (The comment above previously said "PostCallRecord*"; this helper is invoked
// before dispatch -- see the "update before call down chain" note in
// UpdateDescriptorSets.)
5667 static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5668 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5669 const VkCopyDescriptorSet *pDescriptorCopies) {
// Delegate the per-set state mutation to the cvdescriptorset helpers.
5670 cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
// Layer intercept for vkUpdateDescriptorSets. The API returns void, so there
// is no result to observe: state is recorded before dispatching down the
// chain. (The `if (!skip)` gating lines are elided from this excerpt.)
5674 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
5675 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5676 const VkCopyDescriptorSet *pDescriptorCopies) {
5677 // Only map look-up at top level is for device-level layer_data
5678 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5679 unique_lock_t lock(global_lock);
5680 bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5683 // Since UpdateDescriptorSets() is void, nothing to check prior to updating state & we can update before call down chain
5684 PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5687 dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
// Layer intercept for vkAllocateCommandBuffers: dispatch first, then on
// VK_SUCCESS register each new VkCommandBuffer with its pool and create the
// layer's per-CB tracking node (GLOBAL_CB_NODE) under global_lock.
5692 VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
5693 VkCommandBuffer *pCommandBuffer) {
5694 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5695 VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
5696 if (VK_SUCCESS == result) {
5697 unique_lock_t lock(global_lock);
5698 auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
5701 for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
5702 // Add command buffer to its commandPool map
5703 pPool->commandBuffers.insert(pCommandBuffer[i]);
// Raw `new` is the file's convention: ownership is held by
// dev_data->commandBufferMap (freed elsewhere when the CB is destroyed).
5704 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
5705 // Add command buffer to map
5706 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
// ResetCommandBufferState takes the handle, so the map entry is inserted
// first -- presumably the reset looks the node up by handle; confirm.
5707 ResetCommandBufferState(dev_data, pCommandBuffer[i]);
5708 pCB->createInfo = *pCreateInfo;
5709 pCB->device = device;
5717 // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
// Binds the framebuffer object itself to cb_state, then binds each attachment's
// image view (presumably so object destruction can invalidate the CB; the
// guard around the view binding is elided from this excerpt).
5718 static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
5719 addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
5721 for (auto attachment : fb_state->attachments) {
5722 auto view_state = attachment.view_state;
5724 AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
// Layer intercept for vkBeginCommandBuffer. Validates the CB's lifecycle state
// (not in-flight, not already RECORDING, implicit-reset rules), validates
// secondary-CB inheritance info (render pass compatibility, subpass index,
// precise occlusion queries), then records begin/inheritance state before
// dispatching down the chain.
5729 VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
5731 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5732 unique_lock_t lock(global_lock);
5733 // Validate command buffer level
5734 GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
5736 // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
// Begin on a CB still pending execution on a queue is an error (16e00062).
5737 if (cb_node->in_use.load()) {
5738 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5739 HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
5740 "Calling vkBeginCommandBuffer() on active command buffer %" PRIx64
5741 " before it has completed. You must check command buffer fence before this call.",
5742 HandleToUint64(commandBuffer));
// vkBeginCommandBuffer implicitly resets the CB, so drop memory references now.
5744 clear_cmd_buf_and_mem_references(dev_data, cb_node);
5745 if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
5746 // Secondary Command Buffer
5747 const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
5750 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5751 HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00066,
5752 "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64 ") must have inheritance info.",
5753 HandleToUint64(commandBuffer));
// RENDER_PASS_CONTINUE secondaries must be compatible with the inherited
// render pass / framebuffer they will execute inside.
5755 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
5756 assert(pInfo->renderPass);
5757 string errorString = "";
5758 auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
5760 if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
5761 // renderPass that framebuffer was created with must be compatible with local renderPass
5763 validateRenderPassCompatibility(dev_data, "framebuffer", framebuffer->rp_state.get(),
5764 "command buffer", GetRenderPassState(dev_data, pInfo->renderPass),
5765 "vkBeginCommandBuffer()", VALIDATION_ERROR_0280006e);
5767 // Connect this framebuffer and its children to this cmdBuffer
5768 AddFramebufferBinding(dev_data, cb_node, framebuffer);
// VK_QUERY_CONTROL_PRECISE_BIT requires both occlusionQueryEnable and the
// occlusionQueryPrecise device feature (16e00068).
5771 if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
5772 (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
5773 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5774 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
5775 VALIDATION_ERROR_16e00068,
5776 "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
5777 ") must not have VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device "
5778 "does not support precise occlusion queries.",
5779 HandleToUint64(commandBuffer));
// Inherited subpass index must exist in the inherited render pass (0280006c).
5782 if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
5783 auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
5785 if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
5786 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5787 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
5788 VALIDATION_ERROR_0280006c,
5789 "vkBeginCommandBuffer(): Secondary Command Buffers (0x%" PRIx64
5790 ") must have a subpass index (%d) that is less than the number of subpasses (%d).",
5791 HandleToUint64(commandBuffer), pInfo->subpass, renderPass->createInfo.subpassCount);
// Lifecycle-state checks: Begin while RECORDING is an error; Begin on a
// RECORDED/INVALID CB is an implicit reset, legal only if the pool allows it.
5796 if (CB_RECORDING == cb_node->state) {
5797 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5798 HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
5799 "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIx64
5800 ") in the RECORDING state. Must first call vkEndCommandBuffer().",
5801 HandleToUint64(commandBuffer));
5802 } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
5803 VkCommandPool cmdPool = cb_node->createInfo.commandPool;
5804 auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5805 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5807 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5808 HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00064,
5809 "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIx64
5810 ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIx64
5811 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
5812 HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
// Perform the implicit reset in the layer's state tracker.
5814 ResetCommandBufferState(dev_data, commandBuffer);
5816 // Set updated state here in case implicit reset occurs above
5817 cb_node->state = CB_RECORDING;
5818 cb_node->beginInfo = *pBeginInfo;
// Deep-copy pInheritanceInfo: the app's pointer is not guaranteed to outlive
// this call, so repoint beginInfo at the layer-owned copy.
5819 if (cb_node->beginInfo.pInheritanceInfo) {
5820 cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
5821 cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
5822 // If we are a secondary command-buffer and inheriting. Update the items we should inherit.
5823 if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
5824 (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5825 cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
5826 cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
5827 cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
5828 cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
// Fail before touching the driver when validation flagged an error.
// (The guarding `if (skip)` line is elided from this excerpt.)
5834 return VK_ERROR_VALIDATION_FAILED_EXT;
5836 VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
// Drop the per-recording descriptor-set validation cache: cached results are
// only valid for the recording that just ended.
// NOTE(review): dev_data appears unused in the visible body.
5840 static void PostCallRecordEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
5841 // Cached validation is specific to a specific recording of a specific command buffer.
5842 for (auto descriptor_set : cb_state->validated_descriptor_sets) {
5843 descriptor_set->ClearCachedValidation(cb_state);
5845 cb_state->validated_descriptor_sets.clear();
// Layer intercept for vkEndCommandBuffer. Validates that recording may end
// (not inside a render pass where disallowed, no queries still active), then
// dispatches and marks the CB RECORDED on success.
5848 VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
5850 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5851 unique_lock_t lock(global_lock);
5852 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
// The inside-render-pass check applies to primaries and to secondaries that
// were NOT begun with RENDER_PASS_CONTINUE (which legitimately record inside).
5854 if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
5855 !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
5856 // This needs spec clarification to update valid usage, see comments in PR:
5857 // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
5858 skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
5860 skip |= ValidateCmd(dev_data, pCB, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
// Every query begun in this CB must have been ended before EndCommandBuffer.
5861 for (auto query : pCB->activeQueries) {
5862 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5863 HandleToUint64(commandBuffer), VALIDATION_ERROR_2740007a,
5864 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d.",
5865 HandleToUint64(query.pool), query.index);
5870 auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
5872 PostCallRecordEndCommandBuffer(dev_data, pCB);
5873 if (VK_SUCCESS == result) {
5874 pCB->state = CB_RECORDED;
// Validation-failure path (guarding branch elided from this excerpt).
5878 return VK_ERROR_VALIDATION_FAILED_EXT;
// Layer intercept for vkResetCommandBuffer. Explicit reset is only legal when
// the CB's pool was created with RESET_COMMAND_BUFFER_BIT and the CB is not
// in flight; on driver success the layer's tracked CB state is reset too.
5882 VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
5884 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5885 unique_lock_t lock(global_lock);
5886 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5887 VkCommandPool cmdPool = pCB->createInfo.commandPool;
5888 auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5889 if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5890 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5891 HandleToUint64(commandBuffer), VALIDATION_ERROR_3260005c,
5892 "Attempt to reset command buffer (0x%" PRIx64 ") created from command pool (0x%" PRIx64
5893 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
5894 HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
// A CB pending execution on a queue may not be reset (3260005a).
5896 skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
5898 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5899 VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
5900 if (VK_SUCCESS == result) {
5902 ResetCommandBufferState(dev_data, commandBuffer);
// Layer intercept for vkCmdBindPipeline. Validates queue capability and CB
// recording state, then records the binding: refreshes the dynamic-vs-static
// status mask for graphics pipelines and links the pipeline to the CB.
5908 VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
5909 VkPipeline pipeline) {
5911 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5912 unique_lock_t lock(global_lock);
5913 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
5915 skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
5916 VALIDATION_ERROR_18002415);
5917 skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
5918 // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616 -- using ValidatePipelineBindPoint
5920 auto pipe_state = getPipelineState(dev_data, pipeline);
5921 if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
// Rebuild the CB status bits: clear the previous pipeline's static states,
// compute the new static mask from pDynamicState, then mark those states set
// (static states are "set" by the pipeline itself, not by vkCmdSet* calls).
5922 cb_state->status &= ~cb_state->static_status;
5923 cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
5924 cb_state->status |= cb_state->static_status;
5926 cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
5927 set_pipeline_state(pipe_state);
5928 skip |= validate_dual_src_blend_feature(dev_data, pipe_state);
5929 addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
5932 if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
// Layer intercept for vkCmdSetViewport. Errors if the bound pipeline declared
// viewport as static state (1e00098a); otherwise records which viewport slots
// are set and marks CBSTATUS_VIEWPORT_SET before dispatching.
5935 VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
5936 const VkViewport *pViewports) {
5938 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5939 unique_lock_t lock(global_lock);
5940 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5942 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
5943 skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORT, "vkCmdSetViewport()");
// static_status bit set => pipeline lacks VK_DYNAMIC_STATE_VIEWPORT.
5944 if (pCB->static_status & CBSTATUS_VIEWPORT_SET) {
5945 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5946 HandleToUint64(commandBuffer), VALIDATION_ERROR_1e00098a,
5947 "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag..");
// Mark the contiguous range [firstViewport, firstViewport+viewportCount) set.
5950 pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
5951 pCB->status |= CBSTATUS_VIEWPORT_SET;
5955 if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
// Layer intercept for vkCmdSetScissor. Mirror image of CmdSetViewport: errors
// if scissor is static pipeline state (1d80049c), else records the scissor
// slot mask and CBSTATUS_SCISSOR_SET before dispatching.
5958 VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
5959 const VkRect2D *pScissors) {
5961 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5962 unique_lock_t lock(global_lock);
5963 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5965 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
5966 skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSOR, "vkCmdSetScissor()");
// static_status bit set => pipeline lacks VK_DYNAMIC_STATE_SCISSOR.
5967 if (pCB->static_status & CBSTATUS_SCISSOR_SET) {
5968 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5969 HandleToUint64(commandBuffer), VALIDATION_ERROR_1d80049c,
5970 "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag..");
// Mark the contiguous range [firstScissor, firstScissor+scissorCount) set.
5973 pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
5974 pCB->status |= CBSTATUS_SCISSOR_SET;
5978 if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
// Layer intercept for vkCmdSetLineWidth: graphics-queue-only dynamic state;
// errors if line width is static pipeline state (1d600626), else records
// CBSTATUS_LINE_WIDTH_SET before dispatching.
5981 VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
5983 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5984 unique_lock_t lock(global_lock);
5985 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
5987 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
5988 skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
5990 if (pCB->static_status & CBSTATUS_LINE_WIDTH_SET) {
5991 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5992 HandleToUint64(commandBuffer), VALIDATION_ERROR_1d600626,
5993 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
5996 pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6000 if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
// Layer intercept for vkCmdSetDepthBias. Besides the usual static-state check
// (1cc0062a), also enforces that a non-zero depthBiasClamp requires the
// depthBiasClamp device feature (1cc0062c).
6003 VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
6004 float depthBiasSlopeFactor) {
6006 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6007 unique_lock_t lock(global_lock);
6008 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6010 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
6011 skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
6012 if (pCB->static_status & CBSTATUS_DEPTH_BIAS_SET) {
6013 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6014 HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062a,
6015 "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
// Feature gate: clamping depth bias requires the depthBiasClamp feature.
6017 if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
6018 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6019 HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062c,
6020 "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
6024 pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6029 dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
// Layer intercept for vkCmdSetBlendConstants: errors if blend constants are
// static pipeline state (1ca004c8), else records CBSTATUS_BLEND_CONSTANTS_SET
// before dispatching.
6032 VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6034 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6035 unique_lock_t lock(global_lock);
6036 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6038 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
6039 skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
6040 if (pCB->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
6041 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6042 HandleToUint64(commandBuffer), VALIDATION_ERROR_1ca004c8,
6043 "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
6046 pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6050 if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
// Layer intercept for vkCmdSetDepthBounds: errors if depth bounds are static
// pipeline state (1ce004ae), else records CBSTATUS_DEPTH_BOUNDS_SET before
// dispatching.
6053 VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6055 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6056 unique_lock_t lock(global_lock);
6057 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6059 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
6060 skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
6061 if (pCB->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
6062 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6063 HandleToUint64(commandBuffer), VALIDATION_ERROR_1ce004ae,
6064 "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
6067 pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6071 if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
// Layer intercept for vkCmdSetStencilCompareMask: errors if the stencil
// compare mask is static pipeline state (1da004b4), else records
// CBSTATUS_STENCIL_READ_MASK_SET before dispatching.
6074 VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6075 uint32_t compareMask) {
6077 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6078 unique_lock_t lock(global_lock);
6079 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6082 ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
6083 skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
6084 if (pCB->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
6086 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6087 HandleToUint64(commandBuffer), VALIDATION_ERROR_1da004b4,
6088 "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
6091 pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6095 if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
// Layer intercept for vkCmdSetStencilWriteMask: errors if the stencil write
// mask is static pipeline state (1de004b6), else records
// CBSTATUS_STENCIL_WRITE_MASK_SET before dispatching.
6098 VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6100 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6101 unique_lock_t lock(global_lock);
6102 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6105 ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
6106 skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
6107 if (pCB->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
6108 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6109 HandleToUint64(commandBuffer), VALIDATION_ERROR_1de004b6,
6110 "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
6113 pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6117 if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
// Layer intercept for vkCmdSetStencilReference: errors if the stencil
// reference is static pipeline state (1dc004b8), else records
// CBSTATUS_STENCIL_REFERENCE_SET before dispatching.
6120 VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6122 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6123 unique_lock_t lock(global_lock);
6124 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6127 ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
6128 skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
6129 if (pCB->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
6130 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6131 HandleToUint64(commandBuffer), VALIDATION_ERROR_1dc004b8,
6132 "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
6135 pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6139 if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
6142 // Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
// Records a vkCmdBindDescriptorSets (or push) into cb_state->lastBound for the
// given bind point: resizes the bound-set arrays, invalidates previously bound
// sets whose layout-compat ids no longer match, installs the new sets, and
// slices p_dynamic_offsets across the sets that declare dynamic descriptors.
// NOTE(review): `descriptor_sets` is a std::vector taken by value -- a const&
// would avoid copying the vector on every bind; confirm no caller relies on
// the copy.
6143 static void UpdateLastBoundDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6144 VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout,
6145 uint32_t first_set, uint32_t set_count,
6146 const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
6147 uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
6150 if (0 == set_count) return;
6151 assert(pipeline_layout);
// Release-build guard: the assert above compiles out, so bail explicitly.
6152 if (!pipeline_layout) return;
6154 uint32_t required_size = first_set + set_count;
6155 const uint32_t last_binding_index = required_size - 1;
6156 assert(last_binding_index < pipeline_layout->compat_for_set.size());
6158 // Some useful shorthand
6159 auto &last_bound = cb_state->lastBound[pipeline_bind_point];
6161 auto &bound_sets = last_bound.boundDescriptorSets;
6162 auto &dynamic_offsets = last_bound.dynamicOffsets;
6163 auto &bound_compat_ids = last_bound.compat_id_for_set;
6164 auto &pipe_compat_ids = pipeline_layout->compat_for_set;
6166 const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
// Invariant: the three per-set arrays are kept the same length everywhere.
6167 assert(current_size == dynamic_offsets.size());
6168 assert(current_size == bound_compat_ids.size());
6170 // We need this three times in this function, but nowhere else
// Clears last_bound.push_descriptor_set when ds is the push descriptor set;
// returns true if it was (at most one push set can be bound, hence "break").
6171 auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
6172 if (ds && ds->IsPushDescriptor()) {
6173 assert(ds == last_bound.push_descriptor_set.get());
6174 last_bound.push_descriptor_set = nullptr;
6180 // Clean up the "disturbed" before and after the range to be set
6181 if (required_size < current_size) {
6182 if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
6183 // We're disturbing those after last, we'll shrink below, but first need to check for and cleanup the push_descriptor
6184 for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
6185 if (push_descriptor_cleanup(bound_sets[set_idx])) break;
6188 // We're not disturbing past last, so leave the upper binding data alone.
6189 required_size = current_size;
6193 // We resize if we need more set entries or if those past "last" are disturbed
6194 if (required_size != current_size) {
6195 // TODO: put these size tied things in a struct (touches many lines)
6196 bound_sets.resize(required_size);
6197 dynamic_offsets.resize(required_size);
6198 bound_compat_ids.resize(required_size);
6201 // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6202 for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
6203 if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) {
6204 push_descriptor_cleanup(bound_sets[set_idx]);
6205 bound_sets[set_idx] = nullptr;
6206 dynamic_offsets[set_idx].clear();
6207 bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];
6211 // Now update the bound sets with the input sets
6212 const uint32_t *input_dynamic_offsets = p_dynamic_offsets; // "read" pointer for dynamic offset data
6213 for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
6214 auto set_idx = input_idx + first_set; // set_idx is index within layout, input_idx is index within input descriptor sets
6215 cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
6217 // Record binding (or push)
6218 push_descriptor_cleanup(bound_sets[set_idx]);
6219 bound_sets[set_idx] = descriptor_set;
6220 bound_compat_ids[set_idx] = pipe_compat_ids[set_idx]; // compat ids are canonical *per* set index
6222 if (descriptor_set) {
6223 auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
6224 // TODO: Add logic for tracking push_descriptor offsets (here or in caller)
// Consume this set's slice of the caller-supplied dynamic offset array;
// the offsets are laid out consecutively per set in binding order.
6225 if (set_dynamic_descriptor_count && input_dynamic_offsets) {
6226 const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
6227 dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
6228 input_dynamic_offsets = end_offset;
6229 assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
6231 dynamic_offsets[set_idx].clear();
6233 if (!descriptor_set->IsPushDescriptor()) {
6234 // Can't cache validation of push_descriptors
6235 cb_state->validated_descriptor_sets.insert(descriptor_set);
6241 // Update the bound state for the bind point, including the effects of incompatible pipeline layouts
// Record phase for vkCmdBindDescriptorSets: resolve each handle to its tracked
// DescriptorSet and, if at least one resolved, fold them into lastBound via
// UpdateLastBoundDescriptorSets and remember the bound pipeline layout.
6242 static void PreCallRecordCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6243 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
6244 uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
6245 uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
6246 auto pipeline_layout = getPipelineLayout(device_data, layout);
6247 std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets;
6248 descriptor_sets.reserve(setCount);
6250 // Construct a list of the descriptors
// Unknown handles resolve to nullptr but keep their slot so indices line up.
6251 bool found_non_null = false;
6252 for (uint32_t i = 0; i < setCount; i++) {
6253 cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[i]);
6254 descriptor_sets.emplace_back(descriptor_set);
6255 found_non_null |= descriptor_set != nullptr;
6257 if (found_non_null) { // which implies setCount > 0
6258 UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount,
6259 descriptor_sets, dynamicOffsetCount, pDynamicOffsets);
6260 cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
// Validation-only pass for vkCmdBindDescriptorSets. Checks queue-family capability and command
// state, verifies each bound set exists, is updated, and is layout-compatible with the pipeline
// layout, and confirms that dynamic offsets match the bound dynamic descriptors in both count
// and device-limit alignment. Returns 'skip' — true when a logged error should suppress the call.
// NOTE(review): this listing appears to omit some original lines (e.g. 'bool skip = false;',
// closing braces, 'return skip;') — verify against the complete file.
6264 static bool PreCallValidateCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6265 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
6266 uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
6267 uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
6269 skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6270 VALIDATION_ERROR_17c02415);
6271 skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6272 // Track total count of dynamic descriptor types to make sure we have an offset for each one
6273 uint32_t total_dynamic_descriptors = 0;
6274 string error_string = "";
6275 uint32_t last_set_index = firstSet + setCount - 1;
// Grow the per-bind-point tracking vectors so the highest set index being bound fits.
6277 if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6278 cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
6279 cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
6280 cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1);
6282 auto pipeline_layout = getPipelineLayout(device_data, layout);
6283 for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
6284 cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
6285 if (descriptor_set) {
// Warn (not error) on binding a never-updated set that actually has descriptors.
6286 if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
6288 device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6289 HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
6290 "Descriptor Set 0x%" PRIx64 " bound but it was never updated. You may want to either update it or not bind it.",
6291 HandleToUint64(pDescriptorSets[set_idx]));
6293 // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6294 if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
6296 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6297 HandleToUint64(pDescriptorSets[set_idx]), VALIDATION_ERROR_17c002cc,
6298 "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
6299 "pipelineLayout 0x%" PRIx64 " due to: %s.",
6300 set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str());
6303 auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
6305 if (set_dynamic_descriptor_count) {
6306 // First make sure we won't overstep bounds of pDynamicOffsets array
6307 if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
6308 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6309 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
6310 DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT,
6311 "descriptorSet #%u (0x%" PRIx64
6312 ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. "
6313 "There must be one dynamic offset for each dynamic descriptor being bound.",
6314 set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
6315 (dynamicOffsetCount - total_dynamic_descriptors));
6316 } else { // Validate dynamic offsets and Dynamic Offset Minimums
6317 uint32_t cur_dyn_offset = total_dynamic_descriptors;
// Each dynamic UBO/SSBO offset must be a multiple of the corresponding device alignment limit.
6318 for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
6319 if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6320 if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
6321 device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
6323 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6324 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
6325 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6326 "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
6327 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6328 device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6331 } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6332 if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
6333 device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
6335 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6336 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
6337 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6338 "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
6339 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6340 device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6345 // Keep running total of dynamic descriptor count to verify at the end
6346 total_dynamic_descriptors += set_dynamic_descriptor_count;
// Unknown handle: the set was never created or was destroyed.
6350 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6351 HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_INVALID_SET,
6352 "Attempt to bind descriptor set 0x%" PRIx64 " that doesn't exist!",
6353 HandleToUint64(pDescriptorSets[set_idx]));
6356 // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6357 if (total_dynamic_descriptors != dynamicOffsetCount) {
6358 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6359 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_17c002ce,
6360 "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
6361 "exactly match the number of dynamic descriptors.",
6362 setCount, total_dynamic_descriptors, dynamicOffsetCount);
// vkCmdBindDescriptorSets entry point: under the global lock, validates the bind, records the
// new last-bound state, then forwards the call to the next layer/driver dispatch table.
6367 VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
6368 VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
6369 const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6370 const uint32_t *pDynamicOffsets) {
6372 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6373 unique_lock_t lock(global_lock);
6374 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
6376 skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
6377 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6379 PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
6380 dynamicOffsetCount, pDynamicOffsets);
6382 device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6383 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6389 // Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
6390 // Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
6391 // TODO add vkCmdBindPipeline bind_point validation using this call.
// Checks that 'bind_point' (graphics or compute) is supported by the queue family of the
// command pool the command buffer was allocated from. 'bind_errors' supplies the per-bind-point
// VUID to report, since callers (e.g. vkCmdBindPipeline) have bind-point-specific error codes.
6392 bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6393 const char *func_name,
6394 const std::array<UNIQUE_VALIDATION_ERROR_CODE, VK_PIPELINE_BIND_POINT_RANGE_SIZE> &bind_errors) {
6396 auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
6397 if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
// Map bind point -> queue capability flag it requires (graphics / compute).
6398 static const VkQueueFlags flag_mask[VK_PIPELINE_BIND_POINT_RANGE_SIZE] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT};
6399 const auto bind_point_index = bind_point - VK_PIPELINE_BIND_POINT_BEGIN_RANGE; // typeof enum is not defined, use auto
6400 const auto &qfp = GetPhysDevProperties(device_data)->queue_family_properties[pool->queueFamilyIndex];
6401 if (0 == (qfp.queueFlags & flag_mask[bind_point_index])) {
6402 const UNIQUE_VALIDATION_ERROR_CODE error = bind_errors[bind_point_index];
6403 auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
6404 auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
6405 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6407 "%s: CommandBuffer 0x%" PRIxLEAST64 " was allocated from VkCommandPool 0x%" PRIxLEAST64
6408 " that does not support bindpoint %s.",
6409 func_name, cb_u64, cp_u64, string_VkPipelineBindPoint(bind_point));
// Validation-only pass for vkCmdPushDescriptorSetKHR: checks command state, queue capability,
// bind-point support, and that 'set' is a valid in-range index whose layout was created with
// VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR.
6415 static bool PreCallValidateCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6416 const VkPipelineBindPoint bind_point, const VkPipelineLayout layout,
6417 const uint32_t set, const uint32_t descriptor_write_count,
6418 const VkWriteDescriptorSet *descriptor_writes, const char *func_name) {
6420 skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
6421 skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
6422 VALIDATION_ERROR_1be02415);
// Same VUID for both bind points here, hence the duplicated array entry.
6423 skip |= ValidatePipelineBindPoint(device_data, cb_state, bind_point, func_name,
6424 {{VALIDATION_ERROR_1be002d6, VALIDATION_ERROR_1be002d6}});
6425 auto layout_data = getPipelineLayout(device_data, layout);
6427 // Validate the set index points to a push descriptor set and is in range
6429 const auto &set_layouts = layout_data->set_layouts;
6430 const auto layout_u64 = HandleToUint64(layout);
6431 if (set < set_layouts.size()) {
6432 const auto *dsl = set_layouts[set].get();
6433 if (dsl && (0 == (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))) {
6434 skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6435 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, VALIDATION_ERROR_1be002da,
6436 "%s: Set index %" PRIu32
6437 " does not match push descriptor set layout index for VkPipelineLayout 0x%" PRIxLEAST64 ".",
6438 func_name, set, layout_u64);
6441 skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
6442 layout_u64, VALIDATION_ERROR_1be002d8,
6443 "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout 0x%" PRIxLEAST64 " (set < %" PRIu32
6445 func_name, set, layout_u64, static_cast<uint32_t>(set_layouts.size()));
// State-update pass for vkCmdPushDescriptorSetKHR: creates a transient DescriptorSet backed by
// the push-descriptor layout, binds it as set index 'set', and stores ownership of it (plus the
// pipeline layout) on the command buffer's last-bound state for the bind point.
6451 static void PreCallRecordCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6452 VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
6453 uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
6454 const auto &pipeline_layout = getPipelineLayout(device_data, layout);
6455 if (!pipeline_layout) return;
// Pool/handle of 0: this set is layer-internal only, never seen by the app or driver.
6456 std::unique_ptr<cvdescriptorset::DescriptorSet> new_desc{
6457 new cvdescriptorset::DescriptorSet(0, 0, pipeline_layout->set_layouts[set], device_data)};
6459 std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {new_desc.get()};
6460 UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
6461 cb_state->lastBound[pipelineBindPoint].push_descriptor_set = std::move(new_desc);
6462 cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
// vkCmdPushDescriptorSetKHR entry point: validate, record layer state, then forward to driver.
6465 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
6466 VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
6467 const VkWriteDescriptorSet *pDescriptorWrites) {
6468 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6469 unique_lock_t lock(global_lock);
6470 auto cb_state = GetCBNode(device_data, commandBuffer);
6471 bool skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
6472 pDescriptorWrites, "vkCmdPushDescriptorSetKHR()");
6474 PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
6477 device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
// Returns the required byte alignment for an index-buffer offset given the index type
// (UINT16 -> 2, UINT32 -> 4; return statements omitted from this listing).
6482 static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
6483 switch (indexType) {
6484 case VK_INDEX_TYPE_UINT16:
6486 case VK_INDEX_TYPE_UINT32:
6489 // Not a real index type. Express no alignment requirement here; we expect upper layer
6490 // to have already picked up on the enum being nonsense.
// vkCmdBindIndexBuffer entry point: validates buffer usage flags, queue capability, bound
// memory and offset alignment for the index type; queues a submit-time memory-validity check;
// marks the index buffer bound; then forwards the call to the driver.
6495 VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
6496 VkIndexType indexType) {
6498 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6499 unique_lock_t lock(global_lock);
6501 auto buffer_state = GetBufferState(dev_data, buffer);
6502 auto cb_node = GetCBNode(dev_data, commandBuffer);
6504 assert(buffer_state);
6506 skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, VALIDATION_ERROR_17e00362,
6507 "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
6508 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
6509 skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6510 skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
// Offset must be a multiple of the index element size (2 or 4 bytes).
6511 auto offset_align = GetIndexAlignment(indexType);
6512 if (offset % offset_align) {
6513 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6514 HandleToUint64(commandBuffer), VALIDATION_ERROR_17e00360,
6515 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
6516 string_VkIndexType(indexType));
// Defer the "buffer memory contains valid data" check until queue submit time.
6521 std::function<bool()> function = [=]() {
6522 return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
6524 cb_node->queue_submit_functions.push_back(function);
6525 cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6528 dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
// Records the vertex buffers bound by vkCmdBindVertexBuffers into the command buffer's
// current-draw tracking array, growing it as needed so bindings land at their absolute index.
6531 void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6532 uint32_t end = firstBinding + bindingCount;
6533 if (pCB->currentDrawData.buffers.size() < end) {
6534 pCB->currentDrawData.buffers.resize(end);
6536 for (uint32_t i = 0; i < bindingCount; ++i) {
6537 pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6541 static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
// vkCmdBindVertexBuffers entry point: validates queue capability and, per binding, the buffer's
// usage flags, bound memory and in-range offset; queues submit-time memory-validity checks;
// updates the current-draw buffer tracking; then forwards the call to the driver.
6543 VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
6544 const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
6546 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6547 unique_lock_t lock(global_lock);
6549 auto cb_node = GetCBNode(dev_data, commandBuffer);
6552 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
6553 skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
6554 for (uint32_t i = 0; i < bindingCount; ++i) {
6555 auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
6556 assert(buffer_state);
6557 skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, VALIDATION_ERROR_182004e6,
6558 "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
6559 skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
6560 if (pOffsets[i] >= buffer_state->createInfo.size) {
6561 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
6562 HandleToUint64(buffer_state->buffer), VALIDATION_ERROR_182004e4,
6563 "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
// Second pass: defer per-buffer "memory contains valid data" checks to queue submit time.
6569 for (uint32_t i = 0; i < bindingCount; ++i) {
6570 auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
6571 assert(buffer_state);
6572 std::function<bool()> function = [=]() {
6573 return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
6575 cb_node->queue_submit_functions.push_back(function);
6578 updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
6581 dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6584 // Expects global_lock to be held by caller
// For every image view and buffer the command buffer writes (updateImages/updateBuffers),
// queues a submit-time callback that marks the underlying memory as containing valid data.
6585 static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6586 for (auto imageView : pCB->updateImages) {
6587 auto view_state = GetImageViewState(dev_data, imageView);
6588 if (!view_state) continue;
6590 auto image_state = GetImageState(dev_data, view_state->create_info.image);
6591 assert(image_state);
6592 std::function<bool()> function = [=]() {
6593 SetImageMemoryValid(dev_data, image_state, true);
6596 pCB->queue_submit_functions.push_back(function);
6598 for (auto buffer : pCB->updateBuffers) {
6599 auto buffer_state = GetBufferState(dev_data, buffer);
6600 assert(buffer_state);
6601 std::function<bool()> function = [=]() {
6602 SetBufferMemoryValid(dev_data, buffer_state, true);
6605 pCB->queue_submit_functions.push_back(function);
6609 // Generic function to handle validation for all CmdDraw* type functions
// Shared validation for all vkCmdDraw*/vkCmdDispatch* calls: looks up the CB state (returned via
// 'cb_state'), checks queue capability, command-buffer state, draw state, and that the call is
// inside a render pass for graphics but outside one for compute.
6610 static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
6611 CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
6612 UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
6613 UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
6615 *cb_state = GetCBNode(dev_data, cmd_buffer);
6617 skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
6618 skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
6619 skip |= ValidateDrawState(dev_data, *cb_state, cmd_type, indexed, bind_point, caller, dynamic_state_msg_code);
6620 skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
6621 : insideRenderPass(dev_data, *cb_state, caller, msg_code);
6626 // Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
// Shared state update for draw and dispatch: refresh bound-state tracking for the bind point and
// mark any storage images/buffers the CB writes as valid at submit time.
6627 static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6628 UpdateDrawState(dev_data, cb_state, bind_point);
6629 MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
6632 // Generic function to handle state update for all CmdDraw* type functions
// Draw-specific state update: common draw/dispatch update plus per-draw resource tracking and
// the hasDrawCmd flag.
6633 static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6634 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6635 updateResourceTrackingOnDraw(cb_state);
6636 cb_state->hasDrawCmd = true;
// vkCmdDraw validation: delegates to the generic draw validator with CMD_DRAW and its VUIDs.
6639 static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
6640 GLOBAL_CB_NODE **cb_state, const char *caller) {
6641 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6642 VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
// vkCmdDraw state update: standard draw-type bookkeeping.
6645 static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6646 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
// vkCmdDraw entry point: validate under the global lock, forward to the driver, record state.
6649 VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6650 uint32_t firstVertex, uint32_t firstInstance) {
6651 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6652 GLOBAL_CB_NODE *cb_state = nullptr;
6653 unique_lock_t lock(global_lock);
6654 bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
6657 dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6659 PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
// vkCmdDrawIndexed validation: generic draw validator with CMD_DRAWINDEXED and its VUIDs.
6664 static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
6665 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
6666 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6667 VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
// vkCmdDrawIndexed state update: standard draw-type bookkeeping.
6670 static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6671 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
// vkCmdDrawIndexed entry point: validate (indexed=true), forward to driver, record state.
6674 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
6675 uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
6676 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6677 GLOBAL_CB_NODE *cb_state = nullptr;
6678 unique_lock_t lock(global_lock);
6679 bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
6680 "vkCmdDrawIndexed()");
6683 dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
6685 PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
// vkCmdDrawIndirect validation: generic draw checks plus verifying the indirect parameter
// buffer has memory bound. Returns the buffer state via 'buffer_state'.
6690 static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6691 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
6692 const char *caller) {
6694 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6695 VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
6696 *buffer_state = GetBufferState(dev_data, buffer);
6697 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
6698 // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6699 // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
// vkCmdDrawIndirect state update: draw bookkeeping plus binding the indirect buffer to the CB.
6703 static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6704 BUFFER_STATE *buffer_state) {
6705 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6706 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// vkCmdDrawIndirect entry point: validate, forward to driver, record state.
6709 VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
6711 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6712 GLOBAL_CB_NODE *cb_state = nullptr;
6713 BUFFER_STATE *buffer_state = nullptr;
6714 unique_lock_t lock(global_lock);
6715 bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
6716 &buffer_state, "vkCmdDrawIndirect()");
6719 dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6721 PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
// vkCmdDrawIndexedIndirect validation: generic draw checks plus indirect-buffer memory binding.
6726 static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6727 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
6728 BUFFER_STATE **buffer_state, const char *caller) {
6730 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
6731 VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
6732 *buffer_state = GetBufferState(dev_data, buffer);
6733 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
6734 // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6735 // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
// vkCmdDrawIndexedIndirect state update: draw bookkeeping plus indirect-buffer binding.
6740 static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6741 BUFFER_STATE *buffer_state) {
6742 UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6743 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// vkCmdDrawIndexedIndirect entry point: validate (indexed=true), forward, record state.
6746 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
6747 uint32_t count, uint32_t stride) {
6748 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6749 GLOBAL_CB_NODE *cb_state = nullptr;
6750 BUFFER_STATE *buffer_state = nullptr;
6751 unique_lock_t lock(global_lock);
6752 bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
6753 &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
6756 dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6758 PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
// vkCmdDispatch validation: generic validator with CMD_DISPATCH, compute-queue requirement, and
// no dynamic-state VUID (VALIDATION_ERROR_UNDEFINED).
6763 static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
6764 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
6765 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
6766 VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
// vkCmdDispatch state update: dispatch bookkeeping only (no per-draw resource tracking).
6769 static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6770 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
// vkCmdDispatch entry point: validate under the global lock, forward, record state.
6773 VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6774 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6775 GLOBAL_CB_NODE *cb_state = nullptr;
6776 unique_lock_t lock(global_lock);
6778 PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
6781 dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
6783 PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
// vkCmdDispatchIndirect validation: generic dispatch checks plus indirect-buffer memory binding.
6788 static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6789 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
6790 BUFFER_STATE **buffer_state, const char *caller) {
6792 ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
6793 VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
6794 *buffer_state = GetBufferState(dev_data, buffer);
6795 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
// vkCmdDispatchIndirect state update: dispatch bookkeeping plus indirect-buffer binding.
6799 static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6800 BUFFER_STATE *buffer_state) {
6801 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6802 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
// vkCmdDispatchIndirect entry point: validate, forward, record state.
6805 VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6806 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6807 GLOBAL_CB_NODE *cb_state = nullptr;
6808 BUFFER_STATE *buffer_state = nullptr;
6809 unique_lock_t lock(global_lock);
6810 bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
6811 &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
6814 dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
6816 PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
// vkCmdCopyBuffer entry point: looks up CB and both buffer states, and only when all three are
// known, validates, records, and forwards the copy to the driver.
6821 VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6822 uint32_t regionCount, const VkBufferCopy *pRegions) {
6823 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6824 unique_lock_t lock(global_lock);
6826 auto cb_node = GetCBNode(device_data, commandBuffer);
6827 auto src_buffer_state = GetBufferState(device_data, srcBuffer);
6828 auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
6830 if (cb_node && src_buffer_state && dst_buffer_state) {
6831 bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
6833 PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
6835 device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
// vkCmdCopyImage entry point: looks up CB and both image states; when all are known, validates
// the copy (regions and layouts), records state, and forwards to the driver.
6843 VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
6844 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
6845 const VkImageCopy *pRegions) {
6847 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6848 unique_lock_t lock(global_lock);
6850 auto cb_node = GetCBNode(device_data, commandBuffer);
6851 auto src_image_state = GetImageState(device_data, srcImage);
6852 auto dst_image_state = GetImageState(device_data, dstImage);
6853 if (cb_node && src_image_state && dst_image_state) {
6854 skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
6855 srcImageLayout, dstImageLayout);
6857 PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
6860 device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
6869 // Validate that an image's sampleCount matches the requirement for a specific API call
// Checks that 'image_state' was created with exactly 'sample_count' samples; logs 'msgCode'
// with the caller-supplied 'location' string on mismatch. Returns the skip flag.
6870 bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
6871 const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
6873 if (image_state->createInfo.samples != sample_count) {
6874 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6875 HandleToUint64(image_state->image), msgCode,
6876 "%s for image 0x%" PRIx64 " was created with a sample count of %s but must be %s.", location,
6877 HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
6878 string_VkSampleCountFlagBits(sample_count));
// vkCmdBlitImage entry point: validates the blit (including filter), records state, then
// forwards to the driver.
6883 VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
6884 VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
6885 const VkImageBlit *pRegions, VkFilter filter) {
6886 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6887 unique_lock_t lock(global_lock);
6889 auto cb_node = GetCBNode(dev_data, commandBuffer);
6890 auto src_image_state = GetImageState(dev_data, srcImage);
6891 auto dst_image_state = GetImageState(dev_data, dstImage);
6893 bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
6894 srcImageLayout, dstImageLayout, filter);
6897 PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
6900 dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
// vkCmdCopyBufferToImage entry point: when CB, source buffer and destination image are all
// known, validates the copy, records state, and forwards to the driver.
6905 VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
6906 VkImageLayout dstImageLayout, uint32_t regionCount,
6907 const VkBufferImageCopy *pRegions) {
6908 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6909 unique_lock_t lock(global_lock);
6911 auto cb_node = GetCBNode(device_data, commandBuffer);
6912 auto src_buffer_state = GetBufferState(device_data, srcBuffer);
6913 auto dst_image_state = GetImageState(device_data, dstImage);
6914 if (cb_node && src_buffer_state && dst_image_state) {
6915 skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
6916 regionCount, pRegions, "vkCmdCopyBufferToImage()");
6920 // TODO: report VU01244 here, or put in object tracker?
6923 PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
6926 device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
// vkCmdCopyImageToBuffer entry point: validate the image-to-buffer copy, record layer state,
// then forward the command to the driver.
VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                                VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto src_image_state = GetImageState(device_data, srcImage);
    auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
    // Only validate/record when all three handles resolve to tracked state objects
    if (cb_node && src_image_state && dst_buffer_state) {
        skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
                                                   regionCount, pRegions, "vkCmdCopyImageToBuffer()");
        // TODO: report VU01262 here, or put in object tracker?
        PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
    device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
// Pre-call validation for vkCmdUpdateBuffer: checks that the destination buffer is bound to memory,
// was created with TRANSFER_DST usage, that the command buffer's queue supports the command, that
// the command is valid in the current CB state, and that we are outside a render pass instance.
static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
    skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
    // Validate that DST buffer has correct usage flags set
    skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                     VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    // Buffer updates are allowed on transfer, graphics, or compute queues
    skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
                                  VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
    skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
    // vkCmdUpdateBuffer must be recorded outside of a render pass instance
    skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
// Post-call record for vkCmdUpdateBuffer: binds the destination buffer to the command buffer and
// queues a deferred callback that marks the buffer's memory contents valid (the update writes it).
static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
    // Update bindings between buffer and cmd buffer
    AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
    // Deferred work; queue_submit_functions entries are run later (NOTE(review): presumably at
    // queue-submit time — confirm against the submit path, which is outside this view)
    std::function<bool()> function = [=]() {
        SetBufferMemoryValid(device_data, dst_buffer_state, true);
    cb_state->queue_submit_functions.push_back(function);
// vkCmdUpdateBuffer entry point: validate via PreCallCmdUpdateBuffer, forward to the driver,
// then record post-call layer state.
VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                           VkDeviceSize dataSize, const uint32_t *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_state = GetCBNode(dev_data, commandBuffer);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    // Destination buffer is expected to be tracked; unknown handles indicate earlier layer failure
    assert(dst_buff_state);
    skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
    dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
    // Record binding/validity state after the driver call
    PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
// vkCmdFillBuffer entry point: validate the fill, record layer state, then forward to the driver.
VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                         VkDeviceSize size, uint32_t data) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(device_data, commandBuffer);
    auto buffer_state = GetBufferState(device_data, dstBuffer);
    // Only validate/record when both handles resolve to tracked state objects
    if (cb_node && buffer_state) {
        bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
        PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
    device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
// vkCmdClearAttachments entry point: validation only (no layer state recorded here); the driver
// call is made only when validation passes.
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    // lock_guard (not unique_lock) since the lock is held for the whole scope, including dispatch
    lock_guard_t lock(global_lock);
    skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
    if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
// vkCmdClearColorImage entry point: validate the clear, record layer state (shared record helper
// with the depth/stencil clear), then forward to the driver.
VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                              const VkClearColorValue *pColor, uint32_t rangeCount,
                                              const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
// vkCmdClearDepthStencilImage entry point: validate the clear, record layer state (shared record
// helper with the color clear), then forward to the driver.
VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                                                     const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                                                     const VkImageSubresourceRange *pRanges) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
    dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
// vkCmdResolveImage entry point: validate the multisample resolve, record layer state, then
// forward the command to the driver.
VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
                                           VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
                                           const VkImageResolve *pRegions) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto src_image_state = GetImageState(dev_data, srcImage);
    auto dst_image_state = GetImageState(dev_data, dstImage);
    bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
    PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
    dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
// vkGetImageSubresourceLayout entry point: validate the query (image tiling/aspect rules), then
// forward to the driver to fill in pLayout.
VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
                                                     VkSubresourceLayout *pLayout) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
    device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
// Records 'stageMask' for 'event' in the command buffer's event-to-stage map, and also in the
// submitting queue's map when that queue is known. Used as the deferred eventUpdates callback
// recorded by CmdSetEvent/CmdResetEvent (which pass the queue in at execution time).
bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    pCB->eventToStageMap[event] = stageMask;
    // The queue may not be tracked; only update its map when found
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
// vkCmdSetEvent entry point: validates queue capabilities, CB state, the render-pass restriction,
// and the stage mask; binds the event to the CB; and records a deferred update that applies the
// event's stage mask with the submitting queue. Driver call is made only if validation passes.
VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    // vkCmdSetEvent is only legal on graphics or compute queues
    skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_1d402415);
    skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
    // Must be recorded outside of a render pass instance
    skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
    // Geometry/tessellation stage bits require the corresponding device features
    skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
                                         VALIDATION_ERROR_1d4008fe);
    auto event_state = GetEventNode(dev_data, event);
    // Cross-link event state and command buffer for invalidation tracking
    addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
    event_state->cb_bindings.insert(pCB);
    pCB->events.push_back(event);
    // Track set-before-wait ordering for this CB
    if (!pCB->waitedEvents.count(event)) {
        pCB->writeEventsBeforeWait.push_back(event);
    // Deferred: stage mask is recorded against the actual queue at execution time
    pCB->eventUpdates.emplace_back([=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, stageMask); });
    if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
// vkCmdResetEvent entry point: mirror of CmdSetEvent, except the deferred update clears the
// event's recorded stage mask to 0. Driver call is made only if validation passes.
VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
    // vkCmdResetEvent is only legal on graphics or compute queues
    skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_1c402415);
    skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
    // Must be recorded outside of a render pass instance
    skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
    // Geometry/tessellation stage bits require the corresponding device features
    skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
                                         VALIDATION_ERROR_1c400906);
    auto event_state = GetEventNode(dev_data, event);
    // Cross-link event state and command buffer for invalidation tracking
    addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
    event_state->cb_bindings.insert(pCB);
    pCB->events.push_back(event);
    if (!pCB->waitedEvents.count(event)) {
        pCB->writeEventsBeforeWait.push_back(event);
    // TODO : Add check for VALIDATION_ERROR_32c008f8
    // Deferred: clear the event's recorded stage mask (0) against the actual queue at execution time
    pCB->eventUpdates.emplace_back(
        [=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
    if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
// Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT
// Expansion lets subset checks against per-stage masks work uniformly; note the comparison is an
// exact equality test against ALL_GRAPHICS, not a bit test.
static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
    return (inflags != VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
               // ALL_GRAPHICS expands to every graphics-pipeline stage bit:
               : (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
                  VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
                  VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
                  VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                  VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
                  VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
// Verify image barrier image state and that the image is consistent with FB image
// Checks, for a barrier recorded inside a render pass: (1) the barrier's image is one of the
// framebuffer's attachments, (2) that attachment is referenced by the active subpass, (3) the
// barrier does not transition layouts (oldLayout == newLayout), and (4) oldLayout matches the
// layout the subpass declares for the attachment.
static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
                                      VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
                                      uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
    const auto &fb_state = GetFramebufferState(device_data, framebuffer);
    const auto img_bar_image = img_barrier.image;
    bool image_match = false;
    bool sub_image_found = false;  // Do we find a corresponding subpass description
    VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
    uint32_t attach_index = 0;
    uint32_t index_count = 0;
    // Verify that a framebuffer image matches barrier image
    for (const auto &fb_attach : fb_state->attachments) {
        if (img_bar_image == fb_attach.image) {
            attach_index = index_count;
    if (image_match) {  // Make sure subpass is referring to matching attachment
        // Check depth/stencil, color, and resolve references of the active subpass in turn
        if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
            sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
            sub_image_found = true;
        for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
            if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
                sub_image_layout = sub_desc.pColorAttachments[j].layout;
                sub_image_found = true;
            } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
                sub_image_layout = sub_desc.pResolveAttachments[j].layout;
                sub_image_found = true;
    // Attachment matched the framebuffer but is not referenced by the active subpass
    if (!sub_image_found) {
            device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
            VALIDATION_ERROR_1b800936,
            "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
            ") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 ").",
            funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle);
    } else {  // !image_match
        auto const fb_handle = HandleToUint64(fb_state->framebuffer);
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                        fb_handle, VALIDATION_ERROR_1b800936,
                        "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                        ") does not match an image from the current framebuffer (0x%" PRIx64 ").",
                        funcName, img_index, HandleToUint64(img_bar_image), fb_handle);
    // Within a render pass instance, image barriers may not perform layout transitions
    if (img_barrier.oldLayout != img_barrier.newLayout) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b80093a,
                        "%s: As the Image Barrier for image 0x%" PRIx64
                        " is being executed within a render pass instance, oldLayout must equal newLayout yet they are %s and %s.",
                        funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
                        string_VkImageLayout(img_barrier.newLayout));
    // Barrier layout must agree with the layout the subpass declared for this attachment
    if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, VALIDATION_ERROR_1b800938,
                        "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
                        ") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
                        ") as having layout %s, but image barrier has layout %s.",
                        funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
                        string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(sub_image_layout));
// Validate image barriers within a renderPass
// For each image barrier recorded inside a render pass: access masks must be subsets of the
// subpass self-dependency's masks, no queue-family ownership transfer is allowed, and the
// barrier image must be consistent with the framebuffer/subpass (ValidateImageBarrierImage).
static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                            uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
                                            VkAccessFlags sub_src_access_mask, VkAccessFlags sub_dst_access_mask,
                                            uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
    for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
        const auto &img_barrier = image_barriers[i];
        // srcAccessMask must be a subset of the subpass dependency's srcAccessMask
        const auto &img_src_access_mask = img_barrier.srcAccessMask;
        if (img_src_access_mask != (sub_src_access_mask & img_src_access_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, VALIDATION_ERROR_1b80092e,
                            "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
                            funcName, i, img_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
        // dstAccessMask must be a subset of the subpass dependency's dstAccessMask
        const auto &img_dst_access_mask = img_barrier.dstAccessMask;
        if (img_dst_access_mask != (sub_dst_access_mask & img_dst_access_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, VALIDATION_ERROR_1b800930,
                            "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
                            funcName, i, img_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
        // No queue-family ownership transfers inside a render pass instance
        if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
            VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                            rp_handle, VALIDATION_ERROR_1b80093c,
                            "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
                            "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
                            funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
        // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
        if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
            assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
            // Secondary CB case w/o FB specified delay validation
            cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) {
                return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
            // Framebuffer is known now; validate the barrier image immediately
            skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
                                              sub_desc, rp_handle, i, img_barrier);
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
// Requires the active subpass to declare a self-dependency; the barrier's stage masks, access
// masks, and dependency flags must all be subsets of / equal to what that self-dependency allows,
// and buffer memory barriers are disallowed entirely.
static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
                                               VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
                                               VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
                                               const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
                                               const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
                                               const VkImageMemoryBarrier *image_barriers) {
    auto rp_state = cb_state->activeRenderPass;
    const auto active_subpass = cb_state->activeSubpass;
    auto rp_handle = HandleToUint64(rp_state->renderPass);
    // A barrier inside a render pass is only legal if the subpass has a self-dependency
    if (!rp_state->hasSelfDependency[active_subpass]) {
        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
                VALIDATION_ERROR_1b800928,
                "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64 " with no self-dependency specified.",
                funcName, active_subpass, rp_handle);
    assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
    // Grab ref to current subpassDescription up-front for use below
    const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
    const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[active_subpass]];
    // Expand ALL_GRAPHICS so subset comparisons work bit-by-bit
    const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
    const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
    // srcStageMask must be a subset of the self-dependency's srcStageMask (unless ALL_COMMANDS)
    if ((sub_src_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
        (src_stage_mask != (sub_src_stage_mask & src_stage_mask))) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, VALIDATION_ERROR_1b80092a,
                        "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of subpass "
                        "%d of renderPass 0x%" PRIx64 ".",
                        funcName, src_stage_mask, sub_src_stage_mask, active_subpass, rp_handle);
    // dstStageMask must be a subset of the self-dependency's dstStageMask (unless ALL_COMMANDS)
    if ((sub_dst_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
        (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask))) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, VALIDATION_ERROR_1b80092c,
                        "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of subpass "
                        "%d of renderPass 0x%" PRIx64 ".",
                        funcName, dst_stage_mask, sub_dst_stage_mask, active_subpass, rp_handle);
    // Buffer memory barriers are never allowed inside a render pass instance
    if (0 != buffer_mem_barrier_count) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, VALIDATION_ERROR_1b800934,
                        "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass 0x%" PRIx64 ".", funcName,
                        buffer_mem_barrier_count, active_subpass, rp_handle);
    // Each global memory barrier's access masks must be subsets of the self-dependency's masks
    const auto &sub_src_access_mask = sub_dep.srcAccessMask;
    const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
    for (uint32_t i = 0; i < mem_barrier_count; ++i) {
        const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
        if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b80092e,
                            "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
                            funcName, i, mb_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
        const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
        if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b800930,
                            "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
                            "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
                            funcName, i, mb_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
    // Image barriers get their own per-barrier checks (access masks, QFOT, FB consistency)
    skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle,
                                            sub_src_access_mask, sub_dst_access_mask, image_mem_barrier_count, image_barriers);
    // dependencyFlags must match the self-dependency's flags exactly
    if (sub_dep.dependencyFlags != dependency_flags) {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        rp_handle, VALIDATION_ERROR_1b800932,
                        "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value (0x%X) for "
                        "subpass %d of renderPass 0x%" PRIx64 ".",
                        funcName, dependency_flags, sub_dep.dependencyFlags, cb_state->activeSubpass, rp_handle);
// Array to mask individual accessMask to corresponding stageMask
// accessMask active bit position (0-31) maps to index
// Lookup table used by ValidateAccessMaskPipelineStage below: for each VkAccessFlagBits bit
// position, the set of pipeline stages that may legally perform that access. Entries for
// MEMORY_READ/MEMORY_WRITE use VK_ACCESS_FLAG_BITS_MAX_ENUM (all bits set) as an always-match
// sentinel, since those accesses are valid with any stage.
const static VkPipelineStageFlags AccessMaskToPipeStage[20] = {
    // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
    // VK_ACCESS_INDEX_READ_BIT = 1
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
    // VK_ACCESS_UNIFORM_READ_BIT = 3
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    // VK_ACCESS_SHADER_READ_BIT = 5
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_SHADER_WRITE_BIT = 6
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
        VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
        VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
    // VK_ACCESS_TRANSFER_READ_BIT = 11
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_TRANSFER_WRITE_BIT = 12
    VK_PIPELINE_STAGE_TRANSFER_BIT,
    // VK_ACCESS_HOST_READ_BIT = 13
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_HOST_WRITE_BIT = 14
    VK_PIPELINE_STAGE_HOST_BIT,
    // VK_ACCESS_MEMORY_READ_BIT = 15
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_MEMORY_WRITE_BIT = 16
    VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
    // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
    // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
// Verify that all bits of access_mask are supported by the src_stage_mask
// Walks the set bits of access_mask (lowest-first via u_ffs) and checks each against the
// AccessMaskToPipeStage table above. Returns false on the first unsupported access bit.
static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
    // Early out if all commands set, or access_mask NULL
    if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
    // Expand ALL_GRAPHICS into its individual stage bits so the table AND below works
    stage_mask = ExpandPipelineStageFlags(stage_mask);
    // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
    while (access_mask) {
        // u_ffs returns 1-based position of the lowest set bit; convert to 0-based table index
        index = (u_ffs(access_mask) - 1);
        // Must have "!= 0" compare to prevent warning from MSVC
        if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false;  // early out
        access_mask &= ~(1 << index);  // Mask off bit that's been checked
namespace barrier_queue_families {
// VuIndex enumerators: one per queue-family-ownership rule for buffer/image memory barriers.
// They index the vu_summary strings below and the image_error_codes/buffer_error_codes tables.
    kSrcOrDstMustBeIgnore,
    kSpecialOrIgnoreOnly,
    kSrcIgnoreRequiresDstIgnore,
    kDstValidOrSpecialIfNotIgnore,
    kSrcValidOrSpecialIfNotIgnore,
    kSrcAndDestMustBeIgnore,
    kBothIgnoreOrBothValid,
    kSubmitQueueMustMatchSrcOrDst
// One-line human-readable rule summaries, indexed by VuIndex, appended to barrier error messages.
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
                                   "Source or destination queue family must be special or ignored.",
                                   "Destination queue family must be ignored if source queue family is.",
                                   "Destination queue family must be valid, ignored, or special.",
                                   "Source queue family must be valid, ignored, or special.",
                                   "Source and destination queue family must both be ignored.",
                                   "Source and destination queue family must both be ignore or both valid.",
                                   "Source or destination queue family must match submit queue family, if not ignored."};
// Validation error codes for VkImageMemoryBarrier queue-family rules, indexed by VuIndex
// (parallel to vu_summary above).
static const UNIQUE_VALIDATION_ERROR_CODE image_error_codes[] = {
    VALIDATION_ERROR_0a000aca,  // VUID-VkImageMemoryBarrier-image-01381 -- kSrcOrDstMustBeIgnore
    VALIDATION_ERROR_0a000dcc,  // VUID-VkImageMemoryBarrier-image-01766 -- kSpecialOrIgnoreOnly
    VALIDATION_ERROR_0a000962,  // VUID-VkImageMemoryBarrier-image-01201 -- kSrcIgnoreRequiresDstIgnore
    VALIDATION_ERROR_0a000dd0,  // VUID-VkImageMemoryBarrier-image-01768 -- kDstValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0a000dce,  // VUID-VkImageMemoryBarrier-image-01767 -- kSrcValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0a00095e,  // VUID-VkImageMemoryBarrier-image-01199 -- kSrcAndDestMustBeIgnore
    VALIDATION_ERROR_0a000960,  // VUID-VkImageMemoryBarrier-image-01200 -- kBothIgnoreOrBothValid
    VALIDATION_ERROR_0a00096a,  // VUID-VkImageMemoryBarrier-image-01205 -- kSubmitQueueMustMatchSrcOrDst
// Validation error codes for VkBufferMemoryBarrier queue-family rules, indexed by VuIndex
// (parallel to vu_summary above).
static const UNIQUE_VALIDATION_ERROR_CODE buffer_error_codes[] = {
    VALIDATION_ERROR_0180094e,  // VUID-VkBufferMemoryBarrier-buffer-01191 -- kSrcOrDstMustBeIgnore
    VALIDATION_ERROR_01800dc6,  // VUID-VkBufferMemoryBarrier-buffer-01763 -- kSpecialOrIgnoreOnly
    VALIDATION_ERROR_01800952,  // VUID-VkBufferMemoryBarrier-buffer-01193 -- kSrcIgnoreRequiresDstIgnore
    VALIDATION_ERROR_01800dca,  // VUID-VkBufferMemoryBarrier-buffer-01765 -- kDstValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_01800dc8,  // VUID-VkBufferMemoryBarrier-buffer-01764 -- kSrcValidOrSpecialIfNotIgnore
    VALIDATION_ERROR_0180094c,  // VUID-VkBufferMemoryBarrier-buffer-01190 -- kSrcAndDestMustBeIgnore
    VALIDATION_ERROR_01800950,  // VUID-VkBufferMemoryBarrier-buffer-01192 -- kBothIgnoreOrBothValid
    VALIDATION_ERROR_01800958,  // VUID-VkBufferMemoryBarrier-buffer-01196 -- kSubmitQueueMustMatchSrcOrDst
// Bundles the per-barrier context needed to validate queue-family ownership-transfer rules
// (sharing mode, handle, VuIndex-indexed error-code table) plus logging helpers.
class ValidatorState {
    // Generic constructor. Captures the report data, command buffer / barrier object handles,
    // the object's sharing mode, the per-object-type error-code table (image_error_codes or
    // buffer_error_codes), the number of valid queue families on the device, and whether
    // VK_KHR_external_memory is enabled (which permits the special EXTERNAL queue family).
    // NOTE: initializer order must match the member declaration order (not visible here).
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
                   const UNIQUE_VALIDATION_ERROR_CODE *val_codes)
        : report_data_(device_data->report_data),
          func_name_(func_name),
          cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
          barrier_handle64_(barrier_handle64),
          sharing_mode_(sharing_mode),
          object_type_(object_type),
          val_codes_(val_codes),
          limit_(static_cast<uint32_t>(device_data->phys_dev_properties.queue_family_properties.size())),
          mem_ext_(device_data->extensions.vk_khr_external_memory) {}
7499 // Create a validator state from an image state... reducing the image specific to the generic version.
7500 ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7501 const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
7502 : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
7503 kVulkanObjectTypeImage, image_error_codes) {}
7505 // Create a validator state from an buffer state... reducing the buffer specific to the generic version.
7506 ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7507 const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
7508 : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
7509 kVulkanObjectTypeImage, buffer_error_codes) {}
    // Log the messages using boilerplate from object state, and Vu specific information from the template arg
    // One and two family versions, in the single family version, Vu holds the name of the passed parameter
    // Single-family form: reports one queue-family value by parameter name, plus the VuIndex summary.
    bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
        const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
        // Annotation decorates the raw family index (e.g. for the IGNORED/EXTERNAL special values)
        const char *annotation = GetFamilyAnnotation(family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       val_code, "%s: Barrier using %s 0x%" PRIx64 " created with sharingMode %s, has %s %u%s. %s", func_name_,
                       GetTypeString(), barrier_handle64_, GetModeString(), param_name, family, annotation, vu_summary[vu_index]);
    // Two-family form: reports both srcQueueFamilyIndex and dstQueueFamilyIndex with annotations.
    bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
        const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
        const char *src_annotation = GetFamilyAnnotation(src_family);
        const char *dst_annotation = GetFamilyAnnotation(dst_family);
        return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
                       "%s: Barrier using %s 0x%" PRIx64
                       " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
                       func_name_, GetTypeString(), barrier_handle64_, GetModeString(), src_family, src_annotation, dst_family,
                       dst_annotation, vu_summary[vu_index]);
7533 // This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed
7534 // data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceed the guarantees of validity for
7535 // application input.
7536 static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
7537 const ValidatorState &val) {
7538 auto queue_data_it = device_data->queueMap.find(queue);
7539 if (queue_data_it == device_data->queueMap.end()) return false;
7541 uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
7542 if ((src_family != queue_family) && (dst_family != queue_family)) {
7543 const UNIQUE_VALIDATION_ERROR_CODE val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
7544 const char *src_annotation = val.GetFamilyAnnotation(src_family);
7545 const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
7546 return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
7547 HandleToUint64(queue), val_code,
7548 "%s: Barrier submitted to queue with family index %u, using %s 0x%" PRIx64
7549 " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
7550 "vkQueueSubmit", queue_family, val.GetTypeString(), val.barrier_handle64_, val.GetModeString(),
7551 src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
7555 // Logical helpers for semantic clarity
7556 inline bool KhrExternalMem() const { return mem_ext_; }
7557 inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
7558 inline bool IsSpecial(uint32_t queue_family) const {
7559 return (queue_family == VK_QUEUE_FAMILY_EXTERNAL_KHR) || (queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT);
7561 inline bool IsValidOrSpecial(uint32_t queue_family) const {
7562 return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
7564 inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }
7566 // Helpers for LogMsg (and log_msg)
7567 const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
7569 // Descriptive text for the various types of queue family index
7570 const char *GetFamilyAnnotation(uint32_t family) const {
7571 const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
7572 const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
7573 const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
7574 const char *valid = " (VALID)";
7575 const char *invalid = " (INVALID)";
7577 case VK_QUEUE_FAMILY_EXTERNAL_KHR:
7579 case VK_QUEUE_FAMILY_FOREIGN_EXT:
7581 case VK_QUEUE_FAMILY_IGNORED:
7584 if (IsValid(family)) {
7590 const char *GetTypeString() const { return object_string[object_type_]; }
7591 VkSharingMode GetSharingMode() const { return sharing_mode_; }
// Immutable boilerplate captured at construction for use by LogMsg and the
// queue-family predicates above. NOTE(review): values are never modified after
// construction (all members are const).
7594 const debug_report_data *const report_data_;
7595 const char *const func_name_;
7596 const uint64_t cb_handle64_;
7597 const uint64_t barrier_handle64_;
7598 const VkSharingMode sharing_mode_;
7599 const VulkanObjectType object_type_;
7600 const UNIQUE_VALIDATION_ERROR_CODE *val_codes_;
7601 const uint32_t limit_;
7602 const bool mem_ext_;
7605 bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
7606 const uint32_t src_queue_family, const uint32_t dst_queue_family) {
7609 const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
7610 const bool src_ignored = val.IsIgnored(src_queue_family);
7611 const bool dst_ignored = val.IsIgnored(dst_queue_family);
7612 if (val.KhrExternalMem()) {
7613 if (mode_concurrent) {
7614 if (!(src_ignored || dst_ignored)) {
7615 skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
7617 if ((src_ignored && !(dst_ignored || val.IsSpecial(dst_queue_family))) ||
7618 (dst_ignored && !(src_ignored || val.IsSpecial(src_queue_family)))) {
7619 skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
7622 // VK_SHARING_MODE_EXCLUSIVE
7623 if (src_ignored && !dst_ignored) {
7624 skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
7626 if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
7627 skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
7629 if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
7630 skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
7634 // No memory extension
7635 if (mode_concurrent) {
7636 if (!src_ignored || !dst_ignored) {
7637 skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
7640 // VK_SHARING_MODE_EXCLUSIVE
7641 if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
7642 skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
7646 if (!mode_concurrent && !src_ignored && !dst_ignored) {
7647 // Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
7648 // TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
7649 // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
7650 // to a local queue of update_state_actions or something.
7651 cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
7652 return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
7657 } // namespace barrier_queue_families
7659 // Type specific wrapper for image barriers
7660 bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
7661 const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
7662 // State data is required
7667 // Create the validator state from the image state
7668 barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
7669 const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
7670 const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
7671 return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
7674 // Type specific wrapper for buffer barriers
7675 bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
7676 const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
7677 // State data is required
7682 // Create the validator state from the buffer state
7683 barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
7684 const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
7685 const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
7686 return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
7689 static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7690 VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
7691 const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7692 const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7693 const VkImageMemoryBarrier *pImageMemBarriers) {
7695 for (uint32_t i = 0; i < memBarrierCount; ++i) {
7696 const auto &mem_barrier = pMemBarriers[i];
7697 if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
7698 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7699 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7700 "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7701 mem_barrier.srcAccessMask, src_stage_mask);
7703 if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
7704 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7705 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7706 "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7707 mem_barrier.dstAccessMask, dst_stage_mask);
7710 for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7711 auto mem_barrier = &pImageMemBarriers[i];
7712 if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
7713 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7714 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7715 "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7716 mem_barrier->srcAccessMask, src_stage_mask);
7718 if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
7719 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7720 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7721 "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7722 mem_barrier->dstAccessMask, dst_stage_mask);
7725 auto image_data = GetImageState(device_data, mem_barrier->image);
7726 skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);
7728 if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7729 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7730 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0a00095c,
7731 "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
7735 // There is no VUID for this, but there is blanket text:
7736 // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
7737 // recording commands in a command buffer."
7738 // TODO: Update this when VUID is defined
7739 skip |= ValidateMemoryIsBoundToImage(device_data, image_data, funcName, VALIDATION_ERROR_UNDEFINED);
7741 auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
7742 skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
7744 std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
7745 skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
7746 param_name.c_str());
7750 for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7751 auto mem_barrier = &pBufferMemBarriers[i];
7752 if (!mem_barrier) continue;
7754 if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
7755 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7756 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7757 "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7758 mem_barrier->srcAccessMask, src_stage_mask);
7760 if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
7761 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7762 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7763 "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7764 mem_barrier->dstAccessMask, dst_stage_mask);
7766 // Validate buffer barrier queue family indices
7767 auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
7768 skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);
7771 // There is no VUID for this, but there is blanket text:
7772 // "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
7773 // recording commands in a command buffer"
7774 // TODO: Update this when VUID is defined
7775 skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, funcName, VALIDATION_ERROR_UNDEFINED);
7777 auto buffer_size = buffer_state->createInfo.size;
7778 if (mem_barrier->offset >= buffer_size) {
7780 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7781 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_01800946,
7782 "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7783 funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
7784 HandleToUint64(buffer_size));
7785 } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7787 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7788 HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0180094a,
7789 "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7790 " whose sum is greater than total size 0x%" PRIx64 ".",
7791 funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
7792 HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
7799 bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
7800 VkPipelineStageFlags sourceStageMask) {
7802 VkPipelineStageFlags stageMask = 0;
7803 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
7804 for (uint32_t i = 0; i < eventCount; ++i) {
7805 auto event = pCB->events[firstEventIndex + i];
7806 auto queue_data = dev_data->queueMap.find(queue);
7807 if (queue_data == dev_data->queueMap.end()) return false;
7808 auto event_data = queue_data->second.eventToStageMap.find(event);
7809 if (event_data != queue_data->second.eventToStageMap.end()) {
7810 stageMask |= event_data->second;
7812 auto global_event_data = GetEventNode(dev_data, event);
7813 if (!global_event_data) {
7814 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7815 HandleToUint64(event), DRAWSTATE_INVALID_EVENT,
7816 "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
7818 stageMask |= global_event_data->stageMask;
7822 // TODO: Need to validate that host_bit is only set if set event is called
7823 // but set event can be called at any time.
7824 if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7825 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7826 HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1e62d401,
7827 "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
7828 "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
7829 "vkSetEvent but instead is 0x%X.",
7830 sourceStageMask, stageMask);
7835 // Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
7836 static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
7837 {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
7838 {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
7839 {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
7840 {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
7841 {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
7842 {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
7843 {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
7844 {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
7845 {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
7846 {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
7847 {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
7848 {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
7849 {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
7850 {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
7852 static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
7853 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
7854 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
7855 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
7856 VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
7857 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
7858 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
7859 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
7860 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
7861 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
7862 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
7863 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
7864 VK_PIPELINE_STAGE_TRANSFER_BIT,
7865 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
7867 bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
7868 VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
7869 UNIQUE_VALIDATION_ERROR_CODE error_code) {
7871 // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
7872 for (const auto &item : stage_flag_bit_array) {
7873 if (stage_mask & item) {
7874 if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
7875 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7876 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code,
7877 "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
7878 function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
7885 // Check if all barriers are of a given operation type.
7886 template <typename Barrier, typename OpCheck>
7887 static bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
7888 if (!pool) return false;
7890 for (uint32_t b = 0; b < count; b++) {
7891 if (!op_check(pool, barriers + b)) return false;
// Classification of a set of barriers with respect to queue family ownership transfer.
enum BarrierOperationsType {
    kAllAcquire,  // All Barrier operations are "ownership acquire" operations
    kAllRelease,  // All Barrier operations are "ownership release" operations
    kGeneral,     // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
};
7902 // Look at the barriers to see if we they are all release or all acquire, the result impacts queue properties validation
7903 BarrierOperationsType ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
7904 const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count,
7905 const VkImageMemoryBarrier *image_barriers) {
7906 auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
7907 BarrierOperationsType op_type = kGeneral;
7909 // Look at the barrier details only if they exist
7910 // Note: AllTransferOp returns true for count == 0
7911 if ((buffer_barrier_count + image_barrier_count) != 0) {
7912 if (AllTransferOp(pool, IsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
7913 AllTransferOp(pool, IsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
7914 op_type = kAllRelease;
7915 } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
7916 AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
7917 op_type = kAllAcquire;
7924 bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
7925 VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
7926 BarrierOperationsType barrier_op_type, const char *function,
7927 UNIQUE_VALIDATION_ERROR_CODE error_code) {
7929 uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
7930 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
7931 auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
7933 // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
7934 // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
7935 // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
7937 if (queue_family_index < physical_device_state->queue_family_properties.size()) {
7938 VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
7940 // Only check the source stage mask if any barriers aren't "acquire ownership"
7941 if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
7942 skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
7943 function, "srcStageMask", error_code);
7945 // Only check the dest stage mask if any barriers aren't "release ownership"
7946 if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
7947 skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
7948 function, "dstStageMask", error_code);
7954 VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
7955 VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
7956 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7957 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7958 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7960 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7961 unique_lock_t lock(global_lock);
7962 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
7964 auto barrier_op_type = ComputeBarrierOperationsType(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
7965 imageMemoryBarrierCount, pImageMemoryBarriers);
7966 skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, barrier_op_type,
7967 "vkCmdWaitEvents", VALIDATION_ERROR_1e600918);
7968 skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
7969 VALIDATION_ERROR_1e600912);
7970 skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
7971 VALIDATION_ERROR_1e600914);
7972 skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7973 VALIDATION_ERROR_1e602415);
7974 skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7975 skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
7976 skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
7977 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
7978 pImageMemoryBarriers);
7980 auto first_event_index = cb_state->events.size();
7981 for (uint32_t i = 0; i < eventCount; ++i) {
7982 auto event_state = GetEventNode(dev_data, pEvents[i]);
7984 addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent},
7986 event_state->cb_bindings.insert(cb_state);
7988 cb_state->waitedEvents.insert(pEvents[i]);
7989 cb_state->events.push_back(pEvents[i]);
7991 cb_state->eventUpdates.emplace_back(
7992 [=](VkQueue q) { return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
7993 TransitionImageLayouts(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
7998 dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7999 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8000 imageMemoryBarrierCount, pImageMemoryBarriers);
8003 static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
8004 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8005 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8006 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8007 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8009 auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8010 imageMemoryBarrierCount, pImageMemoryBarriers);
8011 skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, barrier_op_type,
8012 "vkCmdPipelineBarrier", VALIDATION_ERROR_1b80093e);
8013 skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
8014 VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
8015 skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8016 skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
8017 VALIDATION_ERROR_1b800924);
8018 skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
8019 VALIDATION_ERROR_1b800926);
8020 if (cb_state->activeRenderPass) {
8021 skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
8022 dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8023 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8024 if (skip) return true; // Early return to avoid redundant errors from below calls
8027 ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
8028 skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
8029 pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
8030 pImageMemoryBarriers);
8034 static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
8035 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8036 TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
8039 VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
8040 VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8041 uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8042 uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8043 uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8045 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8046 unique_lock_t lock(global_lock);
8047 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
8049 skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
8050 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8051 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8053 PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8060 device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8061 memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8062 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8066 static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8067 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8068 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8070 pCB->queryToStateMap[object] = value;
8072 auto queue_data = dev_data->queueMap.find(queue);
8073 if (queue_data != dev_data->queueMap.end()) {
8074 queue_data->second.queryToStateMap[object] = value;
8079 VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8081 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8082 unique_lock_t lock(global_lock);
8083 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8085 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8086 VALIDATION_ERROR_17802415);
8087 skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8093 dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8097 QueryObject query = {queryPool, slot};
8098 pCB->activeQueries.insert(query);
8099 pCB->startedQueries.insert(query);
8100 addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8101 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
8105 VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8107 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8108 unique_lock_t lock(global_lock);
8109 QueryObject query = {queryPool, slot};
8110 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8112 if (!cb_state->activeQueries.count(query)) {
8113 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8114 HandleToUint64(commandBuffer), VALIDATION_ERROR_1ae00f06,
8115 "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d.", HandleToUint64(queryPool),
8118 skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8119 VALIDATION_ERROR_1ae02415);
8120 skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "VkCmdEndQuery()");
8126 dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
8130 cb_state->activeQueries.erase(query);
8131 cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
8132 addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8133 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
// Intercepts vkCmdResetQueryPool: validates the command is recorded outside a render pass on a graphics/compute
// queue, then records the reset of [firstQuery, firstQuery + queryCount) so later query use can be checked.
VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                             uint32_t queryCount) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    // vkCmdResetQueryPool must not be called inside an active render pass.
    skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
    skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_1c602415);
    // Pass the call down the dispatch chain.
    dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
    // Record per-query state: snapshot the events already waited on, and defer marking each query
    // "unavailable" (false) until queue submission via the queryUpdates lambda.
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
        cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, false); });
    // Track the pool<->command-buffer association for invalidation on pool destruction.
    addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                            {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
// Returns true if the given query is known to be invalid (reset but not completed / unavailable).
// Checks the per-queue state map first; the device-wide map is consulted as a fallback.
static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
    QueryObject query = {queryPool, queryIndex};
    auto query_data = queue_data->queryToStateMap.find(query);
    if (query_data != queue_data->queryToStateMap.end()) {
        // Present in the queue-local map: a 'false' state means the query is not available -> invalid.
        if (!query_data->second) return true;
        // Fall back to the device-wide map; missing or 'false' state means the query is invalid.
        auto it = dev_data->queryToStateMap.find(query);
        if (it == dev_data->queryToStateMap.end() || !it->second) return true;
// Deferred (queue-submit time) check for vkCmdCopyQueryPoolResults: flags any query in
// [firstQuery, firstQuery + queryCount) that is invalid/unavailable at submission time.
static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = GetQueueState(dev_data, queue);
    // No tracked state for this queue -> nothing to validate against.
    if (!queue_data) return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_QUERY,
                            "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                            HandleToUint64(queryPool), firstQuery + i);
// Intercepts vkCmdCopyQueryPoolResults: validates the destination buffer (bound memory, TRANSFER_DST usage),
// queue capabilities, and render-pass restrictions, then records buffer bindings and a deferred query check.
VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
                                                   VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    auto cb_node = GetCBNode(dev_data, commandBuffer);
    auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        // Destination buffer must have memory bound before results can be copied into it.
        skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
        // Validate that DST buffer has correct usage flags set
        ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
                                 "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
                                      VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
        skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        // This command must be recorded outside of an active render pass.
        skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
    // Pass the call down the dispatch chain.
    dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
    if (cb_node && dst_buff_state) {
        // Record state: bind the buffer to this command buffer and mark its memory valid at submit time.
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        cb_node->queue_submit_functions.emplace_back([=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
        // Queries can only be checked for validity once the queue is known, so defer via queryUpdates.
        cb_node->queryUpdates.emplace_back([=](VkQueue q) { return validateQuery(q, cb_node, queryPool, firstQuery, queryCount); });
        addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
// Intercepts vkCmdPushConstants: validates queue capabilities, the (offset, size) range against device limits,
// and that stageFlags exactly matches the VkPushConstantRange(s) of the pipeline layout that overlap the update.
VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
                                            uint32_t offset, uint32_t size, const void *pValues) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
                                  VALIDATION_ERROR_1bc02415);
    skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
    // Checks offset/size alignment and maxPushConstantsSize limits.
    skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    // stageFlags must name at least one shader stage.
    if (0 == stageFlags) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc2dc03, "vkCmdPushConstants() call has no stageFlags set.");
    // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
    // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
    const auto &ranges = *getPipelineLayout(dev_data, layout)->push_constant_ranges;
    VkShaderStageFlags found_stages = 0;
    for (const auto &range : ranges) {
        // Only ranges that fully contain [offset, offset + size) participate in the check.
        if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
            VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
            // Every stage declared by the overlapping range must be present in the command's stageFlags.
            if (matching_stages != range.stageFlags) {
                // VALIDATION_ERROR_1bc00e08 VUID-vkCmdPushConstants-offset-01796
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
                                VALIDATION_ERROR_1bc00e08,
                                "vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
                                "must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
                                "), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout 0x%" PRIx64 ".",
                                (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
                                HandleToUint64(layout));
            // Accumulate all stages we've found
            found_stages = matching_stages | found_stages;
    // Conversely, every stage named by the command must be covered by some overlapping range.
    if (found_stages != stageFlags) {
        // VALIDATION_ERROR_1bc00e06 VUID-vkCmdPushConstants-offset-01795
        uint32_t missing_stages = ~found_stages & stageFlags;
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc00e06,
                        "vkCmdPushConstants(): stageFlags = 0x%" PRIx32 ", VkPushConstantRange in pipeline layout 0x%" PRIx64
                        " overlapping offset = %d and size = %d, do not contain stageFlags 0x%" PRIx32 ".",
                        (uint32_t)stageFlags, HandleToUint64(layout), offset, size, missing_stages);
    // Only call down the chain if no validation error was flagged.
    if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
// Intercepts vkCmdWriteTimestamp: validates queue capabilities (graphics, compute, or transfer all allow
// timestamps), then records a deferred update marking the query slot as written/available at submit time.
VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
                                             VkQueryPool queryPool, uint32_t slot) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    unique_lock_t lock(global_lock);
    GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()",
                          VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, VALIDATION_ERROR_1e802415);
    skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
    // Pass the call down the dispatch chain.
    dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
    // Defer marking the query available until the owning queue is known at submission time.
    QueryObject query = {queryPool, slot};
    cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
// vkCreateFramebuffer helper: for each used attachment reference, verifies that the underlying image was
// created with usage_flag (e.g. COLOR_ATTACHMENT for color refs). Logs error_code on mismatch.
static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                // Walk from the attachment reference to the image view, then to the image's create info.
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = GetImageViewState(dev_data, *image_view);
                const VkImageCreateInfo *ici = &GetImageState(dev_data, view_state->create_info.image)->createInfo;
                if (ici != nullptr) {
                    if ((ici->usage & usage_flag) == 0) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
                                        "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                        "IMAGE_USAGE flags (%s).",
                                        attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
8343 // Validate VkFramebufferCreateInfo which includes:
8344 // 1. attachmentCount equals renderPass attachmentCount
8345 // 2. corresponding framebuffer and renderpass attachments have matching formats
8346 // 3. corresponding framebuffer and renderpass attachments have matching sample counts
8347 // 4. fb attachments only have a single mip level
8348 // 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
8350 // 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8351 // 8. fb dimensions are within physical device limits
// Validates a VkFramebufferCreateInfo against its render pass and the physical-device limits.
// Checks (per the list in the comment block above): attachment count match, format/sample-count match,
// single mip level, sufficient attachment dimensions, identity swizzle, correct usage bits, and fb extents
// within [1, device max] for width/height/layers.
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
    const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
    // 1. attachmentCount must equal the render pass's attachmentCount.
    if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006d8,
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
                        "of %u of renderPass (0x%" PRIx64 ") being used to create Framebuffer.",
                        pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass));
        // attachmentCounts match, so make sure corresponding attachment details line up
        const VkImageView *image_views = pCreateInfo->pAttachments;
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            auto view_state = GetImageViewState(dev_data, image_views[i]);
            auto &ivci = view_state->create_info;
            // 2. Attachment format must match the render pass's declared format.
            if (ivci.format != rpci->pAttachments[i].format) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e0,
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
                        "match the format of %s used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                        HandleToUint64(pCreateInfo->renderPass));
            // 3. Sample counts must also match the render pass's declaration.
            const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
            if (ici->samples != rpci->pAttachments[i].samples) {
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e2,
                    "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
                    "samples used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
                    i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                    HandleToUint64(pCreateInfo->renderPass));
            // Verify that view only has a single mip level
            if (ivci.subresourceRange.levelCount != 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                0, VALIDATION_ERROR_094006e6,
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
                                "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                i, ivci.subresourceRange.levelCount);
            // 5. The selected mip level (and layer count) must be at least as large as the framebuffer extents.
            const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
            uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
            uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
            if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                (mip_height < pCreateInfo->height)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                0, VALIDATION_ERROR_094006e4,
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
                                "attachment #%u, framebuffer:\n"
                                "layerCount: %u, %u\n",
                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
            // 6. Each component must use the identity swizzle (IDENTITY or the matching R/G/B/A value).
            if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                0, VALIDATION_ERROR_094006e8,
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
                                "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                                i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                                string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
    // Verify correct attachment usage flags
    for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
        // Verify input attachments:
        MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                   pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
        // Verify color attachments:
        MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                   pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
        // Verify depth/stencil attachments:
        if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
            skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                               VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_094006ec,
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
                        "width: %u, device max: %u\n",
                        pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth);
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_094006f0,
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
                        "height: %u, device max: %u\n",
                        pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight);
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_094006f4,
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
                        "layers: %u, device max: %u\n",
                        pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
    // Verify FB dimensions are greater than zero
    if (pCreateInfo->width <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_094006ea,
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
    if (pCreateInfo->height <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_094006ee,
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
    if (pCreateInfo->layers <= 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        VALIDATION_ERROR_094006f2,
                        "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
8489 // Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8490 // Return true if an error is encountered and callback returns true to skip call down chain
8491 // false indicates that call down chain should proceed
// Pre-call validation entry point for vkCreateFramebuffer; delegates to ValidateFramebufferCreateInfo.
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that renderPass FB is created with is compatible with FB
    skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8499 // CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    // The FRAMEBUFFER_STATE keeps a shared_ptr to the render pass state so it outlives render pass destruction.
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(fb, pCreateInfo, GetRenderPassStateSharedPtr(dev_data, pCreateInfo->renderPass)));
    // Cache each attachment's view state and backing image for later memory-tracking checks.
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = GetImageViewState(dev_data, view);
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
    dev_data->frameBufferMap[fb] = std::move(fb_state);
// Intercepts vkCreateFramebuffer: validate, call down the chain, then record the new framebuffer state.
VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    // Validation failure aborts the call without invoking the driver.
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    // Only record state for framebuffers the driver actually created.
    if (VK_SUCCESS == result) {
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
// Depth-first search of the subpass DAG: returns true if subpass 'index' (transitively) depends on
// subpass 'dependent' through 'prev' edges. 'processed_nodes' memoizes visited nodes to avoid re-walking cycles.
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index)) return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
// Verifies that 'subpass' has an explicit or transitive dependency relationship with every subpass in
// 'dependent_subpasses' (all of which share an attachment with it). Logs an error for each missing dependency.
static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
                                  const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        // A subpass never needs a dependency on itself.
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no dependency exits an implicit dependency still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            // Search the DAG in both directions for a transitive path between the two subpasses.
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                DRAWSTATE_INVALID_RENDERPASS,
                                "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                dependent_subpasses[k]);
// Recursive check that an attachment written by an earlier subpass is listed in pPreserveAttachments of every
// intermediate subpass (depth > 0) that neither reads nor writes it. Returns true if subpass 'index' (or any of
// its predecessors) writes/uses the attachment; logs via 'skip' when a required preserve entry is missing.
static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment) return true;
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        if (attachment == subpass.pInputAttachments[j].attachment) return true;
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
    // If the attachment was written to by a previous node than this node needs to preserve it.
    // depth == 0 is the subpass that reads the attachment, so it is exempt from the preserve requirement.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
        if (!has_preserved) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            DRAWSTATE_INVALID_RENDERPASS,
                            "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
// Returns true when [offset1, offset1 + size1) overlaps [offset2, offset2 + size2): either range1's end falls
// strictly inside range2, or range1's start does.
// NOTE(review): all comparisons are strict, so two ranges with identical offset and size (and some
// exact-containment cases, e.g. equal offsets with differing sizes) are reported as NOT overlapping —
// confirm whether that is intended before relying on this for aliasing detection.
bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
           ((offset1 > offset2) && (offset1 < (offset2 + size2)));
// Returns true when two image subresource ranges overlap in both the mip-level and array-layer dimensions.
bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
// Validates render pass subpass dependencies against how a framebuffer's attachments are actually used:
//  - detects attachments that alias (same view, overlapping subresources, or overlapping bound memory) and
//    requires VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT on both;
//  - builds per-attachment lists of reader/writer subpasses (including aliases) and requires an explicit or
//    transitive dependency between any pair of subpasses that touch the same attachment;
//  - requires attachments read by a later subpass to be preserved by intermediate subpasses (CheckPreserved).
static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            // Same view handle -> trivially overlapping.
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            auto view_state_i = GetImageViewState(dev_data, viewi);
            auto view_state_j = GetImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
            // Different views of the same image with overlapping subresource ranges also alias.
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            auto image_data_i = GetImageState(dev_data, view_ci_i.image);
            auto image_data_j = GetImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
            // Finally, distinct images bound to overlapping ranges of the same memory object alias too.
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
    // Aliasing attachments must declare MAY_ALIAS on both sides.
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
                                "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                                attachment, other_attachment);
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
                                HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
                                "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                                other_attachment, attachment);
    // Find for each attachment the subpasses that use them.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        // Input attachments are readers; aliased attachments are treated as read as well.
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
        // Color attachments are writers.
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            attachmentIndices.insert(attachment);
        // Depth/stencil attachment is also a writer; it must not double as a color attachment in this subpass.
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            if (attachmentIndices.count(attachment)) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                        DRAWSTATE_INVALID_RENDERPASS,
                        "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
    // If there is a dependency needed make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED) continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
// Builds the subpass dependency DAG for a render pass: fills subpass_to_node with prev/next edges from
// pDependencies, records self-dependencies in has_self_dependency, and maps each self-dependent subpass to its
// dependency index in subpass_to_dep_index. Also flags malformed dependencies (both subpasses external, or a
// dependency from a later subpass to an earlier one).
static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
                          std::vector<int32_t> &subpass_to_dep_index) {
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
        subpass_to_dep_index[i] = -1;  // Default to no dependency and overwrite below as needed
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            // With one side external, src == dst means both are external, which is invalid.
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                DRAWSTATE_INVALID_RENDERPASS, "The src and dest subpasses cannot both be external.");
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                            DRAWSTATE_INVALID_RENDERPASS,
                            "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            // A subpass depending on itself (e.g. for pipeline barriers within the subpass).
            has_self_dependency[dependency.srcSubpass] = true;
            subpass_to_dep_index[dependency.srcSubpass] = i;
            // Normal edge: record both directions for DAG traversal.
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
// vkCreateShaderModule: pre-validate the SPIR-V, call down the dispatch chain,
// and on success record a shader_module state object keyed by the new handle.
// When the SPIR-V failed parsing but creation is still allowed, an empty
// placeholder shader_module is stored so later validation does not reparse it.
8792 VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8793 const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
8794 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// spirv_valid is an out-parameter of the pre-call validation; its declaration
// is on a line not visible in this extract (presumably truncated).
8797 if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;
8799 VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8801 if (res == VK_SUCCESS) {
// Guard the shaderModuleMap update; the driver call above runs unlocked.
8802 lock_guard_t lock(global_lock);
8803 unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
8804 dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
8809 static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
8811 if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
8812 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8813 VALIDATION_ERROR_12200684,
8814 "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.", type,
8815 attachment, attachment_count);
// True iff x is a non-zero power of two, i.e. exactly one bit is set.
static bool IsPowerOfTwo(unsigned x) {
    if (x == 0u) {
        return false;
    }
    return (x & (x - 1u)) == 0u;
}
// Validate every attachment reference in a VkRenderPassCreateInfo: bind points,
// preserve-attachment rules, resolve-attachment rules, index bounds, and
// per-subpass sample-count consistency (relaxed under
// VK_AMD_mixed_attachment_samples). Returns true if any error was logged.
// NOTE(review): several closing-brace / else lines are not visible in this
// extract (non-contiguous source line numbers) — presumably truncated.
8822 static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
8824 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8825 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
// Only graphics subpasses are legal in a render pass.
8826 if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8827 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8828 VALIDATION_ERROR_14000698,
8829 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
// Preserve attachments: must be a real index and must not also be referenced
// as an input/color/resolve/depth-stencil attachment of the same subpass.
8832 for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8833 uint32_t attachment = subpass.pPreserveAttachments[j];
8834 if (attachment == VK_ATTACHMENT_UNUSED) {
8835 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8836 VALIDATION_ERROR_140006aa,
8837 "CreateRenderPass: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
8839 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
// Search the other reference lists for a duplicate use of this index.
8841 bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
8842 for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
8843 found = (subpass.pInputAttachments[r].attachment == attachment);
8845 for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
8846 found = (subpass.pColorAttachments[r].attachment == attachment) ||
8847 (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
8851 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8852 VALIDATION_ERROR_140006ac,
8853 "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass.", i,
// The subpass resolves iff any resolve reference is a real attachment.
8859 auto subpass_performs_resolve =
8860 subpass.pResolveAttachments &&
8861 std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
8862 [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
// Accumulates the OR of all sample-count bits used by this subpass.
8864 unsigned sample_count = 0;
8866 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8867 uint32_t attachment;
8868 if (subpass.pResolveAttachments) {
// Resolve target checks: in bounds, single-sampled, and paired with a used color attachment.
8869 attachment = subpass.pResolveAttachments[j].attachment;
8870 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
8872 if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
8873 pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
8874 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8875 0, VALIDATION_ERROR_140006a2,
8876 "CreateRenderPass: Subpass %u requests multisample resolve into attachment %u, which must "
8877 "have VK_SAMPLE_COUNT_1_BIT but has %s.",
8878 i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
8881 if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
8882 subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
8883 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8884 0, VALIDATION_ERROR_1400069e,
8885 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u which has "
8886 "attachment=VK_ATTACHMENT_UNUSED.",
// Color attachment checks: in bounds; if resolving, the source must be multisampled.
8890 attachment = subpass.pColorAttachments[j].attachment;
8891 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
8893 if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
8894 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
8896 if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
8897 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8898 0, VALIDATION_ERROR_140006a0,
8899 "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u which has "
8900 "VK_SAMPLE_COUNT_1_BIT.",
// Resolve source and destination must share a format.
8904 if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
8905 const auto &color_desc = pCreateInfo->pAttachments[attachment];
8906 const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
8907 if (color_desc.format != resolve_desc.format) {
8908 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8909 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_140006a4,
8910 "CreateRenderPass: Subpass %u pColorAttachments[%u] resolves to an attachment with a "
8911 "different format. color format: %u, resolve format: %u.",
8912 i, j, color_desc.format, resolve_desc.format);
// VK_AMD_mixed_attachment_samples: color samples may not exceed depth/stencil samples.
8916 if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass.pDepthStencilAttachment &&
8917 subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8918 const auto depth_stencil_sample_count =
8919 pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
8920 if (pCreateInfo->pAttachments[attachment].samples > depth_stencil_sample_count) {
8921 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8922 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_14000bc4,
8923 "CreateRenderPass: Subpass %u pColorAttachments[%u] has %s which is larger than "
8924 "depth/stencil attachment %s.",
8925 i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
8926 string_VkSampleCountFlagBits(depth_stencil_sample_count));
// Depth/stencil attachment: bounds check and fold its samples into the subpass total.
8932 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8933 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8934 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
8936 if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
8937 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
// Input attachments only need a bounds check (they do not affect sample_count).
8941 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8942 uint32_t attachment = subpass.pInputAttachments[j].attachment;
8943 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
// Without the AMD extension, all output attachments must share one sample
// count, i.e. the OR of their sample bits must be a single bit (power of two).
8946 if (!dev_data->extensions.vk_amd_mixed_attachment_samples && sample_count && !IsPowerOfTwo(sample_count)) {
8947 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8948 VALIDATION_ERROR_0082b401,
8949 "CreateRenderPass: Subpass %u attempts to render to attachments with inconsistent sample counts.", i);
8955 static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
8956 if (index == VK_ATTACHMENT_UNUSED) return;
8958 if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
// vkCreateRenderPass: validate attachment usage, stage masks, and layouts;
// on success build the subpass DAG and a RENDER_PASS_STATE (including
// first-use read/write classification per attachment) and store it in
// renderPassMap keyed by the new handle.
8961 VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8962 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
8964 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8966 unique_lock_t lock(global_lock);
8967 // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
// --- Validation phase (under lock) ---
8969 skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
8970 for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
// Geometry/tessellation stage bits require the matching device features.
8971 skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].srcStageMask, "vkCreateRenderPass()",
8972 VALIDATION_ERROR_13e006b8, VALIDATION_ERROR_13e006bc);
8973 skip |= ValidateStageMaskGsTsEnables(dev_data, pCreateInfo->pDependencies[i].dstStageMask, "vkCreateRenderPass()",
8974 VALIDATION_ERROR_13e006ba, VALIDATION_ERROR_13e006be);
8977 skip |= ValidateLayouts(dev_data, device, pCreateInfo);
8982 return VK_ERROR_VALIDATION_FAILED_EXT;
// --- Driver call and state recording ---
8985 VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8987 if (VK_SUCCESS == result) {
8990 std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8991 std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8992 std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
8993 skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);
8995 auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
8996 render_pass->renderPass = *pRenderPass;
8997 render_pass->hasSelfDependency = has_self_dependency;
8998 render_pass->subpassToNode = subpass_to_node;
8999 render_pass->subpass_to_dependency_index = subpass_to_dep_index;
// Classify each attachment's first use: color/resolve/depth are writes,
// input attachments are reads.
9001 for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9002 const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9003 for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9004 MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
9006 // resolve attachments are considered to be written
9007 if (subpass.pResolveAttachments) {
9008 MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
9011 if (subpass.pDepthStencilAttachment) {
9012 MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
9014 for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9015 MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
9019 dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
9024 static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
9025 UNIQUE_VALIDATION_ERROR_CODE error_code) {
9027 if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9028 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9029 HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
// Check that pRenderPassBegin->renderArea lies entirely within the bound
// framebuffer's width/height. Returns true if an error was logged.
// NOTE(review): the `skip` declaration and final `return skip;` are not
// visible in this extract — presumably truncated.
9035 static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9037 const safe_VkFramebufferCreateInfo *pFramebufferInfo =
9038 &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
// Either offset negative, or offset+extent spilling past the framebuffer edge.
9039 if (pRenderPassBegin->renderArea.offset.x < 0 ||
9040 (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9041 pRenderPassBegin->renderArea.offset.y < 0 ||
9042 (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9043 skip |= static_cast<bool>(log_msg(
9044 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9045 DRAWSTATE_INVALID_RENDER_AREA,
9046 "Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
9047 "%d, height %d. Framebuffer: width %d, height %d.",
9048 pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9049 pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9054 // If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
9055 // [load|store]Op flag must be checked
9056 // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
9057 template <typename T>
9058 static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
9059 if (color_depth_op != op && stencil_op != op) {
9062 bool check_color_depth_load_op = !FormatIsStencilOnly(format);
9063 bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
9065 return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
// vkCmdBeginRenderPass: validate the begin against the render pass and
// framebuffer state (clear-value count, render area, layouts, compatibility,
// nesting, queue flags), queue deferred image-memory validity checks keyed on
// each attachment's load op, then update command-buffer state and record the
// layout transitions for the first subpass.
9068 VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
9069 VkSubpassContents contents) {
9071 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9072 unique_lock_t lock(global_lock);
9073 GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
9074 auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
9075 auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9077 if (render_pass_state) {
9078 uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
9079 cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
// Per attachment: defer a queue-submit-time memory-validity action chosen by
// the (format-aspect-aware) load op — CLEAR/DONT_CARE set validity, LOAD checks it.
9080 for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
9081 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9082 auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
9083 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
9084 VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9085 clear_op_size = static_cast<uint32_t>(i) + 1;
9086 std::function<bool()> function = [=]() {
9087 SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
9090 cb_node->queue_submit_functions.push_back(function);
9091 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
9092 pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
9093 std::function<bool()> function = [=]() {
9094 SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
9097 cb_node->queue_submit_functions.push_back(function);
9098 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
9099 pAttachment->stencilLoadOp, VK_ATTACHMENT_LOAD_OP_LOAD)) {
9100 std::function<bool()> function = [=]() {
9101 return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
9102 "vkCmdBeginRenderPass()");
9104 cb_node->queue_submit_functions.push_back(function);
// Independently of the load op, a first-use read also requires valid memory.
9106 if (render_pass_state->attachment_first_read[i]) {
9107 std::function<bool()> function = [=]() {
9108 return ValidateImageMemoryIsValid(dev_data, GetImageState(dev_data, fb_info.image),
9109 "vkCmdBeginRenderPass()");
9111 cb_node->queue_submit_functions.push_back(function);
// pClearValues must cover the highest attachment index that is cleared.
9114 if (clear_op_size > pRenderPassBegin->clearValueCount) {
9115 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9116 HandleToUint64(render_pass_state->renderPass), VALIDATION_ERROR_1200070c,
9117 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
9118 "must be at least %u entries in pClearValues array to account for the highest index attachment in "
9119 "renderPass 0x%" PRIx64
9120 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
9121 "attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
9122 "that aren't cleared they will be ignored.",
9123 pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
9124 clear_op_size, clear_op_size - 1);
9126 skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9127 skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
9128 GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
// Different handles may still be compatible render passes; check explicitly.
9129 if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
9130 skip |= validateRenderPassCompatibility(dev_data, "render pass", render_pass_state, "framebuffer",
9131 framebuffer->rp_state.get(), "vkCmdBeginRenderPass()",
9132 VALIDATION_ERROR_12000710);
9134 skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
9135 skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
9136 skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
9137 skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
9138 VALIDATION_ERROR_17a02415);
9139 skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
// --- State update phase ---
9140 cb_node->activeRenderPass = render_pass_state;
9141 // This is a shallow copy as that is all that is needed for now
9142 cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
9143 cb_node->activeSubpass = 0;
9144 cb_node->activeSubpassContents = contents;
9145 cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
9146 // Connect this framebuffer and its children to this cmdBuffer
9147 AddFramebufferBinding(dev_data, cb_node, framebuffer);
9148 // Connect this RP to cmdBuffer
9149 addCommandBufferBinding(&render_pass_state->cb_bindings,
9150 {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_node);
9151 // transition attachments to the correct layouts for beginning of renderPass and first subpass
9152 TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
9157 dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
// vkCmdNextSubpass: validate (primary CB, graphics queue, inside a render
// pass, not past the final subpass), call down the chain, then advance the
// active subpass and transition attachment layouts for it.
9161 VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9163 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9164 unique_lock_t lock(global_lock);
9165 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9167 skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
9168 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
9169 skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9170 skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);
// Already on the last subpass — there is nothing to advance to.
9172 auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
9173 if (pCB->activeSubpass == subpassCount - 1) {
9174 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9175 HandleToUint64(commandBuffer), VALIDATION_ERROR_1b60071a,
9176 "vkCmdNextSubpass(): Attempted to advance beyond final subpass.");
9183 dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
// State update: move to the next subpass and apply its layout transitions.
9187 pCB->activeSubpass++;
9188 pCB->activeSubpassContents = contents;
9189 TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
9190 GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
// vkCmdEndRenderPass: validate (final subpass reached, primary CB, graphics
// queue, inside a render pass), queue store-op-driven image-memory validity
// updates for submit time, call down the chain, then apply the final layout
// transitions and clear the active render-pass state.
9194 VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9196 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9197 unique_lock_t lock(global_lock);
9198 auto pCB = GetCBNode(dev_data, commandBuffer);
9199 FRAMEBUFFER_STATE *framebuffer = NULL;
9201 RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
9202 framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
// Ending early (before the last subpass) is an error.
9204 if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
9205 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9206 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
9207 VALIDATION_ERROR_1b00071c, "vkCmdEndRenderPass(): Called before reaching final subpass.");
// Per attachment: STORE marks the image memory valid at submit time,
// DONT_CARE marks it invalid (aspect-aware via the format helper).
9210 for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
9211 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9212 auto pAttachment = &rp_state->createInfo.pAttachments[i];
9213 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp, pAttachment->stencilStoreOp,
9214 VK_ATTACHMENT_STORE_OP_STORE)) {
9215 std::function<bool()> function = [=]() {
9216 SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), true);
9219 pCB->queue_submit_functions.push_back(function);
9220 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
9221 pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9222 std::function<bool()> function = [=]() {
9223 SetImageMemoryValid(dev_data, GetImageState(dev_data, fb_info.image), false);
9226 pCB->queue_submit_functions.push_back(function);
9230 skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass()", VALIDATION_ERROR_1b000017);
9231 skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
9232 skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
9233 skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9239 dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
// State update: final layout transitions, then drop the active-pass bindings.
9243 TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
9244 pCB->activeRenderPass = nullptr;
9245 pCB->activeSubpass = 0;
9246 pCB->activeFramebuffer = VK_NULL_HANDLE;
// For vkCmdExecuteCommands: if a secondary command buffer's inheritance info
// names a framebuffer, it must (a) match the primary's active framebuffer and
// (b) refer to a known framebuffer object. Returns true if an error was
// logged; secondaries with no inheritance info or a null framebuffer pass.
// NOTE(review): |caller| is not referenced on any visible line — presumably
// used on a truncated line; confirm upstream.
9250 static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9251 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
9253 if (!pSubCB->beginInfo.pInheritanceInfo) {
9256 VkFramebuffer primary_fb = pCB->activeFramebuffer;
9257 VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9258 if (secondary_fb != VK_NULL_HANDLE) {
9259 if (primary_fb != secondary_fb) {
9260 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9261 HandleToUint64(primaryBuffer), VALIDATION_ERROR_1b2000c6,
9262 "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
9263 " which has a framebuffer 0x%" PRIx64
9264 " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
9265 HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb));
// The inherited framebuffer handle must resolve to tracked state.
9267 auto fb = GetFramebufferState(dev_data, secondary_fb);
9269 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9270 HandleToUint64(primaryBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9271 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9272 " which has invalid framebuffer 0x%" PRIx64 ".",
9273 HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb));
// For vkCmdExecuteCommands: cross-check a secondary command buffer against the
// primary's state — active pipeline-statistics queries must be a superset of
// the secondary's inherited statistics, the secondary must not start a query
// of a type already active on the primary, and both buffers must come from
// pools of the same queue family. Returns true if an error was logged.
9280 static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9282 unordered_set<int> activeTypes;
// Pass 1: walk the primary's active queries.
9283 for (auto queryObject : pCB->activeQueries) {
9284 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9285 if (queryPoolData != dev_data->queryPoolMap.end()) {
9286 if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9287 pSubCB->beginInfo.pInheritanceInfo) {
// The secondary's inherited statistics bits must all be enabled on the pool.
9288 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9289 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9291 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9292 HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1b2000d0,
9293 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9294 " which has invalid active query pool 0x%" PRIx64
9295 ". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
9296 HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first));
9299 activeTypes.insert(queryPoolData->second.createInfo.queryType);
// Pass 2: the secondary must not have started a query whose type is already
// active on the primary.
9302 for (auto queryObject : pSubCB->startedQueries) {
9303 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9304 if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9305 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9306 HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9307 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9308 " which has invalid active query pool 0x%" PRIx64
9309 " of type %d but a query of that type has been started on secondary Cmd Buffer 0x%" PRIx64 ".",
9310 HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first),
9311 queryPoolData->second.createInfo.queryType, HandleToUint64(pSubCB->commandBuffer));
// Queue-family check: both command pools must target the same family.
9315 auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
9316 auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
9317 if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
9318 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9319 HandleToUint64(pSubCB->commandBuffer), DRAWSTATE_INVALID_QUEUE_FAMILY,
9320 "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIx64
9321 " created in queue family %d has secondary command buffer 0x%" PRIx64 " created in queue family %d.",
9322 HandleToUint64(pCB->commandBuffer), primary_pool->queueFamilyIndex, HandleToUint64(pSubCB->commandBuffer),
9323 secondary_pool->queueFamilyIndex);
9329 VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
9330 const VkCommandBuffer *pCommandBuffers) {
9332 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9333 unique_lock_t lock(global_lock);
9334 GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9336 GLOBAL_CB_NODE *pSubCB = NULL;
9337 for (uint32_t i = 0; i < commandBuffersCount; i++) {
9338 pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
9340 if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9342 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9343 HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000b0,
9344 "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%" PRIx64
9345 " in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.",
9346 HandleToUint64(pCommandBuffers[i]), i);
9347 } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9348 if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
9349 auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9350 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9351 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9352 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9353 VALIDATION_ERROR_1b2000c0,
9354 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9355 ") executed within render pass (0x%" PRIx64
9356 ") must have had vkBeginCommandBuffer() called w/ "
9357 "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9358 HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->activeRenderPass->renderPass));
9360 // Make sure render pass is compatible with parent command buffer pass if has continue
9361 if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
9362 skip |= validateRenderPassCompatibility(dev_data, "primary command buffer", pCB->activeRenderPass,
9363 "secondary command buffer", secondary_rp_state,
9364 "vkCmdExecuteCommands()", VALIDATION_ERROR_1b2000c4);
9366 // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
9368 validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB, "vkCmdExecuteCommands()");
9369 if (!pSubCB->cmd_execute_commands_functions.empty()) {
9370 // Inherit primary's activeFramebuffer and while running validate functions
9371 for (auto &function : pSubCB->cmd_execute_commands_functions) {
9372 skip |= function(pCB, pCB->activeFramebuffer);
9378 // TODO(mlentine): Move more logic into this method
9379 skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9380 skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
9381 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9382 if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
9383 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9384 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
9385 VALIDATION_ERROR_1b2000b4,
9386 "Attempt to simultaneously execute command buffer 0x%" PRIx64
9387 " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
9388 HandleToUint64(pCB->commandBuffer));
9390 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9391 // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9392 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9393 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9394 DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE,
9395 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9396 ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
9397 "command buffer (0x%" PRIx64
9398 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even "
9400 HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->commandBuffer));
9401 pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9404 if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
9406 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9407 HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000ca,
9408 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9409 ") cannot be submitted with a query in flight and inherited queries not supported on this device.",
9410 HandleToUint64(pCommandBuffers[i]));
9412 // TODO: separate validate from update! This is very tangled.
9413 // Propagate layout transitions to the primary cmd buffer
9414 for (auto ilm_entry : pSubCB->imageLayoutMap) {
9415 if (pCB->imageLayoutMap.find(ilm_entry.first) != pCB->imageLayoutMap.end()) {
9416 pCB->imageLayoutMap[ilm_entry.first].layout = ilm_entry.second.layout;
9418 assert(ilm_entry.first.hasSubresource);
9419 IMAGE_CMD_BUF_LAYOUT_NODE node;
9420 if (!FindCmdBufLayout(dev_data, pCB, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
9421 node.initialLayout = ilm_entry.second.initialLayout;
9423 node.layout = ilm_entry.second.layout;
9424 SetLayout(dev_data, pCB, ilm_entry.first, node);
9427 pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9428 pCB->linkedCommandBuffers.insert(pSubCB);
9429 pSubCB->linkedCommandBuffers.insert(pCB);
9430 for (auto &function : pSubCB->queryUpdates) {
9431 pCB->queryUpdates.push_back(function);
9433 for (auto &function : pSubCB->queue_submit_functions) {
9434 pCB->queue_submit_functions.push_back(function);
9437 skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
9439 ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
9440 VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
9441 skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9444 if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
// Intercept of vkMapMemory. Validates that the memory object is host-visible and
// that the requested (offset, size) range is legal, then forwards the call to the
// driver and records the mapped range so later flush/invalidate calls can be
// validated against it.
9447 VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
9449 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9452 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9453 unique_lock_t lock(global_lock);
9454 DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
9456 // TODO : This could me more fine-grained to track just region that is valid
9457 mem_info->global_valid = true;
// end_offset is the inclusive index of the last mapped byte; VK_WHOLE_SIZE maps
// through the end of the allocation.
9458 auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
9459 skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
9460 // TODO : Do we need to create new "bound_range" for the mapped range?
9461 SetMemRangesValid(dev_data, mem_info, offset, end_offset);
9462 if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
9463 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
// NOTE(review): plain '=' here discards any 'skip' state accumulated above —
// looks like it should be '|='; confirm against upstream before changing.
9464 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9465 HandleToUint64(mem), VALIDATION_ERROR_31200554,
9466 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIx64 ".",
9467 HandleToUint64(mem));
9470 skip |= ValidateMapMemRange(dev_data, mem, offset, size);
9474 result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
9475 if (VK_SUCCESS == result) {
9477 // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
// Record the mapped range and set up shadow-copy tracking (presumably for
// non-coherent memory guard-byte checks — see ValidateAndCopyNoncoherentMemoryToDriver).
9478 storeMemRanges(dev_data, mem, offset, size);
9479 initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
// Intercept of vkUnmapMemory. Removes the layer's mapped-range bookkeeping for
// this memory object, then forwards the call to the driver.
9486 VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9487 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9490 unique_lock_t lock(global_lock);
9491 skip |= deleteMemRanges(dev_data, mem);
9494 dev_data->dispatch_table.UnmapMemory(device, mem);
// Checks that every VkMappedMemoryRange passed to vkFlush/InvalidateMappedMemoryRanges
// lies within the range that was recorded when the memory object was mapped.
// Returns true if any range is out of bounds (validation error logged).
9498 static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
9499 const VkMappedMemoryRange *pMemRanges) {
9501 for (uint32_t i = 0; i < memRangeCount; ++i) {
9502 auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
// VK_WHOLE_SIZE: only the lower bound (offset) can be checked; the upper bound is
// implicitly the end of the mapping.
9504 if (pMemRanges[i].size == VK_WHOLE_SIZE) {
9505 if (mem_info->mem_range.offset > pMemRanges[i].offset) {
9507 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9508 HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055c,
9509 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
9510 ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
9511 funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
// Explicit size: check both that the range starts at/after the mapped offset and
// that it ends at/before the end of the mapped region.
9514 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
9515 ? mem_info->alloc_info.allocationSize
9516 : (mem_info->mem_range.offset + mem_info->mem_range.size);
9517 if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
9518 (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
9520 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9521 HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055a,
9522 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
9523 ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
9524 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9525 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
// For memory objects that have a layer-allocated shadow copy (non-coherent memory),
// verify the guard padding before and after the user-visible region is intact
// (detecting application under/overflow writes), then copy the user data from the
// shadow buffer into the real driver mapping. Returns true if corruption was found.
9533 static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
9534 const VkMappedMemoryRange *mem_ranges) {
9536 for (uint32_t i = 0; i < mem_range_count; ++i) {
9537 auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9539 if (mem_info->shadow_copy) {
9540 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9541 ? mem_info->mem_range.size
9542 : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
9543 char *data = static_cast<char *>(mem_info->shadow_copy);
// Leading guard band: bytes [0, shadow_pad_size) must still hold the fill value.
9544 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
9545 if (data[j] != NoncoherentMemoryFillValue) {
9546 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9547 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9548 MEMTRACK_INVALID_MAP, "Memory underflow was detected on mem obj 0x%" PRIx64,
9549 HandleToUint64(mem_ranges[i].memory));
// Trailing guard band: bytes just past the user region must also be untouched.
9552 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
9553 if (data[j] != NoncoherentMemoryFillValue) {
9554 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9555 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9556 MEMTRACK_INVALID_MAP, "Memory overflow was detected on mem obj 0x%" PRIx64,
9557 HandleToUint64(mem_ranges[i].memory));
// Push the application's writes (which landed in the shadow copy) down to the
// driver's actual mapping before the flush reaches the ICD.
9560 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
// Mirror of ValidateAndCopyNoncoherentMemoryToDriver for the invalidate direction:
// refresh the layer's shadow copy from the driver's mapping after an invalidate.
9567 static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
9568 for (uint32_t i = 0; i < mem_range_count; ++i) {
9569 auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9570 if (mem_info && mem_info->shadow_copy) {
// NOTE(review): the VK_WHOLE_SIZE branch subtracts mem_ranges[i].offset here,
// while the flush-side twin subtracts mem_info->mem_range.offset — confirm this
// asymmetry is intentional.
9571 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9572 ? mem_info->mem_range.size
9573 : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
9574 char *data = static_cast<char *>(mem_info->shadow_copy);
9575 memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
// Checks each mapped memory range's offset and size against the device's
// nonCoherentAtomSize alignment requirement (VUs 0c20055e / 0c200adc).
9580 static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
9581 const VkMappedMemoryRange *mem_ranges) {
9583 for (uint32_t i = 0; i < mem_range_count; ++i) {
9584 uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
9585 if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
// NOTE(review): both log calls pass mem_ranges->memory (element 0) as the object
// handle, not mem_ranges[i].memory — message text uses index i correctly; confirm.
9586 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9587 HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c20055e,
9588 "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
9589 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9590 func_name, i, mem_ranges[i].offset, atom_size);
// VK_WHOLE_SIZE is exempt from the size-alignment rule.
9592 if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
9593 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9594 HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c200adc,
9595 "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
9596 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9597 func_name, i, mem_ranges[i].size, atom_size);
// Pre-call validation for vkFlushMappedMemoryRanges: checks shadow-copy guard
// bytes (copying app data to the driver as a side effect) and verifies each range
// lies within its recorded mapping. Returns true to skip the driver call.
9603 static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9604 const VkMappedMemoryRange *mem_ranges) {
9606 lock_guard_t lock(global_lock);
9607 skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
9608 skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
// Intercept of vkFlushMappedMemoryRanges: forwards to the driver only if
// pre-call validation passed; otherwise returns VK_ERROR_VALIDATION_FAILED_EXT.
9612 VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9613 const VkMappedMemoryRange *pMemRanges) {
9614 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9615 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9617 if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9618 result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
// Pre-call validation for vkInvalidateMappedMemoryRanges: only range-vs-mapping
// bounds are checked here (no shadow-copy work until after the driver call).
9623 static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9624 const VkMappedMemoryRange *mem_ranges) {
9626 lock_guard_t lock(global_lock);
9627 skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
// Post-call record for vkInvalidateMappedMemoryRanges: pull the driver's current
// contents back into the layer's shadow copies so they stay coherent.
9631 static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9632 const VkMappedMemoryRange *mem_ranges) {
9633 lock_guard_t lock(global_lock);
9634 // Update our shadow copy with modified driver data
9635 CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
// Intercept of vkInvalidateMappedMemoryRanges: validate, forward, and on success
// refresh shadow copies from the driver.
9638 VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9639 const VkMappedMemoryRange *pMemRanges) {
9640 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9641 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9643 if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9644 result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9645 if (result == VK_SUCCESS) {
9646 PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
// Shared pre-call validation for vkBindImageMemory and vkBindImageMemory2[KHR]
// (api_name distinguishes the caller in messages). Checks memory binding state,
// that vkGetImageMemoryRequirements was called, offset alignment, size, memory
// type compatibility, and dedicated-allocation constraints.
9652 static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9653 VkDeviceSize memoryOffset, const char *api_name) {
9656 unique_lock_t lock(global_lock);
9657 // Track objects tied to memory
9658 uint64_t image_handle = HandleToUint64(image);
9659 skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
9660 if (!image_state->memory_requirements_checked) {
9661 // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
9662 // BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
9663 // vkGetImageMemoryRequirements()
9664 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9665 image_handle, DRAWSTATE_INVALID_IMAGE,
9666 "%s: Binding memory to image 0x%" PRIx64
9667 " but vkGetImageMemoryRequirements() has not been called on that image.",
// NOTE(review): image_handle is already a uint64_t — HandleToUint64 is applied a
// second time here; likely a harmless no-op on most platforms but confirm.
9668 api_name, HandleToUint64(image_handle));
9669 // Make the call for them so we can verify the state
9671 dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
9675 // Validate bound memory range information
9676 auto mem_info = GetMemObjInfo(dev_data, mem);
9678 skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9679 image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
9680 skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
9681 VALIDATION_ERROR_1740082e);
9684 // Validate memory requirements alignment
9685 if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
9686 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9687 image_handle, VALIDATION_ERROR_17400830,
9688 "%s: memoryOffset is 0x%" PRIxLEAST64
9689 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
9690 ", returned from a call to vkGetImageMemoryRequirements with image.",
9691 api_name, memoryOffset, image_state->requirements.alignment);
9695 // Validate memory requirements size
9696 if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
9697 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9698 image_handle, VALIDATION_ERROR_17400832,
9699 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
9700 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
9701 ", returned from a call to vkGetImageMemoryRequirements with image.",
9702 api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
9705 // Validate dedicated allocation
// For VK_KHR_dedicated_allocation memory, binding must target exactly the
// dedicated image at offset zero.
9706 if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
9707 // TODO: Add vkBindImageMemory2KHR error message when added to spec.
9708 auto validation_error = VALIDATION_ERROR_UNDEFINED;
9709 if (strcmp(api_name, "vkBindImageMemory()") == 0) {
9710 validation_error = VALIDATION_ERROR_17400bca;
// NOTE(review): this error about an *image* binding is logged with
// VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT — looks like copy/paste from the buffer
// path; confirm and fix upstream.
9713 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9714 image_handle, validation_error,
9715 "%s: for dedicated memory allocation 0x%" PRIxLEAST64
9716 ", VkMemoryDedicatedAllocateInfoKHR::image 0x%" PRIXLEAST64 " must be equal to image 0x%" PRIxLEAST64
9717 " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
9718 api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_image), image_handle, memoryOffset);
// Shared post-call record for vkBindImageMemory[2]: on successful bind, record
// the image's bound memory range and link the image to the memory object.
9725 static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9726 VkDeviceSize memoryOffset, const char *api_name) {
9728 unique_lock_t lock(global_lock);
9729 // Track bound memory range information
9730 auto mem_info = GetMemObjInfo(dev_data, mem);
9732 InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9733 image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
9736 // Track objects tied to memory
9737 uint64_t image_handle = HandleToUint64(image);
9738 SetMemBinding(dev_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage, api_name);
// Intercept of vkBindImageMemory: validate, forward, and record state on success.
9742 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9743 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9744 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9745 IMAGE_STATE *image_state;
9747 unique_lock_t lock(global_lock);
9748 image_state = GetImageState(dev_data, image);
9750 bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
9752 result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
9753 if (result == VK_SUCCESS) {
9754 PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
// Pre-call validation for vkBindImageMemory2[KHR]: resolve each bind's IMAGE_STATE
// into the caller-provided vector (reused by the post-call record), then run the
// shared single-bind validation per element with an indexed api_name string.
9760 static bool PreCallValidateBindImageMemory2(layer_data *dev_data, std::vector<IMAGE_STATE *> *image_state, uint32_t bindInfoCount,
9761 const VkBindImageMemoryInfoKHR *pBindInfos) {
9763 unique_lock_t lock(global_lock);
9764 for (uint32_t i = 0; i < bindInfoCount; i++) {
9765 (*image_state)[i] = GetImageState(dev_data, pBindInfos[i].image);
9770 for (uint32_t i = 0; i < bindInfoCount; i++) {
// api_name buffer is declared on an elided line; sprintf tags messages with the
// pBindInfos index for easier diagnosis.
9771 sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i);
9772 skip |= PreCallValidateBindImageMemory(dev_data, pBindInfos[i].image, (*image_state)[i], pBindInfos[i].memory,
9773 pBindInfos[i].memoryOffset, api_name);
// Post-call record for vkBindImageMemory2[KHR]: record each successful bind via
// the shared single-bind record path.
9778 static void PostCallRecordBindImageMemory2(layer_data *dev_data, const std::vector<IMAGE_STATE *> &image_state,
9779 uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
9780 for (uint32_t i = 0; i < bindInfoCount; i++) {
9781 PostCallRecordBindImageMemory(dev_data, pBindInfos[i].image, image_state[i], pBindInfos[i].memory,
9782 pBindInfos[i].memoryOffset, "vkBindImageMemory2()");
// Intercept of vkBindImageMemory2 (core promotion of the KHR entry point below;
// both share the same validate/record helpers).
9786 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount,
9787 const VkBindImageMemoryInfoKHR *pBindInfos) {
9788 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9789 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9790 std::vector<IMAGE_STATE *> image_state(bindInfoCount);
9791 if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
9792 result = dev_data->dispatch_table.BindImageMemory2(device, bindInfoCount, pBindInfos);
9793 if (result == VK_SUCCESS) {
9794 PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
// Intercept of vkBindImageMemory2KHR: identical to BindImageMemory2 except for
// the dispatch-table entry invoked.
9800 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
9801 const VkBindImageMemoryInfoKHR *pBindInfos) {
9802 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9803 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9804 std::vector<IMAGE_STATE *> image_state(bindInfoCount);
9805 if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
9806 result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
9807 if (result == VK_SUCCESS) {
9808 PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
// Intercept of vkSetEvent: update the layer's event state (signaled from the
// host), error if the event is currently in use by a command buffer, propagate
// the host stage bit to every queue's event map, then forward to the driver.
9814 VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9816 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9817 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9818 unique_lock_t lock(global_lock);
9819 auto event_state = GetEventNode(dev_data, event);
9821 event_state->needsSignaled = false;
9822 event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9823 if (event_state->write_in_use) {
9824 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9825 HandleToUint64(event), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
9826 "Cannot call vkSetEvent() on event 0x%" PRIx64 " that is already in use by a command buffer.",
9827 HandleToUint64(event));
9831 // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9832 // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9833 // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9834 for (auto queue_data : dev_data->queueMap) {
9835 auto event_entry = queue_data.second.eventToStageMap.find(event);
9836 if (event_entry != queue_data.second.eventToStageMap.end()) {
9837 event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9840 if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
// Pre-call validation for vkQueueBindSparse. Checks the fence is submittable,
// simulates wait/signal semaphore state across the batch to catch waits that can
// never be satisfied and double-signals, and verifies sparse-image binding
// prerequisites (memory requirements queried, metadata aspect bound).
9844 static bool PreCallValidateQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
9845 const VkBindSparseInfo *pBindInfo, VkFence fence) {
9846 auto pFence = GetFenceNode(dev_data, fence);
9847 bool skip = ValidateFenceForSubmit(dev_data, pFence);
// These sets track semaphore state *as it will evolve* over this submission,
// so intra-batch signal->wait chains validate correctly.
9852 unordered_set<VkSemaphore> signaled_semaphores;
9853 unordered_set<VkSemaphore> unsignaled_semaphores;
9854 unordered_set<VkSemaphore> internal_semaphores;
9855 for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9856 const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9858 std::vector<SEMAPHORE_WAIT> semaphore_waits;
9859 std::vector<VkSemaphore> semaphore_signals;
9860 for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
9861 VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
9862 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
// Only internally-scoped semaphores can be tracked; external ones may be
// signaled outside our view.
9863 if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
9864 if (unsignaled_semaphores.count(semaphore) ||
9865 (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
9866 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9867 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
9868 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
9869 HandleToUint64(queue), HandleToUint64(semaphore));
9871 signaled_semaphores.erase(semaphore);
9872 unsignaled_semaphores.insert(semaphore);
// A temporary-external semaphore reverts to internal scope after its first wait.
9875 if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
9876 internal_semaphores.insert(semaphore);
9879 for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
9880 VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
9881 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
9882 if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
9883 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
9884 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9885 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
9886 "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
9887 " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
9888 HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
9890 unsignaled_semaphores.erase(semaphore);
9891 signaled_semaphores.insert(semaphore);
9895 // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound
9896 std::unordered_set<IMAGE_STATE *> sparse_images;
9897 // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
9898 for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
9899 const auto &opaque_bind = bindInfo.pImageOpaqueBinds[i];
9900 auto image_state = GetImageState(dev_data, opaque_bind.image);
9901 sparse_images.insert(image_state);
9902 if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
9903 // For now just warning if sparse image binding occurs without calling to get reqs first
// NOTE(review): 'return log_msg(...)' exits immediately with the warning's
// result, skipping all remaining validation for this submission — differs from
// the 'skip |=' accumulation used everywhere else; confirm intent.
9904 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9905 HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
9906 "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
9907 " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
9908 HandleToUint64(image_state->image));
9910 for (uint32_t j = 0; j < opaque_bind.bindCount; ++j) {
9911 if (opaque_bind.pBinds[j].flags & VK_IMAGE_ASPECT_METADATA_BIT) {
9912 image_state->sparse_metadata_bound = true;
9916 for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
9917 auto image_state = GetImageState(dev_data, bindInfo.pImageOpaqueBinds[i].image);
9918 sparse_images.insert(image_state);
9919 if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
9920 // For now just warning if sparse image binding occurs without calling to get reqs first
9921 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9922 HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
9923 "vkQueueBindSparse(): Binding opaque sparse memory to image 0x%" PRIx64
9924 " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
9925 HandleToUint64(image_state->image));
9928 for (const auto &sparse_image_state : sparse_images) {
9929 if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
9930 // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
9931 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9932 HandleToUint64(sparse_image_state->image), MEMTRACK_INVALID_STATE,
9933 "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
9934 " which requires a metadata aspect but no binding with VK_IMAGE_ASPECT_METADATA_BIT set was made.",
9935 HandleToUint64(sparse_image_state->image));
// Post-call record for vkQueueBindSparse. Records the fence submission, the
// sparse memory bindings, and per-batch semaphore wait/signal state; external
// fences/semaphores cause work to be retired early since their wait side is
// invisible to the layer.
9942 static void PostCallRecordQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
9943 const VkBindSparseInfo *pBindInfo, VkFence fence) {
9944 uint64_t early_retire_seq = 0;
9945 auto pFence = GetFenceNode(dev_data, fence);
9946 auto pQueue = GetQueueState(dev_data, queue);
9949 if (pFence->scope == kSyncScopeInternal) {
// Treat a zero-bind submission as one so the fence still retires.
9950 SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
9951 if (!bindInfoCount) {
9952 // No work to do, just dropping a fence in the queue by itself.
9953 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
9954 std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
9957 // Retire work up until this fence early, we will not see the wait that corresponds to this signal
9958 early_retire_seq = pQueue->seq + pQueue->submissions.size();
// One-shot warning: external sync disables lifecycle validation from here on.
9959 if (!dev_data->external_sync_warning) {
9960 dev_data->external_sync_warning = true;
9961 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
9962 HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
9963 "vkQueueBindSparse(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
9964 " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
9966 HandleToUint64(fence), HandleToUint64(queue));
9971 for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9972 const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9973 // Track objects tied to memory
9974 for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9975 for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9976 auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
9977 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
9978 HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
9981 for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9982 for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9983 auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
9984 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
9985 HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
9988 for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
9989 for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
9990 auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
9991 // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
// The '* 4' assumes 4 bytes/texel — acknowledged as approximate by the TODO above.
9992 VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
9993 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
9994 HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
9998 std::vector<SEMAPHORE_WAIT> semaphore_waits;
9999 std::vector<VkSemaphore> semaphore_signals;
10000 std::vector<VkSemaphore> semaphore_externals;
10001 for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10002 VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10003 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10005 if (pSemaphore->scope == kSyncScopeInternal) {
10006 if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
10007 semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
10008 pSemaphore->in_use.fetch_add(1);
// Waiting consumes the signal: reset signaler and signaled state.
10010 pSemaphore->signaler.first = VK_NULL_HANDLE;
10011 pSemaphore->signaled = false;
10013 semaphore_externals.push_back(semaphore);
10014 pSemaphore->in_use.fetch_add(1);
// First wait on a temporary-external semaphore restores internal scope.
10015 if (pSemaphore->scope == kSyncScopeExternalTemporary) {
10016 pSemaphore->scope = kSyncScopeInternal;
10021 for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10022 VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10023 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10025 if (pSemaphore->scope == kSyncScopeInternal) {
// Record which queue/seq will signal this semaphore (+1: this submission's slot).
10026 pSemaphore->signaler.first = queue;
10027 pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
10028 pSemaphore->signaled = true;
10029 pSemaphore->in_use.fetch_add(1);
10030 semaphore_signals.push_back(semaphore);
10032 // Retire work up until this submit early, we will not see the wait that corresponds to this signal
10033 early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
10034 if (!dev_data->external_sync_warning) {
10035 dev_data->external_sync_warning = true;
10036 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10037 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10038 "vkQueueBindSparse(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
10039 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
10040 "associated objects.",
10041 HandleToUint64(semaphore), HandleToUint64(queue));
// Fence is attached only to the last batch of the submission.
10047 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
10048 bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
10051 if (early_retire_seq) {
10052 RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
// Intercept of vkQueueBindSparse: validate under the global lock, forward to the
// driver, then record resulting state (bindings, semaphores, fence).
10056 VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
10058 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10059 unique_lock_t lock(global_lock);
10060 bool skip = PreCallValidateQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
10063 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
10065 VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10068 PostCallRecordQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
// Intercept of vkCreateSemaphore: forward to the driver, and on success create
// the layer's tracking node (unsignaled, no signaler, internal scope).
10073 VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10074 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10075 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10076 VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10077 if (result == VK_SUCCESS) {
10078 lock_guard_t lock(global_lock);
// operator[] default-constructs the map entry; fields are then initialized.
10079 SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
10080 sNode->signaler.first = VK_NULL_HANDLE;
10081 sNode->signaler.second = 0;
10082 sNode->signaled = false;
10083 sNode->scope = kSyncScopeInternal;
10088 static bool PreCallValidateImportSemaphore(layer_data *dev_data, VkSemaphore semaphore, const char *caller_name) {
10089 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10090 VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
10093 skip |= ValidateObjectNotInUse(dev_data, sema_node, obj_struct, caller_name, VALIDATION_ERROR_UNDEFINED);
10098 static void PostCallRecordImportSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10099 VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
10100 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10101 if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
10102 if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
10103 sema_node->scope == kSyncScopeInternal) {
10104 sema_node->scope = kSyncScopeExternalTemporary;
10106 sema_node->scope = kSyncScopeExternalPermanent;
#ifdef VK_USE_PLATFORM_WIN32_KHR
// vkImportSemaphoreWin32HandleKHR: reject if the semaphore is in use, dispatch
// down the chain, then record the semaphore's new sync scope on success.
VKAPI_ATTR VkResult VKAPI_CALL
ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const bool skip = PreCallValidateImportSemaphore(device_data, pImportSemaphoreWin32HandleInfo->semaphore,
                                                     "vkImportSemaphoreWin32HandleKHR");

    if (!skip) {
        result = device_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
    }

    if (result == VK_SUCCESS) {
        PostCallRecordImportSemaphore(device_data, pImportSemaphoreWin32HandleInfo->semaphore,
                                      pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
    }
    return result;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
10131 VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
10132 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10133 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10134 bool skip = PreCallValidateImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
10137 result = dev_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
10140 if (result == VK_SUCCESS) {
10141 PostCallRecordImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
10142 pImportSemaphoreFdInfo->flags);
10147 static void PostCallRecordGetSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10148 VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
10149 SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10150 if (sema_node && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10151 // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
10152 sema_node->scope = kSyncScopeExternalPermanent;
#ifdef VK_USE_PLATFORM_WIN32_KHR
// vkGetSemaphoreWin32HandleKHR: dispatch, then record the export on success.
VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
                                                          const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
                                                          HANDLE *pHandle) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
    if (VK_SUCCESS == result) {
        PostCallRecordGetSemaphore(device_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
    }
    return result;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
10170 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
10171 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10172 VkResult result = dev_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
10174 if (result == VK_SUCCESS) {
10175 PostCallRecordGetSemaphore(dev_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
10180 static bool PreCallValidateImportFence(layer_data *dev_data, VkFence fence, const char *caller_name) {
10181 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10183 if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
10184 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10185 HandleToUint64(fence), VALIDATION_ERROR_UNDEFINED,
10186 "Cannot call %s on fence 0x%" PRIx64 " that is currently in use.", caller_name, HandleToUint64(fence));
10191 static void PostCallRecordImportFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
10192 VkFenceImportFlagsKHR flags) {
10193 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10194 if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
10195 if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
10196 fence_node->scope == kSyncScopeInternal) {
10197 fence_node->scope = kSyncScopeExternalTemporary;
10199 fence_node->scope = kSyncScopeExternalPermanent;
#ifdef VK_USE_PLATFORM_WIN32_KHR
// vkImportFenceWin32HandleKHR: reject if the fence is in flight, dispatch down
// the chain, then record the fence's new sync scope on success.
VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
                                                         const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const bool skip = PreCallValidateImportFence(device_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");

    if (!skip) {
        result = device_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
    }

    if (result == VK_SUCCESS) {
        PostCallRecordImportFence(device_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
                                  pImportFenceWin32HandleInfo->flags);
    }
    return result;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
10223 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
10224 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10225 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10226 bool skip = PreCallValidateImportFence(dev_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
10229 result = dev_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
10232 if (result == VK_SUCCESS) {
10233 PostCallRecordImportFence(dev_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
10238 static void PostCallRecordGetFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
10239 FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10241 if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10242 // Export with reference transference becomes external
10243 fence_node->scope = kSyncScopeExternalPermanent;
10244 } else if (fence_node->scope == kSyncScopeInternal) {
10245 // Export with copy transference has a side effect of resetting the fence
10246 fence_node->state = FENCE_UNSIGNALED;
#ifdef VK_USE_PLATFORM_WIN32_KHR
// vkGetFenceWin32HandleKHR: dispatch, then record the export on success.
VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
                                                      HANDLE *pHandle) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const VkResult result = device_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
    if (VK_SUCCESS == result) {
        PostCallRecordGetFence(device_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
    }
    return result;
}
#endif  // VK_USE_PLATFORM_WIN32_KHR
10264 VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
10265 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10266 VkResult result = dev_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
10268 if (result == VK_SUCCESS) {
10269 PostCallRecordGetFence(dev_data, pGetFdInfo->fence, pGetFdInfo->handleType);
10274 VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
10275 const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10276 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10277 VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10278 if (result == VK_SUCCESS) {
10279 lock_guard_t lock(global_lock);
10280 dev_data->eventMap[*pEvent].needsSignaled = false;
10281 dev_data->eventMap[*pEvent].write_in_use = 0;
10282 dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
// Parameter validation for vkCreateSwapchainKHR.  Checks pCreateInfo against
// the cached results of the vkGetPhysicalDeviceSurface*KHR queries (support,
// capabilities, formats, present modes) and against the surface's existing
// swapchain state.  Returns true (skip the call) when a validation error is
// logged with a "terminate on error" disposition.
static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
                                              VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
                                              SWAPCHAIN_NODE *old_swapchain_state) {
    // The swapchain currently considered live for this surface, if any.
    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;

    // TODO: revisit this. some of these rules are being relaxed.

    // All physical devices and queue families are required to be able
    // to present to any native window on Android; require the
    // application to have established support on any other platform.
    if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
        auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
            // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
            return (qs.first.gpu == dev_data->physical_device) && qs.second;
        const auto &support = surface_state->gpu_queue_support;
        bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);

        if (!is_supported) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ec,
                        "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
                        "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
                        "this surface for at least one queue family of this device.",

    // A surface may only carry one non-retired swapchain at a time; anything
    // other than oldSwapchain already attached is an error.
    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS,
                    "%s: surface has an existing swapchain other than oldSwapchain", func_name))
    // oldSwapchain, when provided, must have been created against this same surface.
    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                    HandleToUint64(pCreateInfo->oldSwapchain), DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
                    "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
    // Zero-area swapchains are never legal.
    if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), VALIDATION_ERROR_14600d32,
                    "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
                    pCreateInfo->imageExtent.height))
    auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
    if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    HandleToUint64(dev_data->physical_device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
                    "%s: surface capabilities not retrieved for this physical device", func_name))
    } else {  // have valid capabilities
        auto &capabilities = physical_device_state->surfaceCapabilities;
        // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
        if (pCreateInfo->minImageCount < capabilities.minImageCount) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ee,
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
        // maxImageCount == 0 means "no upper limit", so only check when non-zero.
        if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f0,
                        "%s called with minImageCount = %d, which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
                        func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
        // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
        if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
            (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
            (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
            (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f4,
                        "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
                        "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
                        "maxImageExtent = (%d,%d).",
                        func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                        capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
                        capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
        // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedTransforms.
        if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
            !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            // NOTE(review): 'str' is a local scratch buffer whose declaration is
            // not visible in this excerpt; consider snprintf for bounds safety.
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
                    string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedTransforms) {
                    const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
                    sprintf(str, " %s\n", newStr);
                    errorString += str;
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009fe, "%s.", errorString.c_str()))
        // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
        // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
        if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
            !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
            // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
            // it up a little at a time, and then log it:
            std::string errorString = "";
            // Here's the first part of the message:
            sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n",
                    func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
            errorString += str;
            for (int i = 0; i < 32; i++) {
                // Build up the rest of the message:
                if ((1 << i) & capabilities.supportedCompositeAlpha) {
                    const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
                    sprintf(str, " %s\n", newStr);
                    errorString += str;
            // Log the message that we've built up:
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a00, "%s.", errorString.c_str()))
        // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
        if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f6,
                        "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
                        pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
        // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
        if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f8,
                        "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
                        func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
                    "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
        // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
        bool foundFormat = false;
        bool foundColorSpace = false;
        bool foundMatch = false;
        // Look for an exact (format, colorSpace) pair; track partial matches so
        // the error message can name which half was unsupported.
        for (auto const &format : physical_device_state->surface_formats) {
            if (pCreateInfo->imageFormat == format.format) {
                // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
                foundFormat = true;
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                if (pCreateInfo->imageColorSpace == format.colorSpace) {
                    foundColorSpace = true;
        if (!foundFormat) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
                        "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
                        pCreateInfo->imageFormat))
        if (!foundColorSpace) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
                        "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
                        pCreateInfo->imageColorSpace))
    // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
    if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
        // FIFO is required to always be supported
        if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
                        "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
        // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
        bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
                                    pCreateInfo->presentMode) != physical_device_state->present_modes.end();
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a02,
                        "%s called with a non-supported presentMode (i.e. %s).", func_name,
                        string_VkPresentModeKHR(pCreateInfo->presentMode)))
    // Validate state for shared presentable case
    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
        if (!dev_data->extensions.vk_khr_shared_presentable_image) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), DRAWSTATE_EXTENSION_NOT_ENABLED,
                        "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
        } else if (pCreateInfo->minImageCount != 1) {
            // Shared presentable swapchains expose exactly one image.
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        HandleToUint64(dev_data->device), VALIDATION_ERROR_14600ace,
                        "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
                        func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
10535 static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
10536 VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
10537 SWAPCHAIN_NODE *old_swapchain_state) {
10538 if (VK_SUCCESS == result) {
10539 lock_guard_t lock(global_lock);
10540 auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
10541 if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
10542 VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
10543 swapchain_state->shared_presentable = true;
10545 surface_state->swapchain = swapchain_state.get();
10546 dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
10548 surface_state->swapchain = nullptr;
10550 // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
10551 if (old_swapchain_state) {
10552 old_swapchain_state->replaced = true;
10554 surface_state->old_swapchain = old_swapchain_state;
10558 VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10559 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
10560 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10561 auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
10562 auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
10564 if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapChainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
10565 return VK_ERROR_VALIDATION_FAILED_EXT;
10568 VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10570 PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
// vkDestroySwapchainKHR: purge all tracking state tied to the swapchain's
// images (cached subresource layouts, memory bindings, IMAGE_STATE entries),
// detach the swapchain from its owning surface, then call down the chain.
VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    unique_lock_t lock(global_lock);
    auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
    if (swapchain_data) {
        if (swapchain_data->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->images) {
                // Drop every cached layout entry for each subresource of this image.
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        dev_data->imageSubresourceMap.erase(image_sub);
                // Clear memory bindings, then the image's state object itself.
                skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
                dev_data->imageMap.erase(swapchain_image);
        // Unlink from the owning surface before erasing the tracking entry.
        auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
        if (surface_state) {
            if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
            if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
        dev_data->swapchainMap.erase(swapchain);
    // Only forward the destroy when validation did not flag an error.
    if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
// Validation for vkGetSwapchainImagesKHR: enforce the two-call idiom.  Warn if
// pSwapchainImages is non-NULL before a count-only query was ever made, and
// flag a request for more images than the count previously returned.
static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
    if (swapchain_state && pSwapchainImages) {
        lock_guard_t lock(global_lock);
        // Compare the preliminary value of *pSwapchainImageCount with the value this time:
        if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            HandleToUint64(device), SWAPCHAIN_PRIOR_COUNT,
                            "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has "
                            "been seen for pSwapchainImages.");
        } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
            // Asking for more images than were previously reported is an error.
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                    HandleToUint64(device), SWAPCHAIN_INVALID_COUNT,
                    "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
                    "value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
                    *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
// Post-call state tracking for vkGetSwapchainImagesKHR.  Sizes the tracked
// image list to the returned count, creates IMAGE_STATE/layout-tracking
// entries for each newly returned image, and advances the swapchain's
// query-state machine (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS).
static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
                                                uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
    lock_guard_t lock(global_lock);
    if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
    if (pSwapchainImages) {
        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
        for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
            if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
            // New swapchain images start in UNDEFINED layout.
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_state->createInfo.imageFormat;
            // Add imageMap entries for each swapchain image
            // Synthesize a VkImageCreateInfo mirroring the swapchain's creation
            // parameters so the image can be validated like a normal 2D image.
            // NOTE(review): image_ci.sType is left at zero by the {} initializer
            // (never set to VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO) — TODO confirm intended.
            VkImageCreateInfo image_ci = {};
            image_ci.flags = 0;
            image_ci.imageType = VK_IMAGE_TYPE_2D;
            image_ci.format = swapchain_state->createInfo.imageFormat;
            image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
            image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
            image_ci.extent.depth = 1;
            image_ci.mipLevels = 1;
            image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
            image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
            image_ci.usage = swapchain_state->createInfo.imageUsage;
            image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
            device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
            auto &image_state = device_data->imageMap[pSwapchainImages[i]];
            image_state->valid = false;
            // Swapchain images are bound by the ICD, not via vkBindImageMemory;
            // mark them with the sentinel memory key.
            image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
            swapchain_state->images[i] = pSwapchainImages[i];
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            device_data->imageLayoutMap[subpair] = image_layout_node;
    // Count-only query: remember how many images the ICD reported.
    if (*pSwapchainImageCount) {
        if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
            swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
        swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
10683 VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
10684 VkImage *pSwapchainImages) {
10685 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10686 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10688 auto swapchain_state = GetSwapchainNode(device_data, swapchain);
10689 bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
10692 result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
10695 if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
10696 PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
// Layer intercept for vkQueuePresentKHR.
// Pre-call: under the global lock, validate wait semaphores, per-swapchain image state/layout,
// surface support, and the VkPresentRegionsKHR / VkPresentTimesInfoGOOGLE pNext structs.
// Post-call: update semaphore and per-image "acquired" state based on the per-swapchain results.
10701 VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10702 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10705 lock_guard_t lock(global_lock);
10706 auto queue_state = GetQueueState(dev_data, queue);
// Each wait semaphore must have a pending signal, otherwise the queue can never make forward progress.
10708 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10709 auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10710 if (pSemaphore && !pSemaphore->signaled) {
10711 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10712 DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10713 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10714 HandleToUint64(queue), HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
// Per-swapchain validation of the image being presented.
10718 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10719 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10720 if (swapchain_data) {
10721 if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
10723 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10724 HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
10725 "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
10726 pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
10728 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
10729 auto image_state = GetImageState(dev_data, image);
// Shared presentable images may be owned by the presentation engine after the first present;
// lock the layout so later layout validation doesn't flag transitions the app can no longer make.
10731 if (image_state->shared_presentable) {
10732 image_state->layout_locked = true;
10735 skip |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
// Presenting an image the app never acquired (or already released) is an error.
10737 if (!image_state->acquired) {
10739 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10740 HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
10741 "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
// Every tracked layout of the presented image must be PRESENT_SRC_KHR, or SHARED_PRESENT_KHR
// when VK_KHR_shared_presentable_image is enabled.
10744 vector<VkImageLayout> layouts;
10745 if (FindLayouts(dev_data, image, layouts)) {
10746 for (auto layout : layouts) {
10747 if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!dev_data->extensions.vk_khr_shared_presentable_image ||
10748 (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
10749 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10750 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), VALIDATION_ERROR_11200a20,
10751 "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
10752 "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
10753 string_VkImageLayout(layout));
10759 // All physical devices and queue families are required to be able
10760 // to present to any native window on Android; require the
10761 // application to have established support on any other platform.
10762 if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
10763 auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
10764 auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
// Distinguish "never queried" (warning about missing vkGetPhysicalDeviceSurfaceSupportKHR call)
// from "queried and unsupported" (hard error).
10766 if (support_it == surface_state->gpu_queue_support.end()) {
10768 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10769 HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE,
10770 "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
10771 } else if (!support_it->second) {
10773 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10774 HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_31800a18,
10775 "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
// Validate extension structs chained off pPresentInfo->pNext.
10780 if (pPresentInfo && pPresentInfo->pNext) {
10781 // Verify ext struct
10782 const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
10783 if (present_regions) {
// Each presented rectangle must lie within the swapchain's image extent and layer range.
10784 for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
10785 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10786 assert(swapchain_data);
10787 VkPresentRegionKHR region = present_regions->pRegions[i];
10788 for (uint32_t j = 0; j < region.rectangleCount; ++j) {
10789 VkRectLayerKHR rect = region.pRectangles[j];
10790 if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
10791 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10792 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
10793 VALIDATION_ERROR_11e009da,
10794 "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
10795 "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
10796 "than the corresponding swapchain's imageExtent.width (%i).",
10797 i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
10799 if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
10800 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10801 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
10802 VALIDATION_ERROR_11e009da,
10803 "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
10804 "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
10805 "than the corresponding swapchain's imageExtent.height (%i).",
10806 i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
10808 if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
10810 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10811 HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_11e009dc,
10812 "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
10813 "(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
10814 i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
// VkPresentTimesInfoGOOGLE must cover exactly the same swapchain count as the present info itself.
10820 const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
10821 if (present_times_info) {
10822 if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
10824 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10825 HandleToUint64(pPresentInfo->pSwapchains[0]),
10827 VALIDATION_ERROR_118009be,
10828 "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
10829 "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
10830 "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
10831 present_times_info->swapchainCount, pPresentInfo->swapchainCount);
10837 return VK_ERROR_VALIDATION_FAILED_EXT;
// Forward to the driver, then record state if the call reached the ICD.
10840 VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
10842 if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
10843 // Semaphore waits occur before error generation, if the call reached
10844 // the ICD. (Confirm?)
10845 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10846 auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10848 pSemaphore->signaler.first = VK_NULL_HANDLE;
10849 pSemaphore->signaled = false;
10853 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10854 // Note: this is imperfect, in that we can get confused about what
10855 // did or didn't succeed-- but if the app does that, it's confused
10856 // itself just as much.
// Per-swapchain result overrides the aggregate result when the app asked for pResults.
10857 auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
10859 if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue; // this present didn't actually happen.
10861 // Mark the image as having been released to the WSI
10862 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10863 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
10864 auto image_state = GetImageState(dev_data, image);
10865 image_state->acquired = false;
10868 // Note: even though presentation is directed to a queue, there is no
10869 // direct ordering between QP and subsequent work, so QP (and its
10870 // semaphore waits) /never/ participate in any completion proof.
// Validate vkCreateSharedSwapchainsKHR by running the single-swapchain validation over each
// element of pCreateInfos. Side effect: fills surface_state / old_swapchain_state (one entry per
// create-info, in order) for use by the post-call record step.
10876 static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
10877 const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
10878 std::vector<SURFACE_STATE *> &surface_state,
10879 std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
10880 if (pCreateInfos) {
10881 lock_guard_t lock(global_lock);
10882 for (uint32_t i = 0; i < swapchainCount; i++) {
10883 surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
10884 old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
// Build a per-call function name so validation messages identify which element failed.
10885 std::stringstream func_name;
10886 func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
10887 if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
10888 old_swapchain_state[i])) {
// Record layer state after vkCreateSharedSwapchainsKHR returns.
// On success: create a SWAPCHAIN_NODE per new swapchain (flagging shared-presentable modes) and
// attach it to its surface. On failure: detach any swapchain from the surfaces. Either way, each
// oldSwapchain is marked replaced, as the spec requires even on failure.
10896 static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
10897 const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
10898 std::vector<SURFACE_STATE *> &surface_state,
10899 std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
10900 if (VK_SUCCESS == result) {
10901 for (uint32_t i = 0; i < swapchainCount; i++) {
10902 auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
// Shared present modes let the presentation engine and app share the image; track this so
// image-acquire/layout validation can special-case those images.
10903 if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
10904 VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
10905 swapchain_state->shared_presentable = true;
10907 surface_state[i]->swapchain = swapchain_state.get();
10908 dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
10911 for (uint32_t i = 0; i < swapchainCount; i++) {
10912 surface_state[i]->swapchain = nullptr;
10915 // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
10916 for (uint32_t i = 0; i < swapchainCount; i++) {
10917 if (old_swapchain_state[i]) {
10918 old_swapchain_state[i]->replaced = true;
10920 surface_state[i]->old_swapchain = old_swapchain_state[i];
// Layer intercept for vkCreateSharedSwapchainsKHR: validate each create-info (collecting the
// matching surface/old-swapchain state), forward to the ICD, then record resulting state.
10925 VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
10926 const VkSwapchainCreateInfoKHR *pCreateInfos,
10927 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
10928 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Filled by the pre-call validator, consumed by the post-call recorder (indices parallel pCreateInfos).
10929 std::vector<SURFACE_STATE *> surface_state;
10930 std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
10932 if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
10933 old_swapchain_state)) {
10934 return VK_ERROR_VALIDATION_FAILED_EXT;
10938 dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
10940 PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
10941 old_swapchain_state);
// Validate vkAcquireNextImageKHR: require at least one completion primitive (semaphore or fence),
// reject an already-signaled semaphore, check the fence is submittable, reject acquiring from a
// replaced swapchain, warn when the app holds the maximum acquirable image count, and warn if
// the app never retrieved the swapchain images.
10946 static bool PreCallValidateAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10947 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
// With neither a fence nor a semaphore the app has no way to know when the acquire completed.
10949 if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
10950 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10951 HandleToUint64(device), DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE,
10952 "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
10953 "determine the completion of this operation.");
// Only internally-scoped semaphores are checked; externally-owned ones are not tracked here.
10956 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10957 if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
10958 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10959 HandleToUint64(semaphore), VALIDATION_ERROR_16400a0c,
10960 "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state.");
10963 auto pFence = GetFenceNode(dev_data, fence);
10965 skip |= ValidateFenceForSubmit(dev_data, pFence);
10968 auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
// A swapchain used as oldSwapchain in a later create can still present acquired images, but
// must not hand out new ones.
10969 if (swapchain_data->replaced) {
10970 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10971 HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_REPLACED,
10972 "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still present any images it "
10973 "has acquired, but cannot acquire any more.");
// Only meaningful if surface capabilities (minImageCount) were actually queried.
10976 auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
10977 if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
10978 uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
10979 [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
// The app may hold at most (imageCount - minImageCount) images at once.
10980 if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
10982 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10983 HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES,
10984 "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
// No images recorded usually means vkGetSwapchainImagesKHR was never called post-creation.
10989 if (swapchain_data->images.size() == 0) {
10990 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10991 HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND,
10992 "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
10993 "vkGetSwapchainImagesKHR after swapchain creation.");
// Record layer state after a successful vkAcquireNextImageKHR: mark the fence in-flight and the
// semaphore signaled (neither tied to a queue signaler, since ANI is not a queue operation), and
// flag the acquired image.
10998 static void PostCallRecordAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10999 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11000 auto pFence = GetFenceNode(dev_data, fence);
11001 if (pFence && pFence->scope == kSyncScopeInternal) {
11002 // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
11004 pFence->state = FENCE_INFLIGHT;
11005 pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
11008 auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
11009 if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
11010 // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
11011 // temporary import
11012 pSemaphore->signaled = true;
11013 pSemaphore->signaler.first = VK_NULL_HANDLE;
11016 // Mark the image as acquired.
11017 auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
11018 auto image = swapchain_data->images[*pImageIndex];
11019 auto image_state = GetImageState(dev_data, image);
11020 image_state->acquired = true;
// Propagate shared-presentable mode so present-time layout checks can special-case this image.
11021 image_state->shared_presentable = swapchain_data->shared_presentable;
// Layer intercept for vkAcquireNextImageKHR: validate under the global lock, abort on validation
// failure, forward to the ICD, and record state on SUCCESS/SUBOPTIMAL (both yield a valid index).
11024 VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11025 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11026 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11028 unique_lock_t lock(global_lock);
11029 bool skip = PreCallValidateAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
11032 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11034 VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
// VK_SUBOPTIMAL_KHR still returns a usable image index, so record state for it too.
11037 if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11038 PostCallRecordAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
// Layer intercept for vkEnumeratePhysicalDevices.
// Tracks the usual count-then-query protocol: warns when devices are requested before the count
// was queried, and when the app's count differs from the instance's actual device count. On the
// data-returning call, initializes per-physical-device state (including its feature set).
11045 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11046 VkPhysicalDevice *pPhysicalDevices) {
11048 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11049 assert(instance_data);
11051 // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11052 if (NULL == pPhysicalDevices) {
11053 instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11055 if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
11056 // Flag warning here. You can call this without having queried the count, but it may not be
11057 // robust on platforms with multiple physical devices.
11058 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11059 0, DEVLIMITS_MISSING_QUERY_COUNT,
11060 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call "
11061 "vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11062 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11063 else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
11064 // Having actual count match count from app is not a requirement, so this can be a warning
11065 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11066 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
11067 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by "
11068 "this instance is %u.",
11069 *pPhysicalDeviceCount, instance_data->physical_devices_count);
11071 instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11074 return VK_ERROR_VALIDATION_FAILED_EXT;
11076 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
// Count-only call: remember the real device count for the mismatch warning above.
11077 if (NULL == pPhysicalDevices) {
11078 instance_data->physical_devices_count = *pPhysicalDeviceCount;
11079 } else if (result == VK_SUCCESS) { // Save physical devices
11080 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
// operator[] creates the PHYSICAL_DEVICE_STATE entry on first sight of this handle.
11081 auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11082 phys_device_state.phys_device = pPhysicalDevices[i];
11083 // Init actual features for each physical device
11084 instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11090 // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
// Enforces the count-then-query protocol for queue-family properties: warn if properties are
// requested before the count was ever obtained, and warn if the count passed in on a later call
// differs from the largest count previously returned for this physical device. caller_name is
// used only for message text so the 1.0/2/2KHR entry points can share this logic.
11091 static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11092 PHYSICAL_DEVICE_STATE *pd_state,
11093 uint32_t requested_queue_family_property_count, bool qfp_null,
11094 const char *caller_name) {
11097 // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
11098 if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11100 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11101 HandleToUint64(pd_state->phys_device), DEVLIMITS_MISSING_QUERY_COUNT,
11102 "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
11103 "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
11104 caller_name, caller_name);
11105 // Then verify that pCount that is passed in on second call matches what was returned
11106 } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
11108 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11109 HandleToUint64(pd_state->phys_device), DEVLIMITS_COUNT_MISMATCH,
11110 "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
11111 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
11112 ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
11113 "previously obtained by calling %s with NULL pQueueFamilyProperties.",
11114 caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
11116 pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
// Thin wrapper: validate the 1.0 entry point via the shared helper, tagging messages with the
// 1.0 function name.
11122 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11123 PHYSICAL_DEVICE_STATE *pd_state,
11124 uint32_t *pQueueFamilyPropertyCount,
11125 VkQueueFamilyProperties *pQueueFamilyProperties) {
11126 return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11127 (nullptr == pQueueFamilyProperties),
11128 "vkGetPhysicalDeviceQueueFamilyProperties()");
// Thin wrapper: validate the 2/2KHR entry points via the shared helper, tagging messages with the
// combined "2[KHR]" function name.
11131 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_layer_data *instance_data,
11132 PHYSICAL_DEVICE_STATE *pd_state,
11133 uint32_t *pQueueFamilyPropertyCount,
11134 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11135 return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11136 (nullptr == pQueueFamilyProperties),
11137 "vkGetPhysicalDeviceQueueFamilyProperties2[KHR]()");
11140 // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
// Count-only call (pQueueFamilyProperties == null): advance state to QUERY_COUNT and remember
// the count. Data call: advance to QUERY_DETAILS, grow (never shrink) the recorded count, and
// copy each family's 1.0 properties out of the 2KHR wrappers into layer state.
11141 static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11142 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11143 if (!pQueueFamilyProperties) {
11144 if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
11145 pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11146 pd_state->queue_family_count = count;
11147 } else { // Save queue family properties
11148 pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
// max() so a later call with a smaller count doesn't discard the previously known maximum.
11149 pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
11151 pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
11152 for (uint32_t i = 0; i < count; ++i) {
11153 pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
// Adapter for the 1.0 entry point: wrap each VkQueueFamilyProperties in a VkQueueFamilyProperties2KHR
// so the common state-update helper can be shared with the 2/2KHR paths. pqfp stays null on the
// count-only call (pQueueFamilyProperties == null), which the helper interprets as count-only.
11158 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11159 VkQueueFamilyProperties *pQueueFamilyProperties) {
11160 VkQueueFamilyProperties2KHR *pqfp = nullptr;
11161 std::vector<VkQueueFamilyProperties2KHR> qfp;
11163 if (pQueueFamilyProperties) {
11164 for (uint32_t i = 0; i < count; ++i) {
11165 qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
11166 qfp[i].pNext = nullptr;
11167 qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
11171 StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
// 2/2KHR entry points already use the 2KHR struct, so forward directly to the common helper.
11174 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11175 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11176 StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
// Layer intercept for vkGetPhysicalDeviceQueueFamilyProperties (1.0): validate count/query
// protocol under the global lock, forward to the ICD, then record the returned properties.
11179 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
11180 uint32_t *pQueueFamilyPropertyCount,
11181 VkQueueFamilyProperties *pQueueFamilyProperties) {
11182 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11183 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11184 assert(physical_device_state);
11185 unique_lock_t lock(global_lock);
11187 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
11188 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11194 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
11195 pQueueFamilyProperties);
11198 PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
// Layer intercept for vkGetPhysicalDeviceQueueFamilyProperties2 (core 1.1 alias): same
// validate/dispatch/record flow as the 1.0 version, using the 2KHR struct throughout.
11201 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
11202 uint32_t *pQueueFamilyPropertyCount,
11203 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11204 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11205 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11206 assert(physical_device_state);
11207 unique_lock_t lock(global_lock);
11208 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11209 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11213 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
11214 pQueueFamilyProperties);
11216 PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11217 pQueueFamilyProperties);
// Layer intercept for vkGetPhysicalDeviceQueueFamilyProperties2KHR (extension spelling): mirrors
// the core-2 intercept but dispatches through the KHR entry in the dispatch table.
11220 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
11221 uint32_t *pQueueFamilyPropertyCount,
11222 VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11223 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11224 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11225 assert(physical_device_state);
11226 unique_lock_t lock(global_lock);
11227 bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11228 pQueueFamilyPropertyCount, pQueueFamilyProperties);
11232 instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
11233 pQueueFamilyProperties);
11235 PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11236 pQueueFamilyProperties);
// Shared implementation for all platform vkCreate*SurfaceKHR entry points.
// fptr is a pointer-to-member selecting the platform-specific function in the instance dispatch
// table; on success a SURFACE_STATE entry is created for the new surface under the global lock.
11239 template <typename TCreateInfo, typename FPtr>
11240 static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
11241 VkSurfaceKHR *pSurface, FPtr fptr) {
11242 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11244 // Call down the call chain:
11245 VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11247 if (result == VK_SUCCESS) {
11248 unique_lock_t lock(global_lock);
11249 instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
// Layer intercept for vkDestroySurfaceKHR: error if a swapchain still references the surface,
// remove the surface from layer state, then forward destruction to the ICD.
11256 VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11258 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11259 unique_lock_t lock(global_lock);
11260 auto surface_state = GetSurfaceState(instance_data, surface);
// Destroying a surface before its swapchain is a spec violation (VALIDATION_ERROR_26c009e4).
11262 if ((surface_state) && (surface_state->swapchain)) {
11263 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11264 HandleToUint64(instance), VALIDATION_ERROR_26c009e4,
11265 "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
11267 instance_data->surface_map.erase(surface);
11270 instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
// Display-plane surface creation: delegate to the shared CreateSurface template.
11274 VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
11275 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11276 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
11279 #ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android surface creation: delegate to the shared CreateSurface template.
11280 VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11281 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11282 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11284 #endif // VK_USE_PLATFORM_ANDROID_KHR
11286 #ifdef VK_USE_PLATFORM_IOS_MVK
// iOS (MoltenVK) surface creation: delegate to the shared CreateSurface template.
11287 VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
11288 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11289 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateIOSSurfaceMVK);
11291 #endif // VK_USE_PLATFORM_IOS_MVK
11293 #ifdef VK_USE_PLATFORM_MACOS_MVK
// macOS (MoltenVK) surface creation: delegate to the shared CreateSurface template.
11294 VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
11295 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11296 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMacOSSurfaceMVK);
11298 #endif // VK_USE_PLATFORM_MACOS_MVK
11300 #ifdef VK_USE_PLATFORM_MIR_KHR
// Mir surface creation: delegate to the shared CreateSurface template.
11301 VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11302 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11303 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
// Mir presentation-support query: validate queueFamilyIndex against this physical device's known
// queue family count, then forward to the ICD (returns VK_FALSE on validation failure).
11306 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11307 uint32_t queueFamilyIndex, MirConnection *connection) {
11309 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11311 unique_lock_t lock(global_lock);
11312 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11314 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
11315 "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
11319 if (skip) return VK_FALSE;
11321 // Call down the call chain:
11323 instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
11327 #endif // VK_USE_PLATFORM_MIR_KHR
11329 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
// Wayland surface creation: delegate to the shared CreateSurface template.
11330 VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11331 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11332 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
// Wayland presentation-support query: validate queueFamilyIndex, then forward to the ICD
// (returns VK_FALSE on validation failure).
11335 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11336 uint32_t queueFamilyIndex,
11337 struct wl_display *display) {
11339 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11341 unique_lock_t lock(global_lock);
11342 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11344 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
11345 "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
11349 if (skip) return VK_FALSE;
11351 // Call down the call chain:
11353 instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
11357 #endif // VK_USE_PLATFORM_WAYLAND_KHR
11359 #ifdef VK_USE_PLATFORM_WIN32_KHR
// Win32 surface creation: delegate to the shared CreateSurface template.
11360 VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11361 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11362 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
// Win32 presentation-support query: validate queueFamilyIndex, then forward to the ICD
// (returns VK_FALSE on validation failure).
11365 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
11366 uint32_t queueFamilyIndex) {
11368 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11370 unique_lock_t lock(global_lock);
11371 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11373 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
11374 "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
11378 if (skip) return VK_FALSE;
11380 // Call down the call chain:
11381 VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
11385 #endif // VK_USE_PLATFORM_WIN32_KHR
11387 #ifdef VK_USE_PLATFORM_XCB_KHR
// Layer wrapper for vkCreateXcbSurfaceKHR; delegates to the shared CreateSurface helper.
11388 VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11389 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11390 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
// Layer wrapper for vkGetPhysicalDeviceXcbPresentationSupportKHR: validates the queue
// family index, then forwards the query (connection + visual id) down the chain.
// NOTE(review): elided excerpt — 'skip' declaration, unlock, and 'return result;' are not visible.
11393 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11394 uint32_t queueFamilyIndex, xcb_connection_t *connection,
11395 xcb_visualid_t visual_id) {
11397 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11399 unique_lock_t lock(global_lock);
11400 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
// VALIDATION_ERROR_2f400a40: queueFamilyIndex range check for this entry point.
11402 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
11403 "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
11407 if (skip) return VK_FALSE;
11409 // Call down the call chain:
11410 VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
11411 connection, visual_id);
11415 #endif // VK_USE_PLATFORM_XCB_KHR
11417 #ifdef VK_USE_PLATFORM_XLIB_KHR
// Layer wrapper for vkCreateXlibSurfaceKHR; delegates to the shared CreateSurface helper.
11418 VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11419 const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11420 return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
// Layer wrapper for vkGetPhysicalDeviceXlibPresentationSupportKHR: validates the queue
// family index, then forwards the query (display + visual) down the chain.
// NOTE(review): elided excerpt — 'skip' declaration, unlock, and the return of the chained
// result are not visible; confirm against the full file.
11423 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11424 uint32_t queueFamilyIndex, Display *dpy,
11425 VisualID visualID) {
11427 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11429 unique_lock_t lock(global_lock);
11430 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
// VALIDATION_ERROR_2f600a46: queueFamilyIndex range check for this entry point.
11432 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
11433 "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
11437 if (skip) return VK_FALSE;
11439 // Call down the call chain:
11441 instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
11445 #endif // VK_USE_PLATFORM_XLIB_KHR
// Layer wrapper for vkGetPhysicalDeviceSurfaceCapabilitiesKHR: calls down the chain and,
// on success, caches the capabilities and marks the query state as QUERY_DETAILS so later
// swapchain-creation validation can compare against them.
// NOTE(review): elided excerpt — the 'result' declaration/assignment and the final return
// are not visible; confirm against the full file.
11447 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11448 VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
11449 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11451 unique_lock_t lock(global_lock);
11452 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11456 instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
11458 if (result == VK_SUCCESS) {
// Record that the app has queried full capability details for this physical device.
11459 physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11460 physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
// Records the result of a successful vkGetPhysicalDeviceSurfaceCapabilities2KHR call:
// stores the embedded VkSurfaceCapabilitiesKHR in the per-physical-device state and marks
// the (shared, non-2) capabilities query state as QUERY_DETAILS.
11466 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
11467 VkPhysicalDevice physicalDevice,
11468 VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11469 unique_lock_t lock(global_lock);
11470 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11471 physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
// The 2KHR struct wraps the legacy struct; only the inner capabilities are cached.
11472 physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
// Layer wrapper for vkGetPhysicalDeviceSurfaceCapabilities2KHR: calls down the chain and,
// on success, records the capabilities via the PostCallRecord helper.
// NOTE(review): elided excerpt — 'result' declaration and final return are not visible.
11475 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
11476 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11477 VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11478 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11481 instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
11483 if (result == VK_SUCCESS) {
11484 PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
// Records the result of a successful vkGetPhysicalDeviceSurfaceCapabilities2EXT call.
// The EXT struct is not layout-compatible with VkSurfaceCapabilitiesKHR, so each common
// field is copied individually into the cached legacy-format capabilities. Fields unique
// to the EXT struct (e.g. supportedSurfaceCounters) are intentionally not cached here.
11490 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
11491 VkPhysicalDevice physicalDevice,
11492 VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11493 unique_lock_t lock(global_lock);
11494 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11495 physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11496 physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
11497 physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
11498 physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
11499 physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
11500 physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
11501 physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
11502 physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
11503 physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
11504 physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
11505 physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
// Layer wrapper for vkGetPhysicalDeviceSurfaceCapabilities2EXT: calls down the chain and,
// on success, records the capabilities via the PostCallRecord helper.
// NOTE(review): elided excerpt — 'result' declaration and final return are not visible.
11508 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11509 VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11510 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11513 instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
11515 if (result == VK_SUCCESS) {
11516 PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
// Layer wrapper for vkGetPhysicalDeviceSurfaceSupportKHR: validates queueFamilyIndex,
// calls down the chain, and on success caches the (physicalDevice, queueFamilyIndex) →
// presentation-support answer on the surface's state for later present validation.
// NOTE(review): elided excerpt — 'skip'/'result' declarations, unlock, and final return
// are not visible; confirm against the full file.
11522 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
11523 VkSurfaceKHR surface, VkBool32 *pSupported) {
11525 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11527 unique_lock_t lock(global_lock);
11528 const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11529 auto surface_state = GetSurfaceState(instance_data, surface);
11531 skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
11532 "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
11536 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11539 instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
11541 if (result == VK_SUCCESS) {
// Cache per-(gpu, queue family) support so present-time validation can consult it.
11542 surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
// Layer wrapper for vkGetPhysicalDeviceSurfacePresentModesKHR. Implements the standard
// two-call idiom checks: warns if the app passes a non-NULL pPresentModes without a prior
// count query, or with a count that differs from the previously returned count. On
// success, advances the call-state (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS) and caches
// the returned present modes.
// NOTE(review): elided excerpt — 'skip' declaration, the switch's case labels, unlock,
// the trailing argument of the chained call, and the final return are not visible.
11548 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11549 uint32_t *pPresentModeCount,
11550 VkPresentModeKHR *pPresentModes) {
11552 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11553 unique_lock_t lock(global_lock);
11554 // TODO: this isn't quite right. available modes may differ by surface AND physical device.
11555 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11556 auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
11558 if (pPresentModes) {
11559 // Compare the preliminary value of *pPresentModeCount with the value this time:
11560 auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
11561 switch (call_state) {
// Warning path: details requested before any count query was recorded.
11563 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11564 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11565 DEVLIMITS_MUST_QUERY_COUNT,
11566 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior "
11567 "positive value has been seen for pPresentModeCount.");
11570 // both query count and query details
11571 if (*pPresentModeCount != prev_mode_count) {
11572 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11573 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11574 DEVLIMITS_COUNT_MISMATCH,
11575 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs "
11576 "from the value (%u) that was returned when pPresentModes was NULL.",
11577 *pPresentModeCount, prev_mode_count);
11584 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11586 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
// VK_INCOMPLETE also carries usable data, so record state for it as well as VK_SUCCESS.
11589 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11592 if (*pPresentModeCount) {
11593 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11594 if (*pPresentModeCount > physical_device_state->present_modes.size())
11595 physical_device_state->present_modes.resize(*pPresentModeCount);
11597 if (pPresentModes) {
11598 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11599 for (uint32_t i = 0; i < *pPresentModeCount; i++) {
11600 physical_device_state->present_modes[i] = pPresentModes[i];
// Layer wrapper for vkGetPhysicalDeviceSurfaceFormatsKHR. Mirrors the present-modes
// wrapper: enforces the two-call idiom (warn on details-before-count and on count
// mismatch), then calls down the chain and caches the returned surface formats while
// advancing the call-state (UNCALLED -> QUERY_COUNT -> QUERY_DETAILS).
// NOTE(review): elided excerpt — 'skip' declaration, switch case labels, unlock, the
// trailing argument of the chained call, and the final return are not visible.
11608 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11609 uint32_t *pSurfaceFormatCount,
11610 VkSurfaceFormatKHR *pSurfaceFormats) {
11612 auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11613 unique_lock_t lock(global_lock);
11614 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11615 auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
11617 if (pSurfaceFormats) {
11618 auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
11620 switch (call_state) {
11622 // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application
11624 // previously call this function with a NULL value of pSurfaceFormats:
11625 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11626 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11627 DEVLIMITS_MUST_QUERY_COUNT,
11628 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior "
11629 "positive value has been seen for pSurfaceFormats.");
11632 if (prev_format_count != *pSurfaceFormatCount) {
11633 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11634 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11635 DEVLIMITS_COUNT_MISMATCH,
11636 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with "
11637 "pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned "
11638 "when pSurfaceFormatCount was NULL.",
11639 *pSurfaceFormatCount, prev_format_count);
11646 if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11648 // Call down the call chain:
11649 auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
// VK_INCOMPLETE also carries usable data, so record state for it as well as VK_SUCCESS.
11652 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11655 if (*pSurfaceFormatCount) {
11656 if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11657 if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
11658 physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
11660 if (pSurfaceFormats) {
11661 if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11662 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11663 physical_device_state->surface_formats[i] = pSurfaceFormats[i];
// Records the result of a successful vkGetPhysicalDeviceSurfaceFormats2KHR call:
// advances the shared (non-2) surface-formats query state and caches the inner
// VkSurfaceFormatKHR of each returned VkSurfaceFormat2KHR element.
11670 static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
11671 uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
11672 unique_lock_t lock(global_lock);
11673 auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11674 if (*pSurfaceFormatCount) {
11675 if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
11676 physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
11678 if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
11679 physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
11681 if (pSurfaceFormats) {
11682 if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
11683 physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
11685 for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
// The 2KHR struct wraps the legacy format; only the inner surfaceFormat is cached.
11686 physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
// Layer wrapper for vkGetPhysicalDeviceSurfaceFormats2KHR: calls down the chain and, on
// VK_SUCCESS or VK_INCOMPLETE, records the returned formats via the PostCallRecord helper.
// NOTE(review): elided excerpt — the final 'return result;' is not visible.
11691 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
11692 const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11693 uint32_t *pSurfaceFormatCount,
11694 VkSurfaceFormat2KHR *pSurfaceFormats) {
11695 auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11696 auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
11697 pSurfaceFormatCount, pSurfaceFormats);
11698 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11699 PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
11704 // VK_EXT_debug_utils commands
// Layer wrapper for vkSetDebugUtilsObjectNameEXT: records (or clears) the debug name for
// an object handle in the layer's name map so later messages can include it, then forwards
// down the chain only if the next layer/driver actually implements the entry point.
11705 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
11706 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11707 VkResult result = VK_SUCCESS;
11708 if (pNameInfo->pObjectName) {
11709 dev_data->report_data->debugUtilsObjectNameMap->insert(
11710 std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
// A NULL pObjectName removes any previously-set name for the handle.
11712 dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
11714 if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectNameEXT) {
11715 result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
// Layer wrapper for vkSetDebugUtilsObjectTagEXT: pass-through; forwards only if the next
// layer/driver implements the entry point, otherwise returns VK_SUCCESS.
11720 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
11721 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11722 VkResult result = VK_SUCCESS;
11723 if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectTagEXT) {
11724 result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
// Layer wrapper for vkQueueBeginDebugUtilsLabelEXT: records the label in the layer's
// report data first, then forwards down the chain if implemented.
11729 VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11730 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11731 BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
11732 if (nullptr != dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
11733 dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
// Layer wrapper for vkQueueEndDebugUtilsLabelEXT: forwards down the chain (if
// implemented) and then pops the label from the layer's report data.
11737 VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
11738 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11739 if (nullptr != dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
11740 dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
// Record-keeping happens after the chained call, unlike the Begin/Insert wrappers.
11742 EndQueueDebugUtilsLabel(dev_data->report_data, queue);
// Layer wrapper for vkQueueInsertDebugUtilsLabelEXT: records the one-shot label in the
// layer's report data, then forwards down the chain if implemented.
11745 VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11746 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11747 InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
11748 if (nullptr != dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
11749 dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
// Layer wrapper for vkCmdBeginDebugUtilsLabelEXT: records the label for the command
// buffer in the layer's report data, then forwards down the chain if implemented.
11753 VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
11754 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11755 BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
11756 if (nullptr != dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
11757 dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
// Layer wrapper for vkCmdEndDebugUtilsLabelEXT: forwards down the chain (if implemented)
// and then pops the label from the layer's report data.
11761 VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
11762 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11763 if (nullptr != dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
11764 dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
// Record-keeping happens after the chained call, unlike the Begin/Insert wrappers.
11766 EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
// Layer wrapper for vkCmdInsertDebugUtilsLabelEXT: records the one-shot label in the
// layer's report data, then forwards down the chain if implemented.
11769 VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
11770 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11771 InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
11772 if (nullptr != dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
11773 dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
// Layer wrapper for vkCreateDebugUtilsMessengerEXT: creates the messenger down the chain,
// then registers the callback with the layer's own reporting machinery so this layer's
// messages reach it.
// NOTE(review): elided excerpt — the final 'return result;' is not visible.
11777 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
11778 const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
11779 const VkAllocationCallbacks *pAllocator,
11780 VkDebugUtilsMessengerEXT *pMessenger) {
11781 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11782 VkResult result = instance_data->dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
11784 if (VK_SUCCESS == result) {
11785 result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
// Layer wrapper for vkDestroyDebugUtilsMessengerEXT: destroys the messenger down the
// chain, then unregisters the callback from the layer's reporting machinery.
11790 VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
11791 const VkAllocationCallbacks *pAllocator) {
11792 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11793 instance_data->dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
11794 layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
// Layer wrapper for vkSubmitDebugUtilsMessageEXT: pure pass-through to the next layer.
11797 VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
11798 VkDebugUtilsMessageTypeFlagsEXT messageTypes,
11799 const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
11800 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11801 instance_data->dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
11804 // VK_EXT_debug_report commands
// Layer wrapper for vkCreateDebugReportCallbackEXT: creates the callback down the chain,
// then (under the global lock) registers it with the layer's reporting machinery.
// NOTE(review): elided excerpt — the final return is not visible.
11805 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
11806 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11807 const VkAllocationCallbacks *pAllocator,
11808 VkDebugReportCallbackEXT *pMsgCallback) {
11809 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11810 VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11811 if (VK_SUCCESS == res) {
11812 lock_guard_t lock(global_lock);
11813 res = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
// Layer wrapper for vkDestroyDebugReportCallbackEXT: destroys the callback down the
// chain, then (under the global lock) unregisters it from the layer's reporting machinery.
11818 VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
11819 const VkAllocationCallbacks *pAllocator) {
11820 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11821 instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11822 lock_guard_t lock(global_lock);
11823 layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
// Layer wrapper for vkDebugReportMessageEXT: pure pass-through to the next layer.
11826 VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
11827 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
11828 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11829 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11830 instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
// Reports this layer's own VkLayerProperties (the single global_layer entry).
11833 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11834 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
// Reports this layer's own VkLayerProperties for the device-level query; the
// physicalDevice parameter is intentionally unused.
11837 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11838 VkLayerProperties *pProperties) {
11839 return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
// Reports the instance extensions this layer itself provides when queried by the layer's
// name; any other (or NULL) layer name yields VK_ERROR_LAYER_NOT_PRESENT.
11842 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
11843 VkExtensionProperties *pProperties) {
11844 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11845 return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11847 return VK_ERROR_LAYER_NOT_PRESENT;
// Reports this layer's own device extensions when queried by the layer's name; otherwise
// forwards the query down the chain (with pLayerName nulled out) so the driver answers.
11850 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
11851 uint32_t *pCount, VkExtensionProperties *pProperties) {
11852 if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11853 return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
// physicalDevice must be valid when the query is not about this layer itself.
11855 assert(physicalDevice);
11857 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11858 return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
// Pre-call validation for vkEnumeratePhysicalDeviceGroups(KHR): warns on two-call-idiom
// misuse (details requested before a count query; count mismatch versus the recorded
// group count) and errors on an unrecognized instance.
// NOTE(review): elided excerpt — 'skip' declaration, the else branch introducing the
// invalid-instance log, and the final 'return skip;' are not visible.
11861 static bool PreCallValidateEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
11862 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
11863 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11866 if (instance_data) {
11867 // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
11868 if (NULL != pPhysicalDeviceGroupProperties) {
11869 if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
11870 // Flag warning here. You can call this without having queried the count, but it may not be
11871 // robust on platforms with multiple physical devices.
11872 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11873 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, DEVLIMITS_MISSING_QUERY_COUNT,
11874 "Call sequence has vkEnumeratePhysicalDeviceGroups() w/ non-NULL "
11875 "pPhysicalDeviceGroupProperties. You should first call vkEnumeratePhysicalDeviceGroups() w/ "
11876 "NULL pPhysicalDeviceGroupProperties to query pPhysicalDeviceGroupCount.");
11877 } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11878 else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
11879 // Having actual count match count from app is not a requirement, so this can be a warning
11880 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11881 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
11882 "Call to vkEnumeratePhysicalDeviceGroups() w/ pPhysicalDeviceGroupCount value %u, but actual count "
11883 "supported by this instance is %u.",
11884 *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
// Error path: no layer data was found for this instance handle.
11888 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0,
11889 DEVLIMITS_INVALID_INSTANCE, "Invalid instance (0x%" PRIx64 ") passed into vkEnumeratePhysicalDeviceGroups().",
11890 HandleToUint64(instance));
// Advances the instance's EnumeratePhysicalDeviceGroups call-state before the chained
// call: a NULL properties pointer is a count query (QUERY_COUNT), non-NULL is a details
// query (QUERY_DETAILS).
11896 static void PreCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data,
11897 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
11898 if (instance_data) {
11899 // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
11900 if (NULL == pPhysicalDeviceGroupProperties) {
11901 instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
11903 instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
// Records the result of a successful vkEnumeratePhysicalDeviceGroups call: for a count
// query, remembers the group count; for a details query, creates/refreshes per-physical-
// device state for every device in every returned group, including its actual features.
11908 static void PostCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceGroupCount,
11909 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
11910 if (NULL == pPhysicalDeviceGroupProperties) {
11911 instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
11912 } else { // Save physical devices
11913 for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
11914 for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
11915 VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
// operator[] creates the state entry on first sight of this physical device.
11916 auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
11917 phys_device_state.phys_device = cur_phys_dev;
11918 // Init actual features for each physical device
11919 instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
// Layer wrapper for vkEnumeratePhysicalDeviceGroups: validate, record pre-call state,
// call down the chain, and record results on success.
// NOTE(review): elided excerpt — 'skip' declaration, the 'if (skip)' guard around the
// early return, and the final 'return result;' are not visible.
11925 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
11926 VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
11928 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11930 skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
11932 return VK_ERROR_VALIDATION_FAILED_EXT;
11934 PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
11935 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount,
11936 pPhysicalDeviceGroupProperties);
11937 if (result == VK_SUCCESS) {
11938 PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
// Layer wrapper for vkEnumeratePhysicalDeviceGroupsKHR: identical flow to the core
// EnumeratePhysicalDeviceGroups wrapper, sharing the same validate/record helpers but
// dispatching the KHR entry point.
// NOTE(review): elided excerpt — 'skip' declaration, the 'if (skip)' guard, and the
// final 'return result;' are not visible.
11943 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(
11944 VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
11946 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11948 skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
11950 return VK_ERROR_VALIDATION_FAILED_EXT;
11952 PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
11953 VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount,
11954 pPhysicalDeviceGroupProperties);
11955 if (result == VK_SUCCESS) {
11956 PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
// Pre-call validation shared by vkCreateDescriptorUpdateTemplate and its KHR alias.
// For DESCRIPTOR_SET templates: the referenced descriptor set layout must be known.
// For PUSH_DESCRIPTORS templates: the bind point must be graphics or compute, the
// pipeline layout must be known, and pCreateInfo->set must select that layout's push
// descriptor set layout.
// NOTE(review): elided excerpt — 'skip' declaration, some if/else scaffolding around the
// valid_bp check, and the final 'return skip;' are not visible.
11961 static bool PreCallValidateCreateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
11962 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
11963 const VkAllocationCallbacks *pAllocator,
11964 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
11966 const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
11967 if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
11968 auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout);
11969 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
11970 ds_uint, VALIDATION_ERROR_052002bc, "%s: Invalid pCreateInfo->descriptorSetLayout (%" PRIx64 ")", func_name,
11972 } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
11973 auto bind_point = pCreateInfo->pipelineBindPoint;
11974 bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
11976 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
11977 VALIDATION_ERROR_052002be, "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name,
11978 static_cast<uint32_t>(bind_point));
11980 const auto pipeline_layout = getPipelineLayout(device_data, pCreateInfo->pipelineLayout);
11981 if (!pipeline_layout) {
11982 uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
11983 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11984 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c0,
11985 "%s: Invalid pCreateInfo->pipelineLayout (%" PRIx64 ")", func_name, pl_uint);
11987 const uint32_t pd_set = pCreateInfo->set;
// The chosen set index must exist in the layout and be its push descriptor set.
11988 if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
11989 !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
11990 uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
11991 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11992 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c2,
11993 "%s: pCreateInfo->set (%" PRIu32
11994 ") does not refer to the push descriptor set layout for "
11995 "pCreateInfo->pipelineLayout (%" PRIx64 ").",
11996 func_name, pd_set, pl_uint);
// Records a newly created descriptor update template: deep-copies the create info (the
// safe_* shadow struct takes ownership of the raw pointer) and stores it in the device's
// template map keyed by handle, so later vkUpdateDescriptorSetWithTemplate calls can be
// validated against it.
12003 static void PostCallRecordCreateDescriptorUpdateTemplate(layer_data *device_data,
12004 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12005 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12006 // Shadow template createInfo for later updates
12007 safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
12008 std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
12009 device_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
// Layer wrapper for vkCreateDescriptorUpdateTemplate: validate under the global lock,
// call down the chain, and on success record the template state.
// NOTE(review): elided excerpt — the 'if (!skip)' guard, lock manipulation around the
// chained call, the 'result =' assignment, and the final return are not visible.
12012 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
12013 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12014 const VkAllocationCallbacks *pAllocator,
12015 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12016 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12017 unique_lock_t lock(global_lock);
12018 bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", device_data, pCreateInfo,
12019 pAllocator, pDescriptorUpdateTemplate);
// Default result if validation fails; overwritten by the chained call otherwise.
12021 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12025 device_data->dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
12026 if (VK_SUCCESS == result) {
12028 PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
12034 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
12035 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12036 const VkAllocationCallbacks *pAllocator,
12037 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12038 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12039 unique_lock_t lock(global_lock);
12040 bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", device_data, pCreateInfo,
12041 pAllocator, pDescriptorUpdateTemplate);
12043 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12046 result = device_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator,
12047 pDescriptorUpdateTemplate);
12048 if (VK_SUCCESS == result) {
12050 PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
12056 static void PreCallRecordDestroyDescriptorUpdateTemplate(layer_data *device_data,
12057 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate) {
12058 device_data->desc_template_map.erase(descriptorUpdateTemplate);
12061 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12062 const VkAllocationCallbacks *pAllocator) {
12063 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12064 unique_lock_t lock(global_lock);
12065 PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12067 device_data->dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
12070 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
12071 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12072 const VkAllocationCallbacks *pAllocator) {
12073 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12074 unique_lock_t lock(global_lock);
12075 PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12077 device_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
12080 // PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
12081 static void PostCallRecordUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
12082 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12083 const void *pData) {
12084 auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
12085 if (template_map_entry == device_data->desc_template_map.end()) {
12089 cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
12092 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
12093 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12094 const void *pData) {
12095 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12096 device_data->dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
12098 PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
12101 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
12102 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12103 const void *pData) {
12104 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12105 device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
12107 PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
12110 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
12111 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12112 VkPipelineLayout layout, uint32_t set, const void *pData) {
12113 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12114 unique_lock_t lock(global_lock);
12116 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12117 // Minimal validation for command buffer state
12119 skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "vkCmdPushDescriptorSetWithTemplateKHR()");
12124 dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
12128 static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
12129 VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12130 VkDisplayPlanePropertiesKHR *pProperties) {
12131 unique_lock_t lock(global_lock);
12132 auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
12134 if (*pPropertyCount) {
12135 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
12136 physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
12138 physical_device_state->display_plane_property_count = *pPropertyCount;
12141 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
12142 physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
12147 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12148 VkDisplayPlanePropertiesKHR *pProperties) {
12149 VkResult result = VK_SUCCESS;
12150 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12152 result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
12154 if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12155 PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
12161 static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
12162 VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12163 const char *api_name) {
12165 auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
12166 if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
12168 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12169 HandleToUint64(physicalDevice), SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY,
12170 "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
12172 if (planeIndex >= physical_device_state->display_plane_property_count) {
12174 instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12175 HandleToUint64(physicalDevice), VALIDATION_ERROR_29c009c2,
12176 "%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR. "
12177 "Do you have the plane index hardcoded?",
12178 api_name, physical_device_state->display_plane_property_count - 1);
12184 static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12185 uint32_t planeIndex) {
12187 lock_guard_t lock(global_lock);
12188 skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12189 "vkGetDisplayPlaneSupportedDisplaysKHR");
12193 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12194 uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
12195 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12196 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12197 bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
12200 instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
12205 static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12206 uint32_t planeIndex) {
12208 lock_guard_t lock(global_lock);
12209 skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12210 "vkGetDisplayPlaneCapabilitiesKHR");
12214 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
12215 uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
12216 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12217 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12218 bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
12221 result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
12227 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
12228 unique_lock_t lock(global_lock);
12229 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12230 if (pNameInfo->pObjectName) {
12231 device_data->report_data->debugObjectNameMap->insert(
12232 std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
12234 device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
12237 VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
12241 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
12242 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12243 VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
12247 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12248 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12249 unique_lock_t lock(global_lock);
12251 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12252 // Minimal validation for command buffer state
12254 skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
12258 device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
12262 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
12263 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12264 unique_lock_t lock(global_lock);
12266 GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12267 // Minimal validation for command buffer state
12269 skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
12273 device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
12277 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12278 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12279 device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
12282 VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
12283 uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
12284 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12285 unique_lock_t lock(global_lock);
12287 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12288 // Minimal validation for command buffer state
12290 skip |= ValidateCmd(dev_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
12295 dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
12296 pDiscardRectangles);
12300 VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
12301 const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
12302 layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12303 unique_lock_t lock(global_lock);
12305 GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12306 // Minimal validation for command buffer state
12308 skip |= ValidateCmd(dev_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
12313 dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
12317 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
12318 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
12319 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);
12321 // Map of all APIs to be intercepted by this layer
12322 static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
12323 {"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
12324 {"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
12325 {"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
12326 {"vkCreateInstance", (void *)CreateInstance},
12327 {"vkCreateDevice", (void *)CreateDevice},
12328 {"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
12329 {"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
12330 {"vkDestroyInstance", (void *)DestroyInstance},
12331 {"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
12332 {"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
12333 {"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
12334 {"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
12335 {"vkCreateDescriptorUpdateTemplate", (void *)CreateDescriptorUpdateTemplate},
12336 {"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
12337 {"vkDestroyDescriptorUpdateTemplate", (void *)DestroyDescriptorUpdateTemplate},
12338 {"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
12339 {"vkUpdateDescriptorSetWithTemplate", (void *)UpdateDescriptorSetWithTemplate},
12340 {"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
12341 {"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
12342 {"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
12343 {"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
12344 {"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
12345 {"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
12346 {"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
12347 {"vkQueuePresentKHR", (void *)QueuePresentKHR},
12348 {"vkQueueSubmit", (void *)QueueSubmit},
12349 {"vkWaitForFences", (void *)WaitForFences},
12350 {"vkGetFenceStatus", (void *)GetFenceStatus},
12351 {"vkQueueWaitIdle", (void *)QueueWaitIdle},
12352 {"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
12353 {"vkGetDeviceQueue", (void *)GetDeviceQueue},
12354 {"vkGetDeviceQueue2", (void *)GetDeviceQueue2},
12355 {"vkDestroyDevice", (void *)DestroyDevice},
12356 {"vkDestroyFence", (void *)DestroyFence},
12357 {"vkResetFences", (void *)ResetFences},
12358 {"vkDestroySemaphore", (void *)DestroySemaphore},
12359 {"vkDestroyEvent", (void *)DestroyEvent},
12360 {"vkDestroyQueryPool", (void *)DestroyQueryPool},
12361 {"vkDestroyBuffer", (void *)DestroyBuffer},
12362 {"vkDestroyBufferView", (void *)DestroyBufferView},
12363 {"vkDestroyImage", (void *)DestroyImage},
12364 {"vkDestroyImageView", (void *)DestroyImageView},
12365 {"vkDestroyShaderModule", (void *)DestroyShaderModule},
12366 {"vkDestroyPipeline", (void *)DestroyPipeline},
12367 {"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
12368 {"vkDestroySampler", (void *)DestroySampler},
12369 {"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
12370 {"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
12371 {"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
12372 {"vkDestroyRenderPass", (void *)DestroyRenderPass},
12373 {"vkCreateBuffer", (void *)CreateBuffer},
12374 {"vkCreateBufferView", (void *)CreateBufferView},
12375 {"vkCreateImage", (void *)CreateImage},
12376 {"vkCreateImageView", (void *)CreateImageView},
12377 {"vkCreateFence", (void *)CreateFence},
12378 {"vkCreatePipelineCache", (void *)CreatePipelineCache},
12379 {"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
12380 {"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
12381 {"vkMergePipelineCaches", (void *)MergePipelineCaches},
12382 {"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
12383 {"vkCreateComputePipelines", (void *)CreateComputePipelines},
12384 {"vkCreateSampler", (void *)CreateSampler},
12385 {"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
12386 {"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
12387 {"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
12388 {"vkResetDescriptorPool", (void *)ResetDescriptorPool},
12389 {"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
12390 {"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
12391 {"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
12392 {"vkCreateCommandPool", (void *)CreateCommandPool},
12393 {"vkDestroyCommandPool", (void *)DestroyCommandPool},
12394 {"vkResetCommandPool", (void *)ResetCommandPool},
12395 {"vkCreateQueryPool", (void *)CreateQueryPool},
12396 {"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
12397 {"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
12398 {"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
12399 {"vkEndCommandBuffer", (void *)EndCommandBuffer},
12400 {"vkResetCommandBuffer", (void *)ResetCommandBuffer},
12401 {"vkCmdBindPipeline", (void *)CmdBindPipeline},
12402 {"vkCmdSetViewport", (void *)CmdSetViewport},
12403 {"vkCmdSetScissor", (void *)CmdSetScissor},
12404 {"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
12405 {"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
12406 {"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
12407 {"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
12408 {"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
12409 {"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
12410 {"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
12411 {"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
12412 {"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
12413 {"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
12414 {"vkCmdDraw", (void *)CmdDraw},
12415 {"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
12416 {"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
12417 {"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
12418 {"vkCmdDispatch", (void *)CmdDispatch},
12419 {"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
12420 {"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
12421 {"vkCmdCopyImage", (void *)CmdCopyImage},
12422 {"vkCmdBlitImage", (void *)CmdBlitImage},
12423 {"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
12424 {"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
12425 {"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
12426 {"vkCmdFillBuffer", (void *)CmdFillBuffer},
12427 {"vkCmdClearColorImage", (void *)CmdClearColorImage},
12428 {"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
12429 {"vkCmdClearAttachments", (void *)CmdClearAttachments},
12430 {"vkCmdResolveImage", (void *)CmdResolveImage},
12431 {"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
12432 {"vkCmdSetEvent", (void *)CmdSetEvent},
12433 {"vkCmdResetEvent", (void *)CmdResetEvent},
12434 {"vkCmdWaitEvents", (void *)CmdWaitEvents},
12435 {"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
12436 {"vkCmdBeginQuery", (void *)CmdBeginQuery},
12437 {"vkCmdEndQuery", (void *)CmdEndQuery},
12438 {"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
12439 {"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
12440 {"vkCmdPushConstants", (void *)CmdPushConstants},
12441 {"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
12442 {"vkCreateFramebuffer", (void *)CreateFramebuffer},
12443 {"vkCreateShaderModule", (void *)CreateShaderModule},
12444 {"vkCreateRenderPass", (void *)CreateRenderPass},
12445 {"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
12446 {"vkCmdNextSubpass", (void *)CmdNextSubpass},
12447 {"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
12448 {"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
12449 {"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
12450 {"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
12451 {"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
12452 {"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
12453 {"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
12454 {"vkSetEvent", (void *)SetEvent},
12455 {"vkMapMemory", (void *)MapMemory},
12456 {"vkUnmapMemory", (void *)UnmapMemory},
12457 {"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
12458 {"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
12459 {"vkAllocateMemory", (void *)AllocateMemory},
12460 {"vkFreeMemory", (void *)FreeMemory},
12461 {"vkBindBufferMemory", (void *)BindBufferMemory},
12462 {"vkBindBufferMemory2", (void *)BindBufferMemory2},
12463 {"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
12464 {"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
12465 {"vkGetBufferMemoryRequirements2", (void *)GetBufferMemoryRequirements2},
12466 {"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
12467 {"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
12468 {"vkGetImageMemoryRequirements2", (void *)GetImageMemoryRequirements2},
12469 {"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
12470 {"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
12471 {"vkGetImageSparseMemoryRequirements2", (void *)GetImageSparseMemoryRequirements2},
12472 {"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
12473 {"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
12474 {"vkGetPhysicalDeviceSparseImageFormatProperties2", (void *)GetPhysicalDeviceSparseImageFormatProperties2},
12475 {"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
12476 {"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
12477 {"vkBindImageMemory", (void *)BindImageMemory},
12478 {"vkBindImageMemory2", (void *)BindImageMemory2},
12479 {"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
12480 {"vkQueueBindSparse", (void *)QueueBindSparse},
12481 {"vkCreateSemaphore", (void *)CreateSemaphore},
12482 {"vkCreateEvent", (void *)CreateEvent},
12483 #ifdef VK_USE_PLATFORM_ANDROID_KHR
12484 {"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
12486 #ifdef VK_USE_PLATFORM_MIR_KHR
12487 {"vkCreateMirSurfaceKHR", (void *)CreateMirSurfaceKHR},
12488 {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void *)GetPhysicalDeviceMirPresentationSupportKHR},
12490 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
12491 {"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
12492 {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
12494 #ifdef VK_USE_PLATFORM_WIN32_KHR
12495 {"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
12496 {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
12497 {"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
12498 {"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
12499 {"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
12500 {"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
12502 #ifdef VK_USE_PLATFORM_XCB_KHR
12503 {"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
12504 {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
12506 #ifdef VK_USE_PLATFORM_XLIB_KHR
12507 {"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
12508 {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
12510 #ifdef VK_USE_PLATFORM_IOS_MVK
12511 {"vkCreateIOSSurfaceMVK", (void *)CreateIOSSurfaceMVK},
12513 #ifdef VK_USE_PLATFORM_MACOS_MVK
12514 {"vkCreateMacOSSurfaceMVK", (void *)CreateMacOSSurfaceMVK},
12516 {"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
12517 {"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
12518 {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
12519 {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
12520 {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
12521 {"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
12522 {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
12523 {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
12524 {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
12525 {"vkGetPhysicalDeviceQueueFamilyProperties2", (void *)GetPhysicalDeviceQueueFamilyProperties2},
12526 {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
12527 {"vkEnumeratePhysicalDeviceGroups", (void *)EnumeratePhysicalDeviceGroups},
12528 {"vkEnumeratePhysicalDeviceGroupsKHR", (void *)EnumeratePhysicalDeviceGroupsKHR},
12529 {"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
12530 {"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
12531 {"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
12532 {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
12533 {"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
12534 {"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
12535 {"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
12536 {"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
12537 {"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
12538 {"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
12539 {"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
12540 {"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
12541 {"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
12542 {"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
12543 {"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
12544 {"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
12545 {"vkSetDebugUtilsObjectNameEXT", (void *)SetDebugUtilsObjectNameEXT},
12546 {"vkSetDebugUtilsObjectTagEXT", (void *)SetDebugUtilsObjectTagEXT},
12547 {"vkQueueBeginDebugUtilsLabelEXT", (void *)QueueBeginDebugUtilsLabelEXT},
12548 {"vkQueueEndDebugUtilsLabelEXT", (void *)QueueEndDebugUtilsLabelEXT},
12549 {"vkQueueInsertDebugUtilsLabelEXT", (void *)QueueInsertDebugUtilsLabelEXT},
12550 {"vkCmdBeginDebugUtilsLabelEXT", (void *)CmdBeginDebugUtilsLabelEXT},
12551 {"vkCmdEndDebugUtilsLabelEXT", (void *)CmdEndDebugUtilsLabelEXT},
12552 {"vkCmdInsertDebugUtilsLabelEXT", (void *)CmdInsertDebugUtilsLabelEXT},
12553 {"vkCreateDebugUtilsMessengerEXT", (void *)CreateDebugUtilsMessengerEXT},
12554 {"vkDestroyDebugUtilsMessengerEXT", (void *)DestroyDebugUtilsMessengerEXT},
12555 {"vkSubmitDebugUtilsMessageEXT", (void *)SubmitDebugUtilsMessageEXT},
12558 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
12560 layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12562 // Is API to be intercepted by this layer?
12563 const auto &item = name_to_funcptr_map.find(funcName);
12564 if (item != name_to_funcptr_map.end()) {
12565 return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12568 auto &table = device_data->dispatch_table;
12569 if (!table.GetDeviceProcAddr) return nullptr;
12570 return table.GetDeviceProcAddr(device, funcName);
12573 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
12574 instance_layer_data *instance_data;
12575 // Is API to be intercepted by this layer?
12576 const auto &item = name_to_funcptr_map.find(funcName);
12577 if (item != name_to_funcptr_map.end()) {
12578 return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12581 instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12582 auto &table = instance_data->dispatch_table;
12583 if (!table.GetInstanceProcAddr) return nullptr;
12584 return table.GetInstanceProcAddr(instance, funcName);
12587 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
12589 instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12591 auto &table = instance_data->dispatch_table;
12592 if (!table.GetPhysicalDeviceProcAddr) return nullptr;
12593 return table.GetPhysicalDeviceProcAddr(instance, funcName);
12596 } // namespace core_validation
12598 // loader-layer interface v0, just wrappers since there is only a layer
12600 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
12601 VkExtensionProperties *pProperties) {
12602 return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
12605 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
12606 VkLayerProperties *pProperties) {
12607 return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
12610 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12611 VkLayerProperties *pProperties) {
12612 // the layer command handles VK_NULL_HANDLE just fine internally
12613 assert(physicalDevice == VK_NULL_HANDLE);
12614 return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
12617 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
12618 const char *pLayerName, uint32_t *pCount,
12619 VkExtensionProperties *pProperties) {
12620 // the layer command handles VK_NULL_HANDLE just fine internally
12621 assert(physicalDevice == VK_NULL_HANDLE);
12622 return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
12625 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
12626 return core_validation::GetDeviceProcAddr(dev, funcName);
12629 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
12630 return core_validation::GetInstanceProcAddr(instance, funcName);
12633 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
12634 const char *funcName) {
12635 return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
12638 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
12639 assert(pVersionStruct != NULL);
12640 assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
12642 // Fill in the function pointers if our version is at least capable of having the structure contain them.
12643 if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
12644 pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
12645 pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
12646 pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
12649 if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12650 core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
12651 } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12652 pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;