1 /* Copyright (c) 2015-2017 The Khronos Group Inc.
2  * Copyright (c) 2015-2017 Valve Corporation
3  * Copyright (c) 2015-2017 LunarG, Inc.
4  * Copyright (C) 2015-2017 Google Inc.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * Author: Cody Northrop <cnorthrop@google.com>
19  * Author: Michael Lentine <mlentine@google.com>
20  * Author: Tobin Ehlis <tobine@google.com>
21  * Author: Chia-I Wu <olv@google.com>
22  * Author: Chris Forbes <chrisf@ijw.co.nz>
23  * Author: Mark Lobodzinski <mark@lunarg.com>
24  * Author: Ian Elliott <ianelliott@google.com>
25  * Author: Dave Houlton <daveh@lunarg.com>
26  * Author: Dustin Graves <dustin@lunarg.com>
27  * Author: Jeremy Hayes <jeremy@lunarg.com>
28  * Author: Jon Ashburn <jon@lunarg.com>
29  * Author: Karl Schultz <karl@lunarg.com>
30  * Author: Mark Young <marky@lunarg.com>
31  * Author: Mike Schuchardt <mikes@lunarg.com>
32  * Author: Mike Weiblen <mikew@lunarg.com>
33  * Author: Tony Barbour <tony@LunarG.com>
34  */
35
36 // Allow use of STL min and max functions in Windows
37 #define NOMINMAX
38
39 #include <algorithm>
40 #include <array>
41 #include <assert.h>
42 #include <iostream>
43 #include <list>
44 #include <map>
45 #include <memory>
46 #include <mutex>
47 #include <set>
48 #include <sstream>
49 #include <stdio.h>
50 #include <stdlib.h>
51 #include <string.h>
52 #include <string>
53 #include <valarray>
54
55 #include "vk_loader_platform.h"
56 #include "vk_dispatch_table_helper.h"
57 #include "vk_enum_string_helper.h"
58 #if defined(__GNUC__)
59 #pragma GCC diagnostic ignored "-Wwrite-strings"
60 #endif
61 #if defined(__GNUC__)
62 #pragma GCC diagnostic warning "-Wwrite-strings"
63 #endif
64 #include "core_validation.h"
65 #include "buffer_validation.h"
66 #include "shader_validation.h"
67 #include "vk_layer_table.h"
68 #include "vk_layer_data.h"
69 #include "vk_layer_extension_utils.h"
70 #include "vk_layer_utils.h"
71 #include "vk_typemap_helper.h"
72
73 #if defined __ANDROID__
74 #include <android/log.h>
75 #define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "CORE_VALIDATION", __VA_ARGS__))
76 #else
77 #define LOGCONSOLE(...)      \
78     {                        \
79         printf(__VA_ARGS__); \
80         printf("\n");        \
81     }
82 #endif
83
84 // This intentionally includes a cpp file
85 #include "vk_safe_struct.cpp"
86
87 using mutex_t = std::mutex;
88 using lock_guard_t = std::lock_guard<mutex_t>;
89 using unique_lock_t = std::unique_lock<mutex_t>;
90
91 // These functions are defined *outside* the core_validation namespace as their type
92 // is also defined outside that namespace
93 size_t PipelineLayoutCompatDef::hash() const {
94     hash_util::HashCombiner hc;
95     // The set number is integral to the CompatDef's distinctiveness
96     hc << set << push_constant_ranges.get();
97     const auto &descriptor_set_layouts = *set_layouts_id.get();
98     for (uint32_t i = 0; i <= set; i++) {
99         hc << descriptor_set_layouts[i].get();
100     }
101     return hc.Value();
102 }
103
104 bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
105     if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
106         return false;
107     }
108
109     if (set_layouts_id == other.set_layouts_id) {
110         // if it's the same set_layouts_id, then *any* subset will match
111         return true;
112     }
113
114     // They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
115     const auto &descriptor_set_layouts = *set_layouts_id.get();
116     assert(set < descriptor_set_layouts.size());
117     const auto &other_ds_layouts = *other.set_layouts_id.get();
118     assert(set < other_ds_layouts.size());
119     for (uint32_t i = 0; i <= set; i++) {
120         if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
121             return false;
122         }
123     }
124     return true;
125 }
126
127 namespace core_validation {
128
129 using std::max;
130 using std::string;
131 using std::stringstream;
132 using std::unique_ptr;
133 using std::unordered_map;
134 using std::unordered_set;
135 using std::vector;
136
137 // WSI Image Objects bypass usual Image Object creation methods.  A special Memory
138 // Object value will be used to identify them internally.
139 static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
140 // 2nd special memory handle used to flag object as unbound from memory
141 static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
142
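// Per-instance state tracked by this layer: dispatch table, debug callbacks, enumeration call state,
// and per-physical-device / per-surface state maps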
143 struct instance_layer_data {
144     VkInstance instance = VK_NULL_HANDLE;
145     debug_report_data *report_data = nullptr;
146     vector<VkDebugReportCallbackEXT> logging_callback;
147     vector<VkDebugUtilsMessengerEXT> logging_messenger;
148     VkLayerInstanceDispatchTable dispatch_table;
149
150     CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
151     uint32_t physical_devices_count = 0;
152     CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
153     uint32_t physical_device_groups_count = 0;
154     CHECK_DISABLED disabled = {};
155
156     unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
157     unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
158
159     InstanceExtensions extensions;
160     uint32_t api_version;
161 };
162
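// Per-device state tracked by this layer: dispatch table, per-object state maps, and cached
// physical-device properties, features, and extension data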
163 struct layer_data {
164     debug_report_data *report_data = nullptr;
165     VkLayerDispatchTable dispatch_table;
166
167     DeviceExtensions extensions = {};
168     unordered_set<VkQueue> queues;  // All queues under given device
169     // Layer specific data
170     unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
171     unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
172     unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
173     unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
174     unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
175     unordered_map<VkPipeline, unique_ptr<PIPELINE_STATE>> pipelineMap;
176     unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
177     unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
178     unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
179     unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
180     unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
181     unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
182     unordered_map<VkFence, FENCE_NODE> fenceMap;
183     unordered_map<VkQueue, QUEUE_STATE> queueMap;
184     unordered_map<VkEvent, EVENT_STATE> eventMap;
185     unordered_map<QueryObject, bool> queryToStateMap;
186     unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
187     unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
188     unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
189     unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
190     unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
191     unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
192     unordered_map<VkRenderPass, std::shared_ptr<RENDER_PASS_STATE>> renderPassMap;
193     unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
194     unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
195     unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
196
197     VkDevice device = VK_NULL_HANDLE;
198     VkPhysicalDevice physical_device = VK_NULL_HANDLE;
199
200     instance_layer_data *instance_data = nullptr;  // from device to enclosing instance
201
202     VkPhysicalDeviceFeatures enabled_features = {};
203     // Device specific data
204     PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
205     VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
206     VkPhysicalDeviceProperties phys_dev_props = {};
207     // Device extension properties -- storing properties gathered from VkPhysicalDeviceProperties2KHR::pNext chain
208     struct DeviceExtensionProperties {
209         uint32_t max_push_descriptors;  // from VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors
210         VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_props;
211         VkPhysicalDeviceDescriptorIndexingFeaturesEXT descriptor_indexing_features;
212     };
213     DeviceExtensionProperties phys_dev_ext_props = {};
214     bool external_sync_warning = false;
215     uint32_t api_version = 0;
216 };
217
218 // TODO : Do we need to guard access to layer_data_map w/ lock?
219 static unordered_map<void *, layer_data *> layer_data_map;
220 static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
221
222 static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
223
224 static const VkLayerProperties global_layer = {
225     "VK_LAYER_LUNARG_core_validation",
226     VK_LAYER_API_VERSION,
227     1,
228     "LunarG Validation Layer",
229 };
230
231 static const VkExtensionProperties device_extensions[] = {
232     {VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
233 };
234
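// Warn (via console, since no debug callback exists at this point) if VK_LAYER_GOOGLE_unique_objects
// is activated before this layer in the enabled-layer list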
235 template <class TCreateInfo>
236 void ValidateLayerOrdering(const TCreateInfo &createInfo) {
237     bool foundLayer = false;
238     for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
239         if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
240             foundLayer = true;
241         }
242         // This has to be logged to console as we don't have a callback at this point.
243         if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
244             LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
245         }
246     }
247 }
248
249 // TODO : This can be much smarter, using separate locks for separate global data
250 static mutex_t global_lock;
251
252 // Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
253 IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
254     auto iv_it = dev_data->imageViewMap.find(image_view);
255     if (iv_it == dev_data->imageViewMap.end()) {
256         return nullptr;
257     }
258     return iv_it->second.get();
259 }
260 // Return sampler node ptr for specified sampler or else NULL
261 SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
262     auto sampler_it = dev_data->samplerMap.find(sampler);
263     if (sampler_it == dev_data->samplerMap.end()) {
264         return nullptr;
265     }
266     return sampler_it->second.get();
267 }
268 // Return image state ptr for specified image or else NULL
269 IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
270     auto img_it = dev_data->imageMap.find(image);
271     if (img_it == dev_data->imageMap.end()) {
272         return nullptr;
273     }
274     return img_it->second.get();
275 }
276 // Return buffer state ptr for specified buffer or else NULL
277 BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
278     auto buff_it = dev_data->bufferMap.find(buffer);
279     if (buff_it == dev_data->bufferMap.end()) {
280         return nullptr;
281     }
282     return buff_it->second.get();
283 }
284 // Return swapchain node for specified swapchain or else NULL
285 SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
286     auto swp_it = dev_data->swapchainMap.find(swapchain);
287     if (swp_it == dev_data->swapchainMap.end()) {
288         return nullptr;
289     }
290     return swp_it->second.get();
291 }
292 // Return buffer view state ptr for specified bufferView or else NULL
293 BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
294     auto bv_it = dev_data->bufferViewMap.find(buffer_view);
295     if (bv_it == dev_data->bufferViewMap.end()) {
296         return nullptr;
297     }
298     return bv_it->second.get();
299 }
300
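// Return fence node ptr for specified fence or else NULL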
301 FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
302     auto it = dev_data->fenceMap.find(fence);
303     if (it == dev_data->fenceMap.end()) {
304         return nullptr;
305     }
306     return &it->second;
307 }
308
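// Return event state ptr for specified event or else NULL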
309 EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
310     auto it = dev_data->eventMap.find(event);
311     if (it == dev_data->eventMap.end()) {
312         return nullptr;
313     }
314     return &it->second;
315 }
316
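// Return query pool node ptr for specified query pool or else NULL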
317 QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
318     auto it = dev_data->queryPoolMap.find(query_pool);
319     if (it == dev_data->queryPoolMap.end()) {
320         return nullptr;
321     }
322     return &it->second;
323 }
324
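// Return queue state ptr for specified queue or else NULL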
325 QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
326     auto it = dev_data->queueMap.find(queue);
327     if (it == dev_data->queueMap.end()) {
328         return nullptr;
329     }
330     return &it->second;
331 }
332
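// Return semaphore node ptr for specified semaphore or else NULL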
333 SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
334     auto it = dev_data->semaphoreMap.find(semaphore);
335     if (it == dev_data->semaphoreMap.end()) {
336         return nullptr;
337     }
338     return &it->second;
339 }
340
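// Return command pool node ptr for specified command pool or else NULL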
341 COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
342     auto it = dev_data->commandPoolMap.find(pool);
343     if (it == dev_data->commandPoolMap.end()) {
344         return nullptr;
345     }
346     return &it->second;
347 }
348
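// Return physical device state ptr for specified physical device or else NULL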
349 PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
350     auto it = instance_data->physical_device_map.find(phys);
351     if (it == instance_data->physical_device_map.end()) {
352         return nullptr;
353     }
354     return &it->second;
355 }
356
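// Return surface state ptr for specified surface or else NULL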
357 SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
358     auto it = instance_data->surface_map.find(surface);
359     if (it == instance_data->surface_map.end()) {
360         return nullptr;
361     }
362     return &it->second;
363 }
364
365 // Return ptr to memory binding for given handle of specified type
366 static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
367     switch (type) {
368         case kVulkanObjectTypeImage:
369             return GetImageState(dev_data, VkImage(handle));
370         case kVulkanObjectTypeBuffer:
371             return GetBufferState(dev_data, VkBuffer(handle));
372         default:
373             break;
374     }
375     return nullptr;
376 }
377 // prototype
378 GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
379
380 // Return ptr to the DEVICE_MEM_INFO for the given mem object, or NULL if not found
381 //  Calls to this function should be guarded by the global lock
382 DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
383     auto mem_it = dev_data->memObjMap.find(mem);
384     if (mem_it == dev_data->memObjMap.end()) {
385         return NULL;
386     }
387     return mem_it->second.get();
388 }
389
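// Create a DEVICE_MEM_INFO entry for the new allocation and record any dedicated buffer/image from the pNext chain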
390 static void add_mem_obj_info(layer_data *dev_data, void *object, const VkDeviceMemory mem,
391                              const VkMemoryAllocateInfo *pAllocateInfo) {
392     assert(object != NULL);
393
394     auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
395     dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(mem_info);
396
397     auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
398     if (dedicated) {
399         mem_info->is_dedicated = true;
400         mem_info->dedicated_buffer = dedicated->buffer;
401         mem_info->dedicated_image = dedicated->image;
402     }
403 }
404
405 // Create binding link between given sampler and command buffer node
406 void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
407     sampler_state->cb_bindings.insert(cb_node);
408     cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
409 }
410
411 // Create binding link between given image node and command buffer node
412 void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
413     // Skip validation if this image was created through WSI
414     if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
415         // First update CB binding in MemObj mini CB list
416         for (auto mem_binding : image_state->GetBoundMemory()) {
417             DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
418             if (pMemInfo) {
419                 pMemInfo->cb_bindings.insert(cb_node);
420                 // Now update CBInfo's Mem reference list
421                 cb_node->memObjs.insert(mem_binding);
422             }
423         }
424         // Now update cb binding for image
425         cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
426         image_state->cb_bindings.insert(cb_node);
427     }
428 }
429
430 // Create binding link between given image view node and its image with command buffer node
431 void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
432     // First add bindings for imageView
433     view_state->cb_bindings.insert(cb_node);
434     cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
435     auto image_state = GetImageState(dev_data, view_state->create_info.image);
436     // Add bindings for image within imageView
437     if (image_state) {
438         AddCommandBufferBindingImage(dev_data, cb_node, image_state);
439     }
440 }
441
442 // Create binding link between given buffer node and command buffer node
443 void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
444     // First update CB binding in MemObj mini CB list
445     for (auto mem_binding : buffer_state->GetBoundMemory()) {
446         DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
447         if (pMemInfo) {
448             pMemInfo->cb_bindings.insert(cb_node);
449             // Now update CBInfo's Mem reference list
450             cb_node->memObjs.insert(mem_binding);
451         }
452     }
453     // Now update cb binding for buffer
454     cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
455     buffer_state->cb_bindings.insert(cb_node);
456 }
457
458 // Create binding link between given buffer view node and its buffer with command buffer node
459 void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
460     // First add bindings for bufferView
461     view_state->cb_bindings.insert(cb_node);
462     cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
463     auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
464     // Add bindings for buffer within bufferView
465     if (buffer_state) {
466         AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
467     }
468 }
469
470 // For every mem obj bound to a particular CB, free the bindings related to that CB
471 static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
472     if (cb_node) {
473         if (cb_node->memObjs.size() > 0) {
474             for (auto mem : cb_node->memObjs) {
475                 DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
476                 if (pInfo) {
477                     pInfo->cb_bindings.erase(cb_node);
478                 }
479             }
480             cb_node->memObjs.clear();
481         }
482     }
483 }
484
485 // Clear a single object binding from the given memory object (currently always returns false without reporting an error)
486 static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
487     DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
488     // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
489     if (mem_info) {
490         mem_info->obj_bindings.erase({handle, type});
491     }
492     return false;
493 }
494
495 // ClearMemoryObjectBindings clears the binding of objects to memory
496 //  For the given object it pulls the memory bindings and makes sure that the bindings
497 //  no longer refer to the object being cleared. This occurs when objects are destroyed.
498 bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
499     bool skip = false;
500     BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
501     if (mem_binding) {
502         if (!mem_binding->sparse) {
503             skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
504         } else {  // Sparse, clear all bindings
505             for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
506                 skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
507             }
508         }
509     }
510     return skip;
511 }
512
513 // For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
514 bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
515                               const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
516     bool result = false;
517     if (VK_NULL_HANDLE == mem) {
518         result =
519             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
520                     "%s: Vk%s object 0x%" PRIx64 " used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
521                     api_name, type_name, handle, type_name);
522     } else if (MEMORY_UNBOUND == mem) {
523         result =
524             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
525                     "%s: Vk%s object 0x%" PRIx64
526                     " used with no memory bound and previously bound memory was freed. Memory must not be freed prior to this "
527                     "operation.",
528                     api_name, type_name, handle);
529     }
530     return result;
531 }
532
533 // Check to see if memory was ever bound to this image
534 bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
535                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
536     bool result = false;
537     if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
538         result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
539                                           error_code);
540     }
541     return result;
542 }
543
544 // Check to see if memory was bound to this buffer
545 bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
546                                    UNIQUE_VALIDATION_ERROR_CODE error_code) {
547     bool result = false;
548     if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
549         result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
550                                           "Buffer", error_code);
551     }
552     return result;
553 }
554
555 // SetMemBinding is used to establish an immutable, non-sparse binding between a single image/buffer object and a memory object.
556 // Corresponding valid usage checks are in ValidateSetMemBinding().
557 static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
558                           uint64_t handle, VulkanObjectType type, const char *apiName) {
559     assert(mem_binding);
560     mem_binding->binding.mem = mem;
561     mem_binding->UpdateBoundMemorySet();  // force recreation of cached set
562     mem_binding->binding.offset = memory_offset;
563     mem_binding->binding.size = mem_binding->requirements.size;
564
565     if (mem != VK_NULL_HANDLE) {
566         DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
567         if (mem_info) {
568             mem_info->obj_bindings.insert({handle, type});
569             // For image objects, make sure default memory state is correctly set
570             // TODO : What's the best/correct way to handle this?
571             if (kVulkanObjectTypeImage == type) {
572                 auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
573                 if (image_state) {
574                     VkImageCreateInfo ici = image_state->createInfo;
575                     if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
576                         // TODO::  More memory state transition stuff.
577                     }
578                 }
579             }
580         }
581     }
582 }
583
584 // Valid usage checks for a call to SetMemBinding().
585 //  For the NULL mem case, no check is performed here
586 //  Make sure the given object is in the global object map
587 //  If the object was created with sparse memory flags, output a validation error
588 //  If a previous binding exists, output a validation error
589 //  If the previously bound memory has since been freed, output a validation error
590 // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
591 static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
592                                   const char *apiName) {
593     bool skip = false;
594     // It's an error to bind an object to NULL memory
595     if (mem != VK_NULL_HANDLE) {
596         BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
597         assert(mem_binding);
598         if (mem_binding->sparse) {
599             UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_1740082a;
600             const char *handle_type = "IMAGE";
601             if (type == kVulkanObjectTypeBuffer) {
602                 error_code = VALIDATION_ERROR_1700080c;
603                 handle_type = "BUFFER";
604             } else {
605                 assert(type == kVulkanObjectTypeImage);
606             }
607             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
608                             HandleToUint64(mem), error_code,
609                             "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
610                             ") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT).",
611                             apiName, HandleToUint64(mem), handle, handle_type);
612         }
613         DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
614         if (mem_info) {
615             DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
616             if (prev_binding) {
617                 UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_17400828;
618                 if (type == kVulkanObjectTypeBuffer) {
619                     error_code = VALIDATION_ERROR_1700080a;
620                 } else {
621                     assert(type == kVulkanObjectTypeImage);
622                 }
623                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
624                                 HandleToUint64(mem), error_code,
625                                 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
626                                 ") which has already been bound to mem object 0x%" PRIx64 ".",
627                                 apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem));
628             } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
629                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
630                                 HandleToUint64(mem), MEMTRACK_REBIND_OBJECT,
631                                 "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
632                                 ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
633                                 "Vulkan so this attempt to bind to new memory is not allowed.",
634                                 apiName, HandleToUint64(mem), handle);
635             }
636         }
637     }
638     return skip;
639 }
640
641 // For NULL mem case, clear any previous binding; else:
642 //  Make sure the given object is in its object map
643 //  IF a previous binding existed, update the binding
644 //  Add reference from objectInfo to memoryInfo
645 //  Add reference off of object's binding info
646 // Returns a skip value (currently always VK_FALSE)
647 static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
648     bool skip = VK_FALSE;
649     // Handle NULL case separately, just clear previous binding & decrement reference
650     if (binding.mem == VK_NULL_HANDLE) {
651         // TODO : This should cause the range of the resource to be unbound according to spec
652     } else {
653         BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
654         assert(mem_binding);
655         assert(mem_binding->sparse);
656         DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
657         if (mem_info) {
658             mem_info->obj_bindings.insert({handle, type});
659             // Need to set mem binding for this object
660             mem_binding->sparse_bindings.insert(binding);
661             mem_binding->UpdateBoundMemorySet();
662         }
663     }
664     return skip;
665 }
666
667 // Check object status for selected flag state
668 static bool validate_status(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
669                             const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
670     if (!(pNode->status & status_mask)) {
671         return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
672                        HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object 0x%" PRIx64 ": %s..",
673                        HandleToUint64(pNode->commandBuffer), fail_msg);
674     }
675     return false;
676 }
677
678 // Retrieve pipeline node ptr for given pipeline object
679 static PIPELINE_STATE *getPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
680     auto it = dev_data->pipelineMap.find(pipeline);
681     if (it == dev_data->pipelineMap.end()) {
682         return nullptr;
683     }
684     return it->second.get();
685 }
686
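// Return render pass state ptr for specified renderPass or else NULL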
687 RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
688     auto it = dev_data->renderPassMap.find(renderpass);
689     if (it == dev_data->renderPassMap.end()) {
690         return nullptr;
691     }
692     return it->second.get();
693 }
694
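// Return shared pointer to render pass state for specified renderPass or else NULL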
695 std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
696     auto it = dev_data->renderPassMap.find(renderpass);
697     if (it == dev_data->renderPassMap.end()) {
698         return nullptr;
699     }
700     return it->second;
701 }
702
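// Return framebuffer state ptr for specified framebuffer or else NULL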
703 FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
704     auto it = dev_data->frameBufferMap.find(framebuffer);
705     if (it == dev_data->frameBufferMap.end()) {
706         return nullptr;
707     }
708     return it->second.get();
709 }
710
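// Return shared pointer to descriptor set layout state for specified dsLayout or else NULL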
711 std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
712                                                                                          VkDescriptorSetLayout dsLayout) {
713     auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
714     if (it == dev_data->descriptorSetLayoutMap.end()) {
715         return nullptr;
716     }
717     return it->second;
718 }
719
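// Return pipeline layout node ptr for specified pipelineLayout or else NULL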
720 static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
721     auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
722     if (it == dev_data->pipelineLayoutMap.end()) {
723         return nullptr;
724     }
725     return &it->second;
726 }
727
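// Return shader module state ptr for specified shader module or else NULL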
728 shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
729     auto it = dev_data->shaderModuleMap.find(module);
730     if (it == dev_data->shaderModuleMap.end()) {
731         return nullptr;
732     }
733     return it->second.get();
734 }
735
736 // Return true if, for the given PSO, the given state enum is dynamic; else return false
737 static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
738     if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
739         for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
740             if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
741         }
742     }
743     return false;
744 }
745
746 // Validate state stored as flags at time of draw call
747 static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
748                                       UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
749     bool result = false;
750     if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
751         pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
752         result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
753                                   "Dynamic line width state not set for this command buffer", msg_code);
754     }
755     if (pPipe->graphicsPipelineCI.pRasterizationState &&
756         (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
757         result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
758                                   "Dynamic depth bias state not set for this command buffer", msg_code);
759     }
760     if (pPipe->blendConstantsEnabled) {
761         result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
762                                   "Dynamic blend constants state not set for this command buffer", msg_code);
763     }
764     if (pPipe->graphicsPipelineCI.pDepthStencilState &&
765         (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
766         result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
767                                   "Dynamic depth bounds state not set for this command buffer", msg_code);
768     }
769     if (pPipe->graphicsPipelineCI.pDepthStencilState &&
770         (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
771         result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
772                                   "Dynamic stencil read mask state not set for this command buffer", msg_code);
773         result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
774                                   "Dynamic stencil write mask state not set for this command buffer", msg_code);
775         result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
776                                   "Dynamic stencil reference state not set for this command buffer", msg_code);
777     }
778     if (indexed) {
779         result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
780                                   "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
781     }
782
783     return result;
784 }
785
786 static bool logInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
787                                         const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
788                                         uint32_t secondary_attach, const char *msg, const char *caller,
789                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
790     return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
791                    HandleToUint64(rp1_state->renderPass), error_code,
792                    "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64 " and %s w/ renderPass 0x%" PRIx64
793                    " Attachment %u is not compatible with %u: %s.",
794                    caller, type1_string, HandleToUint64(rp1_state->renderPass), type2_string, HandleToUint64(rp2_state->renderPass),
795                    primary_attach, secondary_attach, msg);
796 }
797
798 static bool validateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
799                                             const RENDER_PASS_STATE *rp1_state, const char *type2_string,
800                                             const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
801                                             const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
802     bool skip = false;
803     const auto &primaryPassCI = rp1_state->createInfo;
804     const auto &secondaryPassCI = rp2_state->createInfo;
805     if (primaryPassCI.attachmentCount <= primary_attach) {
806         primary_attach = VK_ATTACHMENT_UNUSED;
807     }
808     if (secondaryPassCI.attachmentCount <= secondary_attach) {
809         secondary_attach = VK_ATTACHMENT_UNUSED;
810     }
811     if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
812         return skip;
813     }
814     if (primary_attach == VK_ATTACHMENT_UNUSED) {
815         skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
816                                             secondary_attach, "The first is unused while the second is not.", caller, error_code);
817         return skip;
818     }
819     if (secondary_attach == VK_ATTACHMENT_UNUSED) {
820         skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
821                                             secondary_attach, "The second is unused while the first is not.", caller, error_code);
822         return skip;
823     }
824     if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
825         skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
826                                             secondary_attach, "They have different formats.", caller, error_code);
827     }
828     if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
829         skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
830                                             secondary_attach, "They have different samples.", caller, error_code);
831     }
832     if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
833         skip |= logInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
834                                             secondary_attach, "They have different flags.", caller, error_code);
835     }
836
837     return skip;
838 }
839
840 static bool validateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
841                                          const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
842                                          const char *caller, UNIQUE_VALIDATION_ERROR_CODE error_code) {
843     bool skip = false;
844     const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
845     const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
846     uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
847     for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
848         uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
849         if (i < primary_desc.inputAttachmentCount) {
850             primary_input_attach = primary_desc.pInputAttachments[i].attachment;
851         }
852         if (i < secondary_desc.inputAttachmentCount) {
853             secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
854         }
855         skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
856                                                 secondary_input_attach, caller, error_code);
857     }
858     uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
859     for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
860         uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
861         if (i < primary_desc.colorAttachmentCount) {
862             primary_color_attach = primary_desc.pColorAttachments[i].attachment;
863         }
864         if (i < secondary_desc.colorAttachmentCount) {
865             secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
866         }
867         skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
868                                                 secondary_color_attach, caller, error_code);
869         uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
870         if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
871             primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
872         }
873         if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
874             secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
875         }
876         skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
877                                                 secondary_resolve_attach, caller, error_code);
878     }
879     uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
880     if (primary_desc.pDepthStencilAttachment) {
881         primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
882     }
883     if (secondary_desc.pDepthStencilAttachment) {
884         secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
885     }
886     skip |= validateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
887                                             secondary_depthstencil_attach, caller, error_code);
888     return skip;
889 }
890
891 // Verify that the renderPass CreateInfos for primary and secondary command buffers are compatible.
892 //  This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
893 //  will then feed into this function.
894 static bool validateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
895                                             const RENDER_PASS_STATE *rp1_state, const char *type2_string,
896                                             const RENDER_PASS_STATE *rp2_state, const char *caller,
897                                             UNIQUE_VALIDATION_ERROR_CODE error_code) {
898     bool skip = false;
899
900     if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
901         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
902                         HandleToUint64(rp1_state->renderPass), error_code,
903                         "%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64
904                         " with a subpassCount of %u and %s w/ renderPass 0x%" PRIx64 " with a subpassCount of %u.",
905                         caller, type1_string, HandleToUint64(rp1_state->renderPass), rp1_state->createInfo.subpassCount,
906                         type2_string, HandleToUint64(rp2_state->renderPass), rp2_state->createInfo.subpassCount);
907     } else {
908         for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
909             skip |= validateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
910         }
911     }
912     return skip;
913 }
914
915 // Return Set node ptr for specified set or else NULL
916 cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
917     auto set_it = dev_data->setMap.find(set);
918     if (set_it == dev_data->setMap.end()) {
919         return NULL;
920     }
921     return set_it->second;
922 }
923
924 // For given pipeline, return number of MSAA samples, or one if MSAA disabled
925 static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
926     if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
927         VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
928         return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
929     }
930     return VK_SAMPLE_COUNT_1_BIT;
931 }
932
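// Write the indices of all set bits in 'bits' to the stream as a comma-separated list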
933 static void list_bits(std::ostream &s, uint32_t bits) {
934     for (int i = 0; i < 32 && bits; i++) {
935         if (bits & (1 << i)) {
936             s << i;
937             bits &= ~(1 << i);
938             if (bits) {
939                 s << ",";
940             }
941         }
942     }
943 }
944
945 // Validate draw-time state related to the PSO
946 static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
947                                           CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
948     bool skip = false;
949
950     // Verify vertex binding
951     if (pPipeline->vertexBindingDescriptions.size() > 0) {
952         for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
953             auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
954             if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
955                 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
956                 skip |=
957                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
958                             HandleToUint64(pCB->commandBuffer), DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
959                             "The Pipeline State Object (0x%" PRIx64
960                             ") expects that this Command Buffer's vertex binding Index %u should be set via "
961                             "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
962                             "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
963                             HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
964             }
965         }
966     } else {
967         if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
968             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
969                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
970                             DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
971                             "Vertex buffers are bound to command buffer (0x%" PRIx64
972                             ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIx64 ").",
973                             HandleToUint64(pCB->commandBuffer), HandleToUint64(state.pipeline_state->pipeline));
974         }
975     }
976     // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
977     // Skip check if rasterization is disabled or there is no viewport.
978     if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
979          (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
980         pPipeline->graphicsPipelineCI.pViewportState) {
981         bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
982         bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
983
984         if (dynViewport) {
985             auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
986             auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
987             if (missingViewportMask) {
988                 std::stringstream ss;
989                 ss << "Dynamic viewport(s) ";
990                 list_bits(ss, missingViewportMask);
991                 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
992                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
993                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
994             }
995         }
996
997         if (dynScissor) {
998             auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
999             auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
1000             if (missingScissorMask) {
1001                 std::stringstream ss;
1002                 ss << "Dynamic scissor(s) ";
1003                 list_bits(ss, missingScissorMask);
1004                 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
1005                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1006                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "%s", ss.str().c_str());
1007             }
1008         }
1009     }
1010
1011     // Verify that any MSAA request in PSO matches sample# in bound FB
1012     // Skip the check if rasterization is disabled.
1013     if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
1014         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
1015         VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
1016         if (pCB->activeRenderPass) {
1017             auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
1018             const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
1019             uint32_t i;
1020             unsigned subpass_num_samples = 0;
1021
1022             for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
1023                 auto attachment = subpass_desc->pColorAttachments[i].attachment;
1024                 if (attachment != VK_ATTACHMENT_UNUSED)
1025                     subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1026             }
1027
1028             if (subpass_desc->pDepthStencilAttachment &&
1029                 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1030                 auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
1031                 subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
1032             }
1033
1034             if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
1035                 ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
1036                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1037                                 HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1038                                 "Num samples mismatch! At draw-time in Pipeline (0x%" PRIx64
1039                                 ") with %u samples while current RenderPass (0x%" PRIx64 ") w/ %u samples!",
1040                                 HandleToUint64(pPipeline->pipeline), pso_num_samples,
1041                                 HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
1042             }
1043         } else {
1044             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1045                             HandleToUint64(pPipeline->pipeline), DRAWSTATE_NUM_SAMPLES_MISMATCH,
1046                             "No active render pass found at draw-time in Pipeline (0x%" PRIx64 ")!",
1047                             HandleToUint64(pPipeline->pipeline));
1048         }
1049     }
1050     // Verify that PSO creation renderPass is compatible with active renderPass
1051     if (pCB->activeRenderPass) {
1052         // TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
1053         // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
1054         // Error codes for renderpass and subpass mismatches
1055         auto rp_error = VALIDATION_ERROR_1a200366, sp_error = VALIDATION_ERROR_1a200368;
1056         switch (cmd_type) {
1057             case CMD_DRAWINDEXED:
1058                 rp_error = VALIDATION_ERROR_1a40038c;
1059                 sp_error = VALIDATION_ERROR_1a40038e;
1060                 break;
1061             case CMD_DRAWINDIRECT:
1062                 rp_error = VALIDATION_ERROR_1aa003be;
1063                 sp_error = VALIDATION_ERROR_1aa003c0;
1064                 break;
1065             case CMD_DRAWINDIRECTCOUNTAMD:
1066                 rp_error = VALIDATION_ERROR_1ac003f6;
1067                 sp_error = VALIDATION_ERROR_1ac003f8;
1068                 break;
1069             case CMD_DRAWINDEXEDINDIRECT:
1070                 rp_error = VALIDATION_ERROR_1a600426;
1071                 sp_error = VALIDATION_ERROR_1a600428;
1072                 break;
1073             case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
1074                 rp_error = VALIDATION_ERROR_1a800460;
1075                 sp_error = VALIDATION_ERROR_1a800462;
1076                 break;
1077             default:
1078                 assert(CMD_DRAW == cmd_type);
1079                 break;
1080         }
1081         std::string err_string;
1082         if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
1083             // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
1084             skip |= validateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
1085                                                     pPipeline->rp_state.get(), caller, rp_error);
1086         }
1087         if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
1088             skip |=
1089                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1090                         HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
1091                         pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
1092         }
1093     }
1094
1095     return skip;
1096 }
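// Illustrative application-side sketch of how the sample-count check above can fire (names are hypothetical,
// not part of this layer): the pipeline's rasterizationSamples disagrees with the attachments of the subpass
// it is drawn in, e.g. against single-sampled attachments:
//
//     VkPipelineMultisampleStateCreateInfo ms = {VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO};
//     ms.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;  // subpass attachments use VK_SAMPLE_COUNT_1_BIT
//     // ... vkCreateGraphicsPipelines(), vkCmdBindPipeline(), vkCmdDraw() -> DRAWSTATE_NUM_SAMPLES_MISMATCH
//
// When VK_AMD_mixed_attachment_samples is enabled the per-attachment counts may legitimately differ, which is
// why the comparison is skipped in that case.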
1097
1098 // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
1099 // pipelineLayout[layoutIndex]
1100 static bool verify_set_layout_compatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
1101                                             PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
1102                                             string &errorMsg) {
1103     auto num_sets = pipeline_layout->set_layouts.size();
1104     if (layoutIndex >= num_sets) {
1105         stringstream errorStr;
1106         errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
1107                  << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
1108                  << layoutIndex;
1109         errorMsg = errorStr.str();
1110         return false;
1111     }
1112     if (descriptor_set->IsPushDescriptor()) return true;
1113     auto layout_node = pipeline_layout->set_layouts[layoutIndex];
1114     return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
1115 }
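// Illustrative application-side sketch of the incompatibility this helper reports (names are hypothetical):
// a descriptor set allocated against one VkDescriptorSetLayout is bound at an index whose layout in the
// pipeline's VkPipelineLayout differs (different bindings, types, or counts):
//
//     // pipeline_layout set 0 was created as { binding 0: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER }
//     // descriptor_set was allocated from    { binding 0: VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER }
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0 /*firstSet*/, 1, &descriptor_set, 0, nullptr);
//     // -> reported as DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE at the next draw that uses set 0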
1116
1117 // Validate overall state at the time of a draw call
1118 static bool ValidateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
1119                               const VkPipelineBindPoint bind_point, const char *function,
1120                               UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
1121     bool result = false;
1122     auto const &state = cb_node->lastBound[bind_point];
1123     PIPELINE_STATE *pPipe = state.pipeline_state;
1124     if (nullptr == pPipe) {
1125         result |= log_msg(
1126             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1127             HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_PIPELINE,
1128             "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
1129         // Early return as any further checks below will be busted w/o a pipeline
1130         return result;
1131     }
1132     // First check flag states
1133     if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1134         result = validate_draw_state_flags(dev_data, cb_node, pPipe, indexed, msg_code);
1135
1136     // Now complete other state checks
1137     string errorString;
1138     auto const &pipeline_layout = pPipe->pipeline_layout;
1139
1140     for (const auto &set_binding_pair : pPipe->active_slots) {
1141         uint32_t setIndex = set_binding_pair.first;
1142         // If valid set is not bound throw an error
1143         if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
1144             result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1145                               HandleToUint64(cb_node->commandBuffer), DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND,
1146                               "VkPipeline 0x%" PRIx64 " uses set #%u but that set is not bound.", HandleToUint64(pPipe->pipeline),
1147                               setIndex);
1148         } else if (!verify_set_layout_compatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) {
1149             // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
1150             VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
1151             result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1152                               HandleToUint64(setHandle), DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE,
1153                               "VkDescriptorSet (0x%" PRIx64
1154                               ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIx64 " due to: %s",
1155                               HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
1156         } else {  // Valid set is bound and layout compatible, validate that it's updated
1157             // Pull the set node
1158             cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1159             // Validate the draw-time state for this descriptor set
1160             std::string err_str;
1161             if (!descriptor_set->IsPushDescriptor()) {
1162                 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
1163                 // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
1164                 // Here, the currently bound pipeline determines whether an image validation check is redundant...
1165                 // for images, the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
1166                 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
1167                                                                            pPipe);
1168                 const auto &binding_req_map = reduced_map.Map();
1169
1170                 if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
1171                                                        &err_str)) {
1172                     auto set = descriptor_set->GetSet();
1173                     result |= log_msg(
1174                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1175                         HandleToUint64(set), DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
1176                         "Descriptor set 0x%" PRIx64 " bound as set #%u encountered the following validation error at %s time: %s",
1177                         HandleToUint64(set), setIndex, function, err_str.c_str());
1178                 }
1179             }
1180         }
1181     }
1182
1183     // Check general pipeline state that needs to be validated at drawtime
1184     if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
1185         result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);
1186
1187     return result;
1188 }
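// Illustrative sketch of the DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND case checked above (names are hypothetical):
// the pipeline's shaders statically use set 1, but only set 0 is bound before the draw:
//
//     vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);  // pipeline layout declares sets 0 and 1
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, 1, &set0, 0, nullptr);
//     vkCmdDraw(cmd, 3, 1, 0, 0);  // set 1 was never bound -> error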
1189
1190 static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
1191     auto const &state = cb_state->lastBound[bind_point];
1192     PIPELINE_STATE *pPipe = state.pipeline_state;
1193     if (pPipe && (VK_NULL_HANDLE != state.pipeline_layout)) {
1194         for (const auto &set_binding_pair : pPipe->active_slots) {
1195             uint32_t setIndex = set_binding_pair.first;
1196             // Pull the set node
1197             cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
1198             if (!descriptor_set->IsPushDescriptor()) {
1199                 // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
1200                 const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
1201                 const auto &binding_req_map = reduced_map.Map();
1202
1203                 // Bind this set and its active descriptor resources to the command buffer
1204                 descriptor_set->BindCommandBuffer(cb_state, binding_req_map);
1205                 // For given active slots record updated images & buffers
1206                 descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
1207             }
1208         }
1209     }
1210     if (pPipe && (pPipe->vertexBindingDescriptions.size() > 0)) {
1211         cb_state->vertex_buffer_used = true;
1212     }
1213 }
1214
1215 static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1216                                    int pipelineIndex) {
1217     bool skip = false;
1218
1219     PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1220
1221     // If create derivative bit is set, check that we've specified a base
1222     // pipeline correctly, and that the base pipeline was created to allow
1223     // derivatives.
1224     if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
1225         PIPELINE_STATE *pBasePipeline = nullptr;
1226         if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
1227               (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
1228             // This check is a superset of VALIDATION_ERROR_096005a8 and VALIDATION_ERROR_096005aa
1229             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1230                             HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1231                             "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
1232         } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
1233             if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
1234                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1235                                 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_208005a0,
1236                                 "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
1237             } else {
1238                 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
1239             }
1240         } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
1241             pBasePipeline = getPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
1242         }
1243
1244         if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
1245             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1246                             HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1247                             "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
1248         }
1249     }
1250
1251     return skip;
1252 }
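// Illustrative sketch of a derivative-pipeline request that satisfies the checks above (names are hypothetical):
// the base pipeline opts in with ALLOW_DERIVATIVES and the derivative names it by index; the base must appear
// earlier in the same vkCreateGraphicsPipelines() array, and exactly one of handle/index may be used:
//
//     VkGraphicsPipelineCreateInfo infos[2] = { /* common pipeline state elided */ };
//     infos[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
//     infos[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     infos[1].basePipelineHandle = VK_NULL_HANDLE;
//     infos[1].basePipelineIndex = 0;  // refers to infos[0], which precedes it in the array
//     vkCreateGraphicsPipelines(device, pipeline_cache, 2, infos, nullptr, pipelines);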
1253
1254 // UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
1255 static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
1256                                      int pipelineIndex) {
1257     bool skip = false;
1258
1259     PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
1260
1261     // Ensure the subpass index is valid before using it to index pSubpasses; an out-of-range index would make
1262     // validate_and_capture_pipeline_shader_state emit nonsense errors. Other layers already flag an invalid renderpass.
1263     auto subpass_desc = (pPipeline->graphicsPipelineCI.subpass < pPipeline->rp_state->createInfo.subpassCount)
1264                             ? &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass]
1265                             : nullptr;
1266     if (nullptr == subpass_desc) {
1267         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1268                         HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ee,
1269                         "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
1270                         pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
1271     }
1272
1273     if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
1274         const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
1275         if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
1276             skip |= log_msg(
1277                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1278                 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005d4,
1279                 "vkCreateGraphicsPipelines(): Render pass (0x%" PRIx64
1280                 ") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u.",
1281                 HandleToUint64(pPipeline->rp_state->renderPass), pPipeline->graphicsPipelineCI.subpass,
1282                 subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
1283         }
1284         if (!dev_data->enabled_features.independentBlend) {
1285             if (pPipeline->attachments.size() > 1) {
1286                 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
1287                 for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
1288                     // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
1289                     // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
1290                     // only attachment state, so memcmp is best suited for the comparison
1291                     if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
1292                                sizeof(pAttachments[0]))) {
1293                         skip |=
1294                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1295                                     HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004ba,
1296                                     "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
1297                                     "pAttachments must be identical.");
1298                         break;
1299                     }
1300                 }
1301             }
1302         }
1303         if (!dev_data->enabled_features.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
1304             skip |=
1305                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1306                         HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f4004bc,
1307                         "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
1308         }
1309         for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
1310             if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1311                 (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1312                 (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1313                 (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1314                 if (!dev_data->enabled_features.dualSrcBlend) {
1315                     skip |=
1316                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1317                                 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c0,
1318                                 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1319                                 "].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1320                                 "enabled.",
1321                                 pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
1322                 }
1323             }
1324             if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1325                 (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1326                 (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1327                 (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1328                 if (!dev_data->enabled_features.dualSrcBlend) {
1329                     skip |=
1330                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1331                                 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c2,
1332                                 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1333                                 "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1334                                 "enabled.",
1335                                 pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
1336                 }
1337             }
1338             if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1339                 (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1340                 (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1341                 (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1342                 if (!dev_data->enabled_features.dualSrcBlend) {
1343                     skip |=
1344                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1345                                 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c4,
1346                                 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1347                                 "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1348                                 "enabled.",
1349                                 pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
1350                 }
1351             }
1352             if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
1353                 (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
1354                 (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
1355                 (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
1356                 if (!dev_data->enabled_features.dualSrcBlend) {
1357                     skip |=
1358                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1359                                 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f2004c6,
1360                                 "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
1361                                 "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
1362                                 "enabled.",
1363                                 pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
1364                 }
1365             }
1366         }
1367     }
1368
1369     if (validate_and_capture_pipeline_shader_state(dev_data, pPipeline)) {
1370         skip = true;
1371     }
1372     // Each shader's stage must be unique
1373     if (pPipeline->duplicate_shaders) {
1374         for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
1375             if (pPipeline->duplicate_shaders & stage) {
1376                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1377                                 HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
1378                                 "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
1379                                 string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
1380             }
1381         }
1382     }
1383     // VS is required
1384     if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
1385         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1386                         HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005ae,
1387                         "Invalid Pipeline CreateInfo State: Vertex Shader required.");
1388     }
1389     // Either both or neither TC/TE shaders should be defined
1390     bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
1391     bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
1392     if (has_control && !has_eval) {
1393         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1394                         HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b2,
1395                         "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1396     }
1397     if (!has_control && has_eval) {
1398         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1399                         HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b4,
1400                         "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
1401     }
1402     // Compute shaders should be specified independent of Gfx shaders
1403     if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
1404         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1405                         HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005b0,
1406                         "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
1407     }
1408     // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
1409     // Mismatching primitive topology and tessellation fails graphics pipeline creation.
1410     if (has_control && has_eval &&
1411         (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
1412          pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
1413         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1414                         HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c0,
1415                         "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
1416                         "tessellation pipelines.");
1417     }
1418     if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
1419         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
1420         if (!has_control || !has_eval) {
1421             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1422                             HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005c2,
1423                             "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
1424                             "for tessellation pipelines.");
1425         }
1426     }
1427
1428     // If a rasterization state is provided...
1429     if (pPipeline->graphicsPipelineCI.pRasterizationState) {
1430         if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
1431             (!dev_data->enabled_features.depthClamp)) {
1432             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1433                             HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_1020061c,
1434                             "vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
1435                             "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
1436         }
1437
1438         if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
1439             (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
1440             (!dev_data->enabled_features.depthBiasClamp)) {
1441             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1442                             HandleToUint64(pPipeline->pipeline), DRAWSTATE_INVALID_FEATURE,
1443                             "vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
1444                             "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
1445                             "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
1446         }
1447
1448         // If rasterization is enabled...
1449         if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
1450             if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
1451                 (!dev_data->enabled_features.alphaToOne)) {
1452                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1453                                 HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_10000622,
1454                                 "vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
1455                                 "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
1456             }
1457
1458             // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
1459             if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
1460                 subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1461                 if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
1462                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1463                                     HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e0,
1464                                     "Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
1465                                     "and subpass uses a depth/stencil attachment.");
1466
1467                 } else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
1468                            (!dev_data->enabled_features.depthBounds)) {
1469                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1470                                     HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_0f6004ac,
1471                                     "vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
1472                                     "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
1473                                     "set to VK_FALSE.");
1474                 }
1475             }
1476
1477             // If subpass uses color attachments, pColorBlendState must be valid pointer
1478             if (subpass_desc) {
1479                 uint32_t color_attachment_count = 0;
1480                 for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1481                     if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1482                         ++color_attachment_count;
1483                     }
1484                 }
1485                 if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
1486                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1487                                     HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_096005e2,
1488                                     "Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
1489                                     "subpass uses color attachments.");
1490                 }
1491             }
1492         }
1493     }
1494
1495     auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
1496     if (vi != NULL) {
1497         for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
1498             VkFormat format = vi->pVertexAttributeDescriptions[j].format;
1499             // Internal call to get format info.  Still goes through layers, could potentially go directly to ICD.
1500             VkFormatProperties properties;
1501             dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
1502                                                                                       &properties);
1503             if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
1504                 skip |=
1505                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1506                             VALIDATION_ERROR_14a004de,
1507                             "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
1508                             "(%s) is not a supported vertex buffer format.",
1509                             pipelineIndex, j, string_VkFormat(format));
1510             }
1511         }
1512     }
1513
1514     if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) {
1515         VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
1516         for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
1517             if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
1518                 max_sample_count =
1519                     std::max(max_sample_count,
1520                              pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
1521             }
1522         }
1523         if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
1524             max_sample_count =
1525                 std::max(max_sample_count,
1526                          pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
1527         }
1528         if (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count) {
1529             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
1530                             HandleToUint64(pPipeline->pipeline), VALIDATION_ERROR_09600bc2,
1531                             "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
1532                             "attachment samples (%s) used in subpass %u.",
1533                             pipelineIndex,
1534                             string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
1535                             string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
1536         }
1537     }
1538
1539     return skip;
1540 }
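// Illustrative sketch related to the feature checks above (names are hypothetical): feature-gated pipeline
// state such as dual-source blend factors, depthClampEnable, logicOpEnable, or depthBoundsTestEnable is only
// legal if the corresponding VkPhysicalDeviceFeatures member was enabled at device creation:
//
//     VkPhysicalDeviceFeatures supported = {}, enabled = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     if (supported.dualSrcBlend) enabled.dualSrcBlend = VK_TRUE;  // otherwise avoid VK_BLEND_FACTOR_SRC1_* factors
//     VkDeviceCreateInfo dci = {VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};  // queue create info elided for brevity
//     dci.pEnabledFeatures = &enabled;
//     vkCreateDevice(gpu, &dci, nullptr, &device);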
1541
1542 // Block of code below is specifically for managing/tracking descriptor sets (DSs)
1543
1544 // Return Pool node ptr for specified pool or else NULL
1545 DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
1546     auto pool_it = dev_data->descriptorPoolMap.find(pool);
1547     if (pool_it == dev_data->descriptorPoolMap.end()) {
1548         return NULL;
1549     }
1550     return pool_it->second;
1551 }
1552
1553 // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
1554 // func_str is the name of the calling function
1555 // Return false if no errors occur
1556 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1557 static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
1558     if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1559     bool skip = false;
1560     auto set_node = dev_data->setMap.find(set);
1561     if (set_node == dev_data->setMap.end()) {
1562         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1563                         HandleToUint64(set), DRAWSTATE_DOUBLE_DESTROY,
1564                         "Cannot call %s() on descriptor set 0x%" PRIx64 " that has not been allocated.", func_str.c_str(),
1565                         HandleToUint64(set));
1566     } else {
1567         // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
1568         if (set_node->second->in_use.load()) {
1569             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
1570                             HandleToUint64(set), VALIDATION_ERROR_2860026a,
1571                             "Cannot call %s() on descriptor set 0x%" PRIx64 " that is in use by a command buffer.",
1572                             func_str.c_str(), HandleToUint64(set));
1573         }
1574     }
1575     return skip;
1576 }
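// Illustrative sketch of avoiding the in-use error above (names are hypothetical): a descriptor set may only
// be freed once every command buffer referencing it has completed execution:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // the set is no longer in use by the GPU
//     vkFreeDescriptorSets(device, pool, 1, &descriptor_set);
//     // (vkFreeDescriptorSets also requires the pool to have VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT)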
1577
1578 // Validate that given pool does not store any descriptor sets used by an in-flight CmdBuffer
1579 // pool stores the descriptor sets to be validated
1580 // Return false if no errors occur
1581 // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
1582 static bool validateIdleDescriptorSetForPoolReset(const layer_data *dev_data, const VkDescriptorPool pool) {
1583     if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
1584     bool skip = false;
1585     DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1586     if (pPool != nullptr) {
1587         for (auto ds : pPool->sets) {
1588             if (ds && ds->in_use.load()) {
1589                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1590                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool), VALIDATION_ERROR_32a00272,
1591                                 "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer. %s",
1592                                 validation_error_map[VALIDATION_ERROR_32a00272]);
1593                 if (skip) break;
1594             }
1595         }
1596     }
1597     return skip;
1598 }
1599
1600 // Remove set from setMap and delete the set
1601 static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
1602     dev_data->setMap.erase(descriptor_set->GetSet());
1603     delete descriptor_set;
1604 }
1605 // Free all DS Pools including their Sets & related sub-structs
1606 // NOTE : Calls to this function should be wrapped in mutex
1607 static void deletePools(layer_data *dev_data) {
1608     for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
1609         // Remove this pool's sets from setMap and delete them
1610         for (auto ds : ii->second->sets) {
1611             freeDescriptorSet(dev_data, ds);
1612         }
1613         ii->second->sets.clear();
1614         delete ii->second;
1615         ii = dev_data->descriptorPoolMap.erase(ii);
1616     }
1617 }
1618
1619 static void clearDescriptorPool(layer_data *dev_data, const VkDevice device, const VkDescriptorPool pool,
1620                                 VkDescriptorPoolResetFlags flags) {
1621     DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, pool);
1622     // TODO: validate flags
1623     // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
1624     for (auto ds : pPool->sets) {
1625         freeDescriptorSet(dev_data, ds);
1626     }
1627     pPool->sets.clear();
1628     // Reset available count for each type and available sets for this pool
1629     for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
1630         pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
1631     }
1632     pPool->availableSets = pPool->maxSets;
1633 }
1634
1635 // For given CB object, fetch associated CB Node from map
1636 GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
1637     auto it = dev_data->commandBufferMap.find(cb);
1638     if (it == dev_data->commandBufferMap.end()) {
1639         return NULL;
1640     }
1641     return it->second;
1642 }
1643
1644 // If a renderpass is active, verify that the given command type is appropriate for current subpass state
1645 bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
1646     if (!pCB->activeRenderPass) return false;
1647     bool skip = false;
1648     if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
1649         (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
1650         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1651                         HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1652                         "Commands cannot be called in a subpass using secondary command buffers.");
1653     } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
1654         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1655                         HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1656                         "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
1657     }
1658     return skip;
1659 }
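// Illustrative sketch of the subpass-contents rule enforced above (names are hypothetical): the contents value
// passed to vkCmdBeginRenderPass()/vkCmdNextSubpass() dictates what may be recorded inside that subpass:
//
//     vkCmdBeginRenderPass(cmd, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdExecuteCommands(cmd, 1, &secondary_cb);  // only vkCmdExecuteCommands() (plus NextSubpass/EndRenderPass) is allowed here
//     vkCmdEndRenderPass(cmd);
//     // With VK_SUBPASS_CONTENTS_INLINE instead, draw commands are recorded directly and
//     // vkCmdExecuteCommands() becomes the disallowed call.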
1660
1661 bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
1662                            VkQueueFlags required_flags, UNIQUE_VALIDATION_ERROR_CODE error_code) {
1663     auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
1664     if (pool) {
1665         VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
1666         if (!(required_flags & queue_flags)) {
1667             string required_flags_string;
1668             for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
1669                 if (flag & required_flags) {
1670                     if (required_flags_string.size()) {
1671                         required_flags_string += " or ";
1672                     }
1673                     required_flags_string += string_VkQueueFlagBits(flag);
1674                 }
1675             }
1676             return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1677                            HandleToUint64(cb_node->commandBuffer), error_code,
1678                            "Cannot call %s on a command buffer allocated from a pool without %s capabilities.", caller_name,
1679                            required_flags_string.c_str());
1680         }
1681     }
1682     return false;
1683 }
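// Illustrative sketch of the capability rule enforced above (names are hypothetical): a command buffer inherits
// the queue family of the VkCommandPool it was allocated from, so a command such as vkCmdDraw() requires that
// family to advertise VK_QUEUE_GRAPHICS_BIT:
//
//     VkCommandPoolCreateInfo cpci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO};
//     cpci.queueFamilyIndex = graphics_family_index;  // a family whose queueFlags include VK_QUEUE_GRAPHICS_BIT
//     vkCreateCommandPool(device, &cpci, nullptr, &pool);
//     // Command buffers allocated from 'pool' may record graphics commands; a transfer-only family could not.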
1684
1685 static char const *GetCauseStr(VK_OBJECT obj) {
1686     if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
1687     if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
1688     return "destroyed";
1689 }
1690
1691 static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
1692     bool skip = false;
1693     for (auto obj : cb_state->broken_bindings) {
1694         const char *type_str = object_string[obj.type];
1695         const char *cause_str = GetCauseStr(obj);
1696         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1697                         HandleToUint64(cb_state->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
1698                         "You are adding %s to command buffer 0x%" PRIx64 " that is invalid because bound %s 0x%" PRIx64 " was %s.",
1699                         call_source, HandleToUint64(cb_state->commandBuffer), type_str, obj.handle, cause_str);
1700     }
1701     return skip;
1702 }
1703
1704 // 'commandBuffer must be in the recording state' valid usage error code for each command
1705 // Note: the result of grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
1706 // Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
1707 using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
1708 static const std::unordered_map<CmdTypeHashType, UNIQUE_VALIDATION_ERROR_CODE> must_be_recording_map = {
1709     {CMD_NONE, VALIDATION_ERROR_UNDEFINED},  // UNMATCHED
1710     {CMD_BEGINQUERY, VALIDATION_ERROR_17802413},
1711     {CMD_BEGINRENDERPASS, VALIDATION_ERROR_17a02413},
1712     {CMD_BINDDESCRIPTORSETS, VALIDATION_ERROR_17c02413},
1713     {CMD_BINDINDEXBUFFER, VALIDATION_ERROR_17e02413},
1714     {CMD_BINDPIPELINE, VALIDATION_ERROR_18002413},
1715     {CMD_BINDVERTEXBUFFERS, VALIDATION_ERROR_18202413},
1716     {CMD_BLITIMAGE, VALIDATION_ERROR_18402413},
1717     {CMD_CLEARATTACHMENTS, VALIDATION_ERROR_18602413},
1718     {CMD_CLEARCOLORIMAGE, VALIDATION_ERROR_18802413},
1719     {CMD_CLEARDEPTHSTENCILIMAGE, VALIDATION_ERROR_18a02413},
1720     {CMD_COPYBUFFER, VALIDATION_ERROR_18c02413},
1721     {CMD_COPYBUFFERTOIMAGE, VALIDATION_ERROR_18e02413},
1722     {CMD_COPYIMAGE, VALIDATION_ERROR_19002413},
1723     {CMD_COPYIMAGETOBUFFER, VALIDATION_ERROR_19202413},
1724     {CMD_COPYQUERYPOOLRESULTS, VALIDATION_ERROR_19402413},
1725     {CMD_DEBUGMARKERBEGINEXT, VALIDATION_ERROR_19602413},
1726     {CMD_DEBUGMARKERENDEXT, VALIDATION_ERROR_19802413},
1727     {CMD_DEBUGMARKERINSERTEXT, VALIDATION_ERROR_19a02413},
1728     {CMD_DISPATCH, VALIDATION_ERROR_19c02413},
1729     // Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, VALIDATION_ERROR_19e02413 },
1730     {CMD_DISPATCHINDIRECT, VALIDATION_ERROR_1a002413},
1731     {CMD_DRAW, VALIDATION_ERROR_1a202413},
1732     {CMD_DRAWINDEXED, VALIDATION_ERROR_1a402413},
1733     {CMD_DRAWINDEXEDINDIRECT, VALIDATION_ERROR_1a602413},
1734     // Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD, VALIDATION_ERROR_1a802413 },
1735     {CMD_DRAWINDIRECT, VALIDATION_ERROR_1aa02413},
1736     // Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD, VALIDATION_ERROR_1ac02413 },
1737     {CMD_ENDCOMMANDBUFFER, VALIDATION_ERROR_27400076},
1738     {CMD_ENDQUERY, VALIDATION_ERROR_1ae02413},
1739     {CMD_ENDRENDERPASS, VALIDATION_ERROR_1b002413},
1740     {CMD_EXECUTECOMMANDS, VALIDATION_ERROR_1b202413},
1741     {CMD_FILLBUFFER, VALIDATION_ERROR_1b402413},
1742     {CMD_NEXTSUBPASS, VALIDATION_ERROR_1b602413},
1743     {CMD_PIPELINEBARRIER, VALIDATION_ERROR_1b802413},
1744     // Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, VALIDATION_ERROR_1ba02413 },
1745     {CMD_PUSHCONSTANTS, VALIDATION_ERROR_1bc02413},
1746     {CMD_PUSHDESCRIPTORSETKHR, VALIDATION_ERROR_1be02413},
1747     {CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, VALIDATION_ERROR_1c002413},
1748     // Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX, VALIDATION_ERROR_1c202413 },
1749     {CMD_RESETEVENT, VALIDATION_ERROR_1c402413},
1750     {CMD_RESETQUERYPOOL, VALIDATION_ERROR_1c602413},
1751     {CMD_RESOLVEIMAGE, VALIDATION_ERROR_1c802413},
1752     {CMD_SETBLENDCONSTANTS, VALIDATION_ERROR_1ca02413},
1753     {CMD_SETDEPTHBIAS, VALIDATION_ERROR_1cc02413},
1754     {CMD_SETDEPTHBOUNDS, VALIDATION_ERROR_1ce02413},
1755     // Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, VALIDATION_ERROR_1d002413 },
1756     {CMD_SETDISCARDRECTANGLEEXT, VALIDATION_ERROR_1d202413},
1757     {CMD_SETEVENT, VALIDATION_ERROR_1d402413},
1758     {CMD_SETLINEWIDTH, VALIDATION_ERROR_1d602413},
1759     {CMD_SETSAMPLELOCATIONSEXT, VALIDATION_ERROR_3e202413},
1760     {CMD_SETSCISSOR, VALIDATION_ERROR_1d802413},
1761     {CMD_SETSTENCILCOMPAREMASK, VALIDATION_ERROR_1da02413},
1762     {CMD_SETSTENCILREFERENCE, VALIDATION_ERROR_1dc02413},
1763     {CMD_SETSTENCILWRITEMASK, VALIDATION_ERROR_1de02413},
1764     {CMD_SETVIEWPORT, VALIDATION_ERROR_1e002413},
1765     // Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV, VALIDATION_ERROR_1e202413 },
1766     {CMD_UPDATEBUFFER, VALIDATION_ERROR_1e402413},
1767     {CMD_WAITEVENTS, VALIDATION_ERROR_1e602413},
1768     {CMD_WRITETIMESTAMP, VALIDATION_ERROR_1e802413},
1769 };
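// Additional background on the CmdTypeHashType workaround above: C++11's std::hash has no required
// specializations for enum types (they were added in C++14), so keying the map directly on CMD_TYPE may fail
// to compile with some standard libraries; converting through std::underlying_type sidesteps that, e.g.:
//
//     // std::unordered_map<CMD_TYPE, UNIQUE_VALIDATION_ERROR_CODE> m;  // may not compile under strict C++11
//     // std::unordered_map<std::underlying_type<CMD_TYPE>::type, UNIQUE_VALIDATION_ERROR_CODE> m;  // portable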
1770
1771 // Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
1772 // there's an issue with the Cmd ordering
1773 bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
1774     switch (cb_state->state) {
1775         case CB_RECORDING:
1776             return ValidateCmdSubpassState(dev_data, cb_state, cmd);
1777
1778         case CB_INVALID_COMPLETE:
1779         case CB_INVALID_INCOMPLETE:
1780             return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
1781
1782         default:
1783             auto error_it = must_be_recording_map.find(cmd);
1784             // This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
1785             assert(error_it != must_be_recording_map.cend());
1786             if (error_it == must_be_recording_map.cend()) {
1787                 error_it = must_be_recording_map.find(CMD_NONE);  // But we'll handle the asserting case, in case of a test gap
1788             }
1789             const auto error = error_it->second;
1790             return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1791                            HandleToUint64(cb_state->commandBuffer), error,
1792                            "You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
1793     }
1794 }
1795
1796 // For given object struct return a ptr of BASE_NODE type for its wrapping struct
1797 BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
1798     BASE_NODE *base_ptr = nullptr;
1799     switch (object_struct.type) {
1800         case kVulkanObjectTypeDescriptorSet: {
1801             base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
1802             break;
1803         }
1804         case kVulkanObjectTypeSampler: {
1805             base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
1806             break;
1807         }
1808         case kVulkanObjectTypeQueryPool: {
1809             base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
1810             break;
1811         }
1812         case kVulkanObjectTypePipeline: {
1813             base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
1814             break;
1815         }
1816         case kVulkanObjectTypeBuffer: {
1817             base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
1818             break;
1819         }
1820         case kVulkanObjectTypeBufferView: {
1821             base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
1822             break;
1823         }
1824         case kVulkanObjectTypeImage: {
1825             base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
1826             break;
1827         }
1828         case kVulkanObjectTypeImageView: {
1829             base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
1830             break;
1831         }
1832         case kVulkanObjectTypeEvent: {
1833             base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
1834             break;
1835         }
1836         case kVulkanObjectTypeDescriptorPool: {
1837             base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
1838             break;
1839         }
1840         case kVulkanObjectTypeCommandPool: {
1841             base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
1842             break;
1843         }
1844         case kVulkanObjectTypeFramebuffer: {
1845             base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
1846             break;
1847         }
1848         case kVulkanObjectTypeRenderPass: {
1849             base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
1850             break;
1851         }
1852         case kVulkanObjectTypeDeviceMemory: {
1853             base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
1854             break;
1855         }
1856         default:
1857             // TODO : Any other objects to be handled here?
1858             assert(0);
1859             break;
1860     }
1861     return base_ptr;
1862 }
1863
1864 // Tie the VK_OBJECT to the cmd buffer which includes:
1865 //  Add object_binding to cmd buffer
1866 //  Add cb_binding to object
1867 static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
1868     cb_bindings->insert(cb_node);
1869     cb_node->object_bindings.insert(obj);
1870 }
1871 // For a given object, if cb_node is in that objects cb_bindings, remove cb_node
1872 // For a given object, if cb_node is in that object's cb_bindings, remove cb_node
1873     BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
1874     if (base_obj) base_obj->cb_bindings.erase(cb_node);
1875 }
1876 // Reset the command buffer state
1877 //  Maintain the createInfo and set state to CB_NEW, but clear all other state
1878 static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
1879     GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
1880     if (pCB) {
1881         pCB->in_use.store(0);
1882         // Reset CB state (note that createInfo is not cleared)
1883         pCB->commandBuffer = cb;
1884         memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
1885         memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
1886         pCB->hasDrawCmd = false;
1887         pCB->state = CB_NEW;
1888         pCB->submitCount = 0;
1889         pCB->image_layout_change_count = 1;  // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
1890         pCB->status = 0;
1891         pCB->static_status = 0;
1892         pCB->viewportMask = 0;
1893         pCB->scissorMask = 0;
1894
1895         for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
1896             pCB->lastBound[i].reset();
1897         }
1898
1899         memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
1900         pCB->activeRenderPass = nullptr;
1901         pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
1902         pCB->activeSubpass = 0;
1903         pCB->broken_bindings.clear();
1904         pCB->waitedEvents.clear();
1905         pCB->events.clear();
1906         pCB->writeEventsBeforeWait.clear();
1907         pCB->waitedEventsBeforeQueryReset.clear();
1908         pCB->queryToStateMap.clear();
1909         pCB->activeQueries.clear();
1910         pCB->startedQueries.clear();
1911         pCB->imageLayoutMap.clear();
1912         pCB->eventToStageMap.clear();
1913         pCB->drawData.clear();
1914         pCB->currentDrawData.buffers.clear();
1915         pCB->vertex_buffer_used = false;
1916         pCB->primaryCommandBuffer = VK_NULL_HANDLE;
1917         // If secondary, invalidate any primary command buffer that may call us.
1918         if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1919             invalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
1920         }
1921
1922         // Remove reverse command buffer links.
1923         for (auto pSubCB : pCB->linkedCommandBuffers) {
1924             pSubCB->linkedCommandBuffers.erase(pCB);
1925         }
1926         pCB->linkedCommandBuffers.clear();
1927         pCB->updateImages.clear();
1928         pCB->updateBuffers.clear();
1929         clear_cmd_buf_and_mem_references(dev_data, pCB);
1930         pCB->queue_submit_functions.clear();
1931         pCB->cmd_execute_commands_functions.clear();
1932         pCB->eventUpdates.clear();
1933         pCB->queryUpdates.clear();
1934
1935         // Remove object bindings
1936         for (auto obj : pCB->object_bindings) {
1937             removeCommandBufferBinding(dev_data, &obj, pCB);
1938         }
1939         pCB->object_bindings.clear();
1940         // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
1941         for (auto framebuffer : pCB->framebuffers) {
1942             auto fb_state = GetFramebufferState(dev_data, framebuffer);
1943             if (fb_state) fb_state->cb_bindings.erase(pCB);
1944         }
1945         pCB->framebuffers.clear();
1946         pCB->activeFramebuffer = VK_NULL_HANDLE;
1947         memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));
1948     }
1949 }
1950
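// Given a pipeline's dynamic state create info, return the mask of CB status bits the pipeline sets statically
// (i.e. every state except those listed in pDynamicStates)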
1951 CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
1952     // initially assume everything is static state
1953     CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;
1954
1955     if (ds) {
1956         for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
1957             switch (ds->pDynamicStates[i]) {
1958                 case VK_DYNAMIC_STATE_LINE_WIDTH:
1959                     flags &= ~CBSTATUS_LINE_WIDTH_SET;
1960                     break;
1961                 case VK_DYNAMIC_STATE_DEPTH_BIAS:
1962                     flags &= ~CBSTATUS_DEPTH_BIAS_SET;
1963                     break;
1964                 case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
1965                     flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
1966                     break;
1967                 case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
1968                     flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
1969                     break;
1970                 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
1971                     flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
1972                     break;
1973                 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
1974                     flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
1975                     break;
1976                 case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
1977                     flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
1978                     break;
1979                 case VK_DYNAMIC_STATE_SCISSOR:
1980                     flags &= ~CBSTATUS_SCISSOR_SET;
1981                     break;
1982                 case VK_DYNAMIC_STATE_VIEWPORT:
1983                     flags &= ~CBSTATUS_VIEWPORT_SET;
1984                     break;
1985                 default:
1986                     break;
1987             }
1988         }
1989     }
1990
1991     return flags;
1992 }
1993
1994 // Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
1995 // render pass.
1996 bool insideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
1997                       UNIQUE_VALIDATION_ERROR_CODE msgCode) {
1998     bool inside = false;
1999     if (pCB->activeRenderPass) {
2000         inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2001                          HandleToUint64(pCB->commandBuffer), msgCode,
2002                          "%s: It is invalid to issue this call inside an active render pass (0x%" PRIx64 ").", apiName,
2003                          HandleToUint64(pCB->activeRenderPass->renderPass));
2004     }
2005     return inside;
2006 }
2007
2008 // Flags validation error if the associated call is made outside a render pass. The apiName
2009 // routine should ONLY be called inside a render pass.
2010 bool outsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
2011     bool outside = false;
2012     if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
2013         ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
2014          !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
2015         outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2016                           HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
2017                           apiName);
2018     }
2019     return outside;
2020 }
2021
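// Initialize the debug report and debug messenger callbacks configured for this core_validation instance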
2022 static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
2023     layer_debug_report_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
2024     layer_debug_messenger_actions(instance_data->report_data, instance_data->logging_messenger, pAllocator,
2025                                   "lunarg_core_validation");
2026 }
2027
2028 // For the given ValidationCheck enum, set all relevant instance disabled flags to true
2029 void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
2030     for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
2031         switch (val_flags_struct->pDisabledValidationChecks[i]) {
2032             case VK_VALIDATION_CHECK_SHADERS_EXT:
2033                 instance_data->disabled.shader_validation = true;
2034                 break;
2035             case VK_VALIDATION_CHECK_ALL_EXT:
2036                 // Set all disabled flags to true
2037                 instance_data->disabled.SetAll(true);
2038                 break;
2039             default:
2040                 break;
2041         }
2042     }
2043 }
2044
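// Create the instance through the next layer in the chain, then set up this layer's instance data: dispatch table,
// debug reporting, extension/API-version tracking, and any VK_EXT_validation_flags disables from the pNext chain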
2045 VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
2046                                               VkInstance *pInstance) {
2047     VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2048
2049     assert(chain_info->u.pLayerInfo);
2050     PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2051     PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
2052     if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
2053
2054     // Advance the link info for the next element on the chain
2055     chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2056
2057     VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
2058     if (result != VK_SUCCESS) return result;
2059
2060     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
2061     instance_data->instance = *pInstance;
2062     layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
2063     instance_data->report_data = debug_utils_create_instance(
2064         &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
2065
2066     instance_data->api_version = instance_data->extensions.InitFromInstanceCreateInfo(
2067         (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0), pCreateInfo);
2068     init_core_validation(instance_data, pAllocator);
2069
2070     ValidateLayerOrdering(*pCreateInfo);
2071     // Parse any pNext chains
2072     const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
2073     if (validation_flags_ext) {
2074         SetDisabledFlags(instance_data, validation_flags_ext);
2075     }
2076
2077     return result;
2078 }
2079
2080 // Hook DestroyInstance to clean up logging callbacks and free this instance's layer data
2081 VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
2082     // TODOSC : Shouldn't need any customization here
2083     dispatch_key key = get_dispatch_key(instance);
2084     // TBD: Is any locking needed this early, in case this function is called at the
2085     // same time by more than one thread?
2086     instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
2087     instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
2088
2089     lock_guard_t lock(global_lock);
2090     // Clean up logging callback, if any
2091     while (instance_data->logging_messenger.size() > 0) {
2092         VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
2093         layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
2094         instance_data->logging_messenger.pop_back();
2095     }
2096     while (instance_data->logging_callback.size() > 0) {
2097         VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
2098         layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
2099         instance_data->logging_callback.pop_back();
2100     }
2101
2102     layer_debug_utils_destroy_instance(instance_data->report_data);
2103     FreeLayerDataPtr(key, instance_layer_data_map);
2104 }
2105
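// Verify that requested_queue_family is less than the queue family count previously obtained from
// vkGetPhysicalDeviceQueueFamilyProperties[2[KHR]]; flag err_code otherwise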
2106 static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2107                                               uint32_t requested_queue_family, int32_t err_code, const char *cmd_name,
2108                                               const char *queue_family_var_name) {
2109     bool skip = false;
2110
2111     const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
2112                                           ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
2113                                           : "";
2114
2115     std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
2116                                  ? "the pQueueFamilyPropertyCount was never obtained"
2117                                  : "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
2118
2119     if (requested_queue_family >= pd_state->queue_family_count) {
2120         skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2121                         HandleToUint64(pd_state->phys_device), err_code,
2122                         "%s: %s (= %" PRIu32
2123                         ") is not less than any previously obtained pQueueFamilyPropertyCount from "
2124                         "vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
2125                         cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
2126     }
2127     return skip;
2128 }
2129
2130 // Verify VkDeviceQueueCreateInfos
2131 static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2132                                            uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
2133     bool skip = false;
2134
2135     for (uint32_t i = 0; i < info_count; ++i) {
2136         const auto requested_queue_family = infos[i].queueFamilyIndex;
2137
2138         // Verify that requested queue family is known to be valid at this point in time
2139         std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
2140         skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family, VALIDATION_ERROR_06c002fa,
2141                                                   "vkCreateDevice", queue_family_var_name.c_str());
2142
2143         // Verify that the requested queue count of the queue family is known to be valid at this point in time
2144         if (requested_queue_family < pd_state->queue_family_count) {
2145             const auto requested_queue_count = infos[i].queueCount;
2146             const auto queue_family_props_count = pd_state->queue_family_properties.size();
2147             const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
2148             const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
2149                                                   ? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
2150                                                   : "";
2151             std::string count_note =
2152                 !queue_family_has_props
2153                     ? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
2154                     : "i.e. is not less than or equal to " +
2155                           std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
2156
2157             if (!queue_family_has_props ||
2158                 requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
2159                 skip |= log_msg(
2160                     instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2161                     HandleToUint64(pd_state->phys_device), VALIDATION_ERROR_06c002fc,
2162                     "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
2163                     ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
2164                     "].queueFamilyIndex (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
2165                     i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
2166             }
2167         }
2168     }
2169
2170     return skip;
2171 }
2172
2173 // Verify that features have been queried and that they are available
2174 static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
2175                                       const VkPhysicalDeviceFeatures *requested_features) {
2176     bool skip = false;
2177
2178     const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features);
2179     const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
2180     // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
2181     //  Need to provide the struct member name with the issue. To do that seems like we'll
2182     //  have to loop through each struct member which should be done w/ codegen to keep in synch.
2183     uint32_t errors = 0;
2184     uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
2185     for (uint32_t i = 0; i < total_bools; i++) {
2186         if (requested[i] > actual[i]) {
2187             skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2188                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2189                             "While calling vkCreateDevice(), feature '%s' was requested in the VkPhysicalDeviceFeatures struct, but it is "
2190                             "not available on this device.",
2191                             GetPhysDevFeatureString(i));
2192             errors++;
2193         }
2194     }
2195     if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
2196         // If user didn't request features, notify them that they should
2197         // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
2198         skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
2199                         0, DEVLIMITS_INVALID_FEATURE_REQUESTED,
2200                         "You requested features that are unavailable on this device. You should first query feature availability "
2201                         "by calling vkGetPhysicalDeviceFeatures().");
2202     }
2203     return skip;
2204 }
2205
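// Validate requested features and queue create infos, create the device through the next layer, then initialize
// device-level layer state: dispatch table, physical device properties/limits, enabled features, and extension data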
2206 VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
2207                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
2208     bool skip = false;
2209     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
2210
2211     unique_lock_t lock(global_lock);
2212     auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
2213
2214     // TODO: object_tracker should perhaps do this instead
2215     //       and it does not seem to currently work anyway -- the loader just crashes before this point
2216     if (!GetPhysicalDeviceState(instance_data, gpu)) {
2217         skip |=
2218             log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
2219                     DEVLIMITS_MUST_QUERY_COUNT, "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
2220     }
2221
2222     // Check that any requested features are available
2223     // The enabled features can come from either pEnabledFeatures, or from the pNext chain
2224     const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
2225     if (nullptr == enabled_features_found) {
2226         const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
2227         if (features2) {
2228             enabled_features_found = &(features2->features);
2229         }
2230     }
2231
2232     if (enabled_features_found) {
2233         skip |= ValidateRequestedFeatures(instance_data, pd_state, enabled_features_found);
2234     }
2235
2236     skip |=
2237         ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
2238
2239     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2240
2241     VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
2242
2243     assert(chain_info->u.pLayerInfo);
2244     PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
2245     PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
2246     PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
2247     if (fpCreateDevice == NULL) {
2248         return VK_ERROR_INITIALIZATION_FAILED;
2249     }
2250
2251     // Advance the link info for the next element on the chain
2252     chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
2253
2254     lock.unlock();
2255
2256     VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
2257     if (result != VK_SUCCESS) {
2258         return result;
2259     }
2260
2261     lock.lock();
2262     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
2263
2264     device_data->instance_data = instance_data;
2265     // Setup device dispatch table
2266     layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
2267     device_data->device = *pDevice;
2268     // Save PhysicalDevice handle
2269     device_data->physical_device = gpu;
2270
2271     device_data->report_data = layer_debug_utils_create_device(instance_data->report_data, *pDevice);
2272
2273     // Get physical device limits for this device
2274     instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
2275
2276     device_data->api_version = device_data->extensions.InitFromDeviceCreateInfo(
2277         &instance_data->extensions, device_data->phys_dev_properties.properties.apiVersion, pCreateInfo);
2278
2279     uint32_t count;
2280     instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
2281     device_data->phys_dev_properties.queue_family_properties.resize(count);
2282     instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
2283         gpu, &count, device_data->phys_dev_properties.queue_family_properties.data());
2284     // TODO: device limits should make sure these are compatible
2285     if (enabled_features_found) {
2286         device_data->enabled_features = *enabled_features_found;
2287     } else {
2288         memset(&device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
2289     }
2290     // Store physical device properties and physical device mem limits into device layer_data structs
2291     instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
2292     instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
2293
2294     if (device_data->extensions.vk_khr_push_descriptor) {
2295         // Get the needed push_descriptor limits
2296         auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
2297         auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
2298         instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
2299         device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
2300     }
2301     if (device_data->extensions.vk_ext_descriptor_indexing) {
2302         // Get the needed descriptor_indexing limits
2303         auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>();
2304         auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props);
2305         instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
2306         device_data->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
2307     }
2308
2309     const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
2310     if (descriptor_indexing_features) {
2311         device_data->phys_dev_ext_props.descriptor_indexing_features = *descriptor_indexing_features;
2312     }
2313
2314     lock.unlock();
2315
2316     ValidateLayerOrdering(*pCreateInfo);
2317
2318     return result;
2319 }
2320
2321 // Hook DestroyDevice to free all device-level layer state before destroying the device
2322 VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
2323     // TODOSC : Shouldn't need any customization here
2324     dispatch_key key = get_dispatch_key(device);
2325     layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
2326     // Free all the memory
2327     unique_lock_t lock(global_lock);
2328     dev_data->pipelineMap.clear();
2329     dev_data->renderPassMap.clear();
2330     for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
2331         delete (*ii).second;
2332     }
2333     dev_data->commandBufferMap.clear();
2334     // This will also delete all sets in the pool & remove them from setMap
2335     deletePools(dev_data);
2336     // All sets should be removed
2337     assert(dev_data->setMap.empty());
2338     dev_data->descriptorSetLayoutMap.clear();
2339     dev_data->imageViewMap.clear();
2340     dev_data->imageMap.clear();
2341     dev_data->imageSubresourceMap.clear();
2342     dev_data->imageLayoutMap.clear();
2343     dev_data->bufferViewMap.clear();
2344     dev_data->bufferMap.clear();
2345     // Queues persist until device is destroyed
2346     dev_data->queueMap.clear();
2347     // Report any memory leaks
2348     layer_debug_utils_destroy_device(device);
2349     lock.unlock();
2350
2351 #if DISPATCH_MAP_DEBUG
2352     fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
2353 #endif
2354
2355     dev_data->dispatch_table.DestroyDevice(device, pAllocator);
2356     FreeLayerDataPtr(key, layer_data_map);
2357 }
2358
2359 static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
2360
2361 // For the given stage mask, if the Geometry shader stage is set w/o the geometryShader feature being enabled, report geo_error_id
2362 //   and if Tessellation Control or Evaluation shader stages are set w/o the tessellationShader feature being enabled, report tess_error_id
2363 static bool ValidateStageMaskGsTsEnables(layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
2364                                          UNIQUE_VALIDATION_ERROR_CODE geo_error_id, UNIQUE_VALIDATION_ERROR_CODE tess_error_id) {
2365     bool skip = false;
2366     if (!dev_data->enabled_features.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
2367         skip |=
2368             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
2369                     "%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
2370                     "geometryShader feature enabled.",
2371                     caller);
2372     }
2373     if (!dev_data->enabled_features.tessellationShader &&
2374         (stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
2375         skip |=
2376             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
2377                     "%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
2378                     "VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
2379                     "tessellationShader feature enabled.",
2380                     caller);
2381     }
2382     return skip;
2383 }
2384
2385 // Loop through bound objects and increment their in_use counts.
2386 static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2387     for (auto obj : cb_node->object_bindings) {
2388         auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
2389         if (base_obj) {
2390             base_obj->in_use.fetch_add(1);
2391         }
2392     }
2393 }
2394 // Track which resources are in-flight by atomically incrementing their "in_use" count
2395 static void incrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2396     cb_node->submitCount++;
2397     cb_node->in_use.fetch_add(1);
2398
2399     // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
2400     IncrementBoundObjects(dev_data, cb_node);
2401     // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2402     //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2403     //  should then be flagged prior to calling this function
2404     for (auto drawDataElement : cb_node->drawData) {
2405         for (auto buffer : drawDataElement.buffers) {
2406             auto buffer_state = GetBufferState(dev_data, buffer);
2407             if (buffer_state) {
2408                 buffer_state->in_use.fetch_add(1);
2409             }
2410         }
2411     }
2412     for (auto event : cb_node->writeEventsBeforeWait) {
2413         auto event_state = GetEventNode(dev_data, event);
2414         if (event_state) event_state->write_in_use++;
2415     }
2416 }
2417
2418 // Note: This function assumes that the global lock is held by the calling thread.
2419 // For the given queue, verify the queue state up to the given seq number.
2420 // Currently the only check is that, if there are events to be waited on prior to a QueryReset,
2421 //  all such events have been signalled.
2422 static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
2423     bool skip = false;
2424
2425     // sequence number we want to validate up to, per queue
2426     std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
2427     // sequence number we've completed validation for, per queue
2428     std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
2429     std::vector<QUEUE_STATE *> worklist{initial_queue};
2430
2431     while (worklist.size()) {
2432         auto queue = worklist.back();
2433         worklist.pop_back();
2434
2435         auto target_seq = target_seqs[queue];
2436         auto seq = std::max(done_seqs[queue], queue->seq);
2437         auto sub_it = queue->submissions.begin() + int(seq - queue->seq);  // seq >= queue->seq
2438
2439         for (; seq < target_seq; ++sub_it, ++seq) {
2440             for (auto &wait : sub_it->waitSemaphores) {
2441                 auto other_queue = GetQueueState(dev_data, wait.queue);
2442
2443                 if (other_queue == queue) continue;  // semaphores /always/ point backwards, so no point here.
2444
2445                 auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
2446                 auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
2447
2448                 // if this wait is for another queue, and covers new sequence
2449                 // numbers beyond what we've already validated, mark the new
2450                 // target seq and (possibly-re)add the queue to the worklist.
2451                 if (other_done_seq < other_target_seq) {
2452                     target_seqs[other_queue] = other_target_seq;
2453                     worklist.push_back(other_queue);
2454                 }
2455             }
2456
2457             for (auto cb : sub_it->cbs) {
2458                 auto cb_node = GetCBNode(dev_data, cb);
2459                 if (cb_node) {
2460                     for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
2461                         for (auto event : queryEventsPair.second) {
2462                             if (dev_data->eventMap[event].needsSignaled) {
2463                                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2464                                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
2465                                                 "Cannot get query results on queryPool 0x%" PRIx64
2466                                                 " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
2467                                                 HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
2468                                                 HandleToUint64(event));
2469                             }
2470                         }
2471                     }
2472                 }
2473             }
2474         }
2475
2476         // finally mark the point we've now validated this queue to.
2477         done_seqs[queue] = seq;
2478     }
2479
2480     return skip;
2481 }
2482
2483 // When the given fence is retired, verify outstanding queue operations through the point of the fence
2484 static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
2485     auto fence_state = GetFenceNode(dev_data, fence);
2486     if (fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
2487         return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
2488     }
2489     return false;
2490 }
2491
2492 // Decrement in-use count for objects bound to command buffer
2493 static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
2494     BASE_NODE *base_obj = nullptr;
2495     for (auto obj : cb_node->object_bindings) {
2496         base_obj = GetStateStructPtrFromObject(dev_data, obj);
2497         if (base_obj) {
2498             base_obj->in_use.fetch_sub(1);
2499         }
2500     }
2501 }
2502
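// Retire completed submissions on the given queue up through sequence number seq: drop in-use counts on semaphores,
// buffers, and command buffers, propagate recorded event/query state to the device maps, mark the submission's fence
// retired, and then retire any other queues up to the highest waited-on sequence number seen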
2503 static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
2504     std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
2505
2506     // Roll this queue forward, one submission at a time.
2507     while (pQueue->seq < seq) {
2508         auto &submission = pQueue->submissions.front();
2509
2510         for (auto &wait : submission.waitSemaphores) {
2511             auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
2512             if (pSemaphore) {
2513                 pSemaphore->in_use.fetch_sub(1);
2514             }
2515             auto &lastSeq = otherQueueSeqs[wait.queue];
2516             lastSeq = std::max(lastSeq, wait.seq);
2517         }
2518
2519         for (auto &semaphore : submission.signalSemaphores) {
2520             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2521             if (pSemaphore) {
2522                 pSemaphore->in_use.fetch_sub(1);
2523             }
2524         }
2525
2526         for (auto &semaphore : submission.externalSemaphores) {
2527             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2528             if (pSemaphore) {
2529                 pSemaphore->in_use.fetch_sub(1);
2530             }
2531         }
2532
2533         for (auto cb : submission.cbs) {
2534             auto cb_node = GetCBNode(dev_data, cb);
2535             if (!cb_node) {
2536                 continue;
2537             }
2538             // First perform decrement on general case bound objects
2539             DecrementBoundResources(dev_data, cb_node);
2540             for (auto drawDataElement : cb_node->drawData) {
2541                 for (auto buffer : drawDataElement.buffers) {
2542                     auto buffer_state = GetBufferState(dev_data, buffer);
2543                     if (buffer_state) {
2544                         buffer_state->in_use.fetch_sub(1);
2545                     }
2546                 }
2547             }
2548             for (auto event : cb_node->writeEventsBeforeWait) {
2549                 auto eventNode = dev_data->eventMap.find(event);
2550                 if (eventNode != dev_data->eventMap.end()) {
2551                     eventNode->second.write_in_use--;
2552                 }
2553             }
2554             for (auto queryStatePair : cb_node->queryToStateMap) {
2555                 dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
2556             }
2557             for (auto eventStagePair : cb_node->eventToStageMap) {
2558                 dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
2559             }
2560
2561             cb_node->in_use.fetch_sub(1);
2562         }
2563
2564         auto pFence = GetFenceNode(dev_data, submission.fence);
2565         if (pFence && pFence->scope == kSyncScopeInternal) {
2566             pFence->state = FENCE_RETIRED;
2567         }
2568
2569         pQueue->submissions.pop_front();
2570         pQueue->seq++;
2571     }
2572
2573     // Roll other queues forward to the highest seq we saw a wait for
2574     for (auto qs : otherQueueSeqs) {
2575         RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
2576     }
2577 }
2578
2579 // Submit a fence to a queue, delimiting previous fences and previous untracked
2580 // work by it.
2581 static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
2582     pFence->state = FENCE_INFLIGHT;
2583     pFence->signaler.first = pQueue->queue;
2584     pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
2585 }
2586
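// Flag an error if the command buffer is already in flight, or appears more than once in the current submission,
// without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set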
2587 static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2588     bool skip = false;
2589     if ((pCB->in_use.load() || current_submit_count > 1) &&
2590         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2591         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2592                         VALIDATION_ERROR_31a0008e,
2593                         "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
2594                         HandleToUint64(pCB->commandBuffer));
2595     }
2596     return skip;
2597 }
2598
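// Validate that the command buffer is in a submittable state: not a repeat submission of a ONE_TIME_SUBMIT buffer,
// not invalidated, not unrecorded (CB_NEW), and not still recording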
2599 static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
2600                                        int current_submit_count, UNIQUE_VALIDATION_ERROR_CODE vu_id) {
2601     bool skip = false;
2602     if (dev_data->instance_data->disabled.command_buffer_state) return skip;
2603     // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
2604     if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
2605         (cb_state->submitCount + current_submit_count > 1)) {
2606         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2607                         DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION,
2608                         "Command buffer 0x%" PRIx64
2609                         " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
2610                         " times.",
2611                         HandleToUint64(cb_state->commandBuffer), cb_state->submitCount + current_submit_count);
2612     }
2613
2614     // Validate that cmd buffers have been updated
2615     switch (cb_state->state) {
2616         case CB_INVALID_INCOMPLETE:
2617         case CB_INVALID_COMPLETE:
2618             skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
2619             break;
2620
2621         case CB_NEW:
2622             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2623                             HandleToUint64(cb_state->commandBuffer), vu_id,
2624                             "Command buffer 0x%" PRIx64 " used in the call to %s is unrecorded and contains no commands.",
2625                             HandleToUint64(cb_state->commandBuffer), call_source);
2626             break;
2627
2628         case CB_RECORDING:
2629             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2630                             HandleToUint64(cb_state->commandBuffer), DRAWSTATE_NO_END_COMMAND_BUFFER,
2631                             "You must call vkEndCommandBuffer() on command buffer 0x%" PRIx64 " before this call to %s!",
2632                             HandleToUint64(cb_state->commandBuffer), call_source);
2633             break;
2634
2635         default: /* recorded */
2636             break;
2637     }
2638     return skip;
2639 }
2640
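// Verify that every buffer referenced by this command buffer's draw data still exists (has not been destroyed)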
2641 static bool validateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
2642     bool skip = false;
2643
2644     // TODO : We should be able to remove the NULL look-up checks from the code below as long as
2645     //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
2646     //  should then be flagged prior to calling this function
2647     for (auto drawDataElement : cb_node->drawData) {
2648         for (auto buffer : drawDataElement.buffers) {
2649             auto buffer_state = GetBufferState(dev_data, buffer);
2650             if (buffer != VK_NULL_HANDLE && !buffer_state) {
2651                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
2652                                 HandleToUint64(buffer), DRAWSTATE_INVALID_BUFFER,
2653                                 "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", HandleToUint64(buffer));
2654             }
2655         }
2656     }
2657     return skip;
2658 }
2659
2660 // Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
2661 bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
2662                            const uint32_t *indices) {
2663     bool found = false;
2664     bool skip = false;
2665     auto queue_state = GetQueueState(dev_data, queue);
2666     if (queue_state) {
2667         for (uint32_t i = 0; i < count; i++) {
2668             if (indices[i] == queue_state->queueFamilyIndex) {
2669                 found = true;
2670                 break;
2671             }
2672         }
2673
2674         if (!found) {
2675             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
2676                            object->handle, DRAWSTATE_INVALID_QUEUE_FAMILY,
2677                            "vkQueueSubmit: Command buffer 0x%" PRIx64 " contains %s 0x%" PRIx64
2678                            " which was not created allowing concurrent access to this queue family %d.",
2679                            HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle,
2680                            queue_state->queueFamilyIndex);
2681         }
2682     }
2683     return skip;
2684 }
2685
2686 // Validate that queueFamilyIndices of primary command buffers match this queue
2687 // Secondary command buffers were previously validated in vkCmdExecuteCommands().
2688 static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
2689     bool skip = false;
2690     auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
2691     auto queue_state = GetQueueState(dev_data, queue);
2692
2693     if (pPool && queue_state) {
2694         if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
2695             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2696                             HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_31a00094,
2697                             "vkQueueSubmit: Primary command buffer 0x%" PRIx64
2698                             " created in queue family %d is being submitted on queue 0x%" PRIx64 " from queue family %d.",
2699                             HandleToUint64(pCB->commandBuffer), pPool->queueFamilyIndex, HandleToUint64(queue),
2700                             queue_state->queueFamilyIndex);
2701         }
2702
2703         // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
2704         for (auto object : pCB->object_bindings) {
2705             if (object.type == kVulkanObjectTypeImage) {
2706                 auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
2707                 if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2708                     skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
2709                                                   image_state->createInfo.pQueueFamilyIndices);
2710                 }
2711             } else if (object.type == kVulkanObjectTypeBuffer) {
2712                 auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
2713                 if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
2714                     skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
2715                                                   buffer_state->createInfo.pQueueFamilyIndices);
2716                 }
2717             }
2718         }
2719     }
2720
2721     return skip;
2722 }
2723
2724 static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
2725     // Track in-use for resources off of primary and any secondary CBs
2726     bool skip = false;
2727
2728     // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
2729     // on device
2730     skip |= validateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
2731
2732     skip |= validateResources(dev_data, pCB);
2733
2734     for (auto pSubCB : pCB->linkedCommandBuffers) {
2735         skip |= validateResources(dev_data, pSubCB);
2736         // TODO: replace with invalidateCommandBuffers() at recording.
2737         if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
2738             !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
2739             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
2740                     VALIDATION_ERROR_31a00092,
2741                     "Command buffer 0x%" PRIx64 " was submitted with secondary buffer 0x%" PRIx64
2742                     " but that buffer has subsequently been bound to primary cmd buffer 0x%" PRIx64
2743                     " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
2744                     HandleToUint64(pCB->commandBuffer), HandleToUint64(pSubCB->commandBuffer),
2745                     HandleToUint64(pSubCB->primaryCommandBuffer));
2746         }
2747     }
2748
2749     skip |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count, VALIDATION_ERROR_31a00090);
2750
2751     return skip;
2752 }
2753
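// Verify that a fence with internal sync scope is neither already in flight nor already signaled when submitted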
2754 static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
2755     bool skip = false;
2756
2757     if (pFence && pFence->scope == kSyncScopeInternal) {
2758         if (pFence->state == FENCE_INFLIGHT) {
2759             // TODO: opportunities for VALIDATION_ERROR_31a00080, VALIDATION_ERROR_316008b4, VALIDATION_ERROR_16400a0e
2760             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2761                             HandleToUint64(pFence->fence), DRAWSTATE_INVALID_FENCE,
2762                             "Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
2763         }
2764
2765         else if (pFence->state == FENCE_RETIRED) {
2766             // TODO: opportunities for VALIDATION_ERROR_31a0007e, VALIDATION_ERROR_316008b2, VALIDATION_ERROR_16400a0e
2767             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2768                             HandleToUint64(pFence->fence), MEMTRACK_INVALID_FENCE_STATE,
2769                             "Fence 0x%" PRIx64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
2770                             HandleToUint64(pFence->fence));
2771         }
2772     }
2773
2774     return skip;
2775 }
2776
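// Record state changes for a queue submission: mark the fence in flight, update semaphore wait/signal tracking,
// bump in-use counts for submitted command buffers (and their secondaries and bound objects), append each submission
// to the queue's list, and retire work early when external sync objects are involved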
2777 static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2778                                       VkFence fence) {
2779     uint64_t early_retire_seq = 0;
2780     auto pQueue = GetQueueState(dev_data, queue);
2781     auto pFence = GetFenceNode(dev_data, fence);
2782
2783     if (pFence) {
2784         if (pFence->scope == kSyncScopeInternal) {
2785             // Mark fence in use
2786             SubmitFence(pQueue, pFence, std::max(1u, submitCount));
2787             if (!submitCount) {
2788                 // If no submissions, but just dropping a fence on the end of the queue,
2789                 // record an empty submission with just the fence, so we can determine
2790                 // its completion.
2791                 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
2792                                                  std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
2793             }
2794         } else {
2795             // Retire work up until this fence early, we will not see the wait that corresponds to this signal
2796             early_retire_seq = pQueue->seq + pQueue->submissions.size();
2797             if (!dev_data->external_sync_warning) {
2798                 dev_data->external_sync_warning = true;
2799                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
2800                         HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2801                         "vkQueueSubmit(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
2802                         " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
2803                         "objects.",
2804                         HandleToUint64(fence), HandleToUint64(queue));
2805             }
2806         }
2807     }
2808
2809     // Now process each individual submit
2810     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2811         std::vector<VkCommandBuffer> cbs;
2812         const VkSubmitInfo *submit = &pSubmits[submit_idx];
2813         vector<SEMAPHORE_WAIT> semaphore_waits;
2814         vector<VkSemaphore> semaphore_signals;
2815         vector<VkSemaphore> semaphore_externals;
2816         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2817             VkSemaphore semaphore = submit->pWaitSemaphores[i];
2818             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2819             if (pSemaphore) {
2820                 if (pSemaphore->scope == kSyncScopeInternal) {
2821                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
2822                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
2823                         pSemaphore->in_use.fetch_add(1);
2824                     }
2825                     pSemaphore->signaler.first = VK_NULL_HANDLE;
2826                     pSemaphore->signaled = false;
2827                 } else {
2828                     semaphore_externals.push_back(semaphore);
2829                     pSemaphore->in_use.fetch_add(1);
2830                     if (pSemaphore->scope == kSyncScopeExternalTemporary) {
2831                         pSemaphore->scope = kSyncScopeInternal;
2832                     }
2833                 }
2834             }
2835         }
2836         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2837             VkSemaphore semaphore = submit->pSignalSemaphores[i];
2838             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2839             if (pSemaphore) {
2840                 if (pSemaphore->scope == kSyncScopeInternal) {
2841                     pSemaphore->signaler.first = queue;
2842                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
2843                     pSemaphore->signaled = true;
2844                     pSemaphore->in_use.fetch_add(1);
2845                     semaphore_signals.push_back(semaphore);
2846                 } else {
2847                     // Retire work up until this submit early, we will not see the wait that corresponds to this signal
2848                     early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
2849                     if (!dev_data->external_sync_warning) {
2850                         dev_data->external_sync_warning = true;
2851                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2852                                 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2853                                 "vkQueueSubmit(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
2854                                 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
2855                                 "associated objects.",
2856                                 HandleToUint64(semaphore), HandleToUint64(queue));
2857                     }
2858                 }
2859             }
2860         }
2861         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2862             auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2863             if (cb_node) {
2864                 cbs.push_back(submit->pCommandBuffers[i]);
2865                 for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
2866                     cbs.push_back(secondaryCmdBuffer->commandBuffer);
2867                     UpdateCmdBufImageLayouts(dev_data, secondaryCmdBuffer);
2868                     incrementResources(dev_data, secondaryCmdBuffer);
2869                 }
2870                 UpdateCmdBufImageLayouts(dev_data, cb_node);
2871                 incrementResources(dev_data, cb_node);
2872             }
2873         }
2874         pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
2875                                          submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
2876     }
2877
2878     if (early_retire_seq) {
2879         RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
2880     }
2881 }
2882
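// Validate a queue submission: fence state, wait/signal semaphore ordering, command buffer image layouts, primary
// command buffer state, queue family compatibility, and any submit-time validation functions recorded in each CB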
2883 static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
2884                                        VkFence fence) {
2885     auto pFence = GetFenceNode(dev_data, fence);
2886     bool skip = ValidateFenceForSubmit(dev_data, pFence);
2887     if (skip) {
2888         return true;
2889     }
2890
2891     unordered_set<VkSemaphore> signaled_semaphores;
2892     unordered_set<VkSemaphore> unsignaled_semaphores;
2893     unordered_set<VkSemaphore> internal_semaphores;
2894     vector<VkCommandBuffer> current_cmds;
2895     unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
2896     // Now verify each individual submit
2897     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
2898         const VkSubmitInfo *submit = &pSubmits[submit_idx];
2899         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
2900             skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
2901                                                  VALIDATION_ERROR_13c00098, VALIDATION_ERROR_13c0009a);
2902             VkSemaphore semaphore = submit->pWaitSemaphores[i];
2903             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2904             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
2905                 if (unsignaled_semaphores.count(semaphore) ||
2906                     (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
2907                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2908                                     HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2909                                     "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
2910                                     HandleToUint64(queue), HandleToUint64(semaphore));
2911                 } else {
2912                     signaled_semaphores.erase(semaphore);
2913                     unsignaled_semaphores.insert(semaphore);
2914                 }
2915             }
2916             if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
2917                 internal_semaphores.insert(semaphore);
2918             }
2919         }
2920         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
2921             VkSemaphore semaphore = submit->pSignalSemaphores[i];
2922             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
2923             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
2924                 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
2925                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
2926                                     HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
2927                                     "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
2928                                     " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
2929                                     HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
2930                 } else {
2931                     unsignaled_semaphores.erase(semaphore);
2932                     signaled_semaphores.insert(semaphore);
2933                 }
2934             }
2935         }
2936         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
2937             auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
2938             if (cb_node) {
2939                 skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
2940                 current_cmds.push_back(submit->pCommandBuffers[i]);
2941                 skip |= validatePrimaryCommandBufferState(
2942                     dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]));
2943                 skip |= validateQueueFamilyIndices(dev_data, cb_node, queue);
2944
2945                 // Potential early exit here, as bad object state may cause a crash in the delayed function calls below
2946                 if (skip) {
2947                     return true;
2948                 }
2949
2950                 // Call submit-time functions to validate/update state
2951                 for (auto &function : cb_node->queue_submit_functions) {
2952                     skip |= function();
2953                 }
2954                 for (auto &function : cb_node->eventUpdates) {
2955                     skip |= function(queue);
2956                 }
2957                 for (auto &function : cb_node->queryUpdates) {
2958                     skip |= function(queue);
2959                 }
2960             }
2961         }
2962     }
2963     return skip;
2964 }
2965
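// vkQueueSubmit and most entry points below follow the same layered-call shape: validate under the global lock,
// return VK_ERROR_VALIDATION_FAILED_EXT without calling the driver if validation failed, otherwise dispatch down
// the chain and re-take the lock to record the resulting state (Destroy* entry points record before dispatch
// instead, to avoid destroy/create races).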
2966 VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
2967     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
2968     unique_lock_t lock(global_lock);
2969
2970     bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2971     lock.unlock();
2972
2973     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
2974
2975     VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
2976
2977     lock.lock();
2978     PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
2979     lock.unlock();
2980     return result;
2981 }
2982
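// vkAllocateMemory allocations count against VkPhysicalDeviceLimits::maxMemoryAllocationCount (required by the
// spec to be at least 4096); flag an allocation that would exceed the number of simultaneously valid objects.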
2983 static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
2984     bool skip = false;
2985     if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
2986         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2987                         HandleToUint64(dev_data->device), VALIDATION_ERROR_UNDEFINED,
2988                         "Number of currently valid memory objects is not less than the maximum allowed (%u).",
2989                         dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount);
2990     }
2991     return skip;
2992 }
2993
2994 static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
2995     add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
2996     return;
2997 }
2998
2999 VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
3000                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
3001     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3002     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3003     unique_lock_t lock(global_lock);
3004     bool skip = PreCallValidateAllocateMemory(dev_data);
3005     if (!skip) {
3006         lock.unlock();
3007         result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
3008         lock.lock();
3009         if (VK_SUCCESS == result) {
3010             PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
3011         }
3012     }
3013     return result;
3014 }
3015
3016 // For given obj node, if it is in use, flag a validation error and return callback result, else return false
3017 bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
3018                             UNIQUE_VALIDATION_ERROR_CODE error_code) {
3019     if (dev_data->instance_data->disabled.object_in_use) return false;
3020     bool skip = false;
3021     if (obj_node->in_use.load()) {
3022         skip |=
3023             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
3024                     error_code, "Cannot call %s on %s 0x%" PRIx64 " that is currently in use by a command buffer.", caller_name,
3025                     object_string[obj_struct.type], obj_struct.handle);
3026     }
3027     return skip;
3028 }
3029
3030 static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
3031     *mem_info = GetMemObjInfo(dev_data, mem);
3032     *obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
3033     if (dev_data->instance_data->disabled.free_memory) return false;
3034     bool skip = false;
3035     if (*mem_info) {
3036         skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, "vkFreeMemory", VALIDATION_ERROR_2880054a);
3037     }
3038     return skip;
3039 }
3040
3041 static void PreCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
3042     // Clear mem binding for any bound objects
3043     for (auto obj : mem_info->obj_bindings) {
3044         log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
3045                 MEMTRACK_FREED_MEM_REF, "VK Object 0x%" PRIx64 " still has a reference to mem obj 0x%" PRIx64,
3046                 HandleToUint64(obj.handle), HandleToUint64(mem_info->mem));
3047         BINDABLE *bindable_state = nullptr;
3048         switch (obj.type) {
3049             case kVulkanObjectTypeImage:
3050                 bindable_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
3051                 break;
3052             case kVulkanObjectTypeBuffer:
3053                 bindable_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
3054                 break;
3055             default:
3056                 // Should only have buffer or image objects bound to memory
3057                 assert(0);
3058         }
3059
3060         assert(bindable_state);
3061         bindable_state->binding.mem = MEMORY_UNBOUND;
3062         bindable_state->UpdateBoundMemorySet();
3063     }
3064     // Any bound cmd buffers are now invalid
3065     invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
3066     dev_data->memObjMap.erase(mem);
3067 }
3068
3069 VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
3070     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3071     DEVICE_MEM_INFO *mem_info = nullptr;
3072     VK_OBJECT obj_struct;
3073     unique_lock_t lock(global_lock);
3074     bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
3075     if (!skip) {
3076         if (mem != VK_NULL_HANDLE) {
3077             // Avoid free/alloc race by recording state change before dispatching
3078             PreCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
3079         }
3080         lock.unlock();
3081         dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
3082     }
3083 }
3084
3085 // Validate that given Map memory range is valid. This means that the memory should not already be mapped,
3086 //  and that the size of the map range should be:
3087 //  1. Not zero
3088 //  2. Within the size of the memory allocation
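// Hypothetical application calls that the checks below would flag
// (signature: vkMapMemory(device, memory, offset, size, flags, ppData)):
//     vkMapMemory(device, mem, 0, 0, 0, &pData);                           // size of zero
//     vkMapMemory(device, mem, allocationSize, VK_WHOLE_SIZE, 0, &pData);  // offset at or past end of allocation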
3089 static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
3090     bool skip = false;
3091
3092     if (size == 0) {
3093         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3094                        HandleToUint64(mem), MEMTRACK_INVALID_MAP, "VkMapMemory: Attempting to map memory range of size zero");
3095     }
3096
3097     auto mem_element = dev_data->memObjMap.find(mem);
3098     if (mem_element != dev_data->memObjMap.end()) {
3099         auto mem_info = mem_element->second.get();
3100         // It is an application error to call VkMapMemory on an object that is already mapped
3101         if (mem_info->mem_range.size != 0) {
3102             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3103                            HandleToUint64(mem), MEMTRACK_INVALID_MAP,
3104                            "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIx64, HandleToUint64(mem));
3105         }
3106
3107         // Validate that offset + size is within object's allocationSize
3108         if (size == VK_WHOLE_SIZE) {
3109             if (offset >= mem_info->alloc_info.allocationSize) {
3110                 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3111                                HandleToUint64(mem), MEMTRACK_INVALID_MAP,
3112                                "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
3113                                " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
3114                                offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
3115             }
3116         } else {
3117             if ((offset + size) > mem_info->alloc_info.allocationSize) {
3118                 skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3119                                HandleToUint64(mem), VALIDATION_ERROR_31200552,
3120                                "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ".",
3121                                offset, size + offset, mem_info->alloc_info.allocationSize);
3122             }
3123         }
3124     }
3125     return skip;
3126 }
3127
3128 static void storeMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
3129     auto mem_info = GetMemObjInfo(dev_data, mem);
3130     if (mem_info) {
3131         mem_info->mem_range.offset = offset;
3132         mem_info->mem_range.size = size;
3133     }
3134 }
3135
3136 static bool deleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
3137     bool skip = false;
3138     auto mem_info = GetMemObjInfo(dev_data, mem);
3139     if (mem_info) {
3140         if (!mem_info->mem_range.size) {
3141             // Valid Usage: memory must currently be mapped
3142             skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3143                            HandleToUint64(mem), VALIDATION_ERROR_33600562,
3144                            "Unmapping Memory without memory being mapped: mem obj 0x%" PRIx64 ".", HandleToUint64(mem));
3145         }
3146         mem_info->mem_range.size = 0;
3147         if (mem_info->shadow_copy) {
3148             free(mem_info->shadow_copy_base);
3149             mem_info->shadow_copy_base = 0;
3150             mem_info->shadow_copy = 0;
3151         }
3152     }
3153     return skip;
3154 }
3155
3156 // Guard value for pad data
3157 static char NoncoherentMemoryFillValue = 0xb;
3158
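// For non-coherent memory the layer hands the application a shadow copy instead of the driver pointer so that
// writes outside the mapped range can be detected later. Conceptual layout of the shadow allocation:
//   [alignment slack][pad: shadow_pad_size][user data: size][pad: shadow_pad_size]
// The pads (and initially the user region) are filled with NoncoherentMemoryFillValue, and the *ppData returned
// to the application points at the start of the user-data region.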
3159 static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
3160                                      void **ppData) {
3161     auto mem_info = GetMemObjInfo(dev_data, mem);
3162     if (mem_info) {
3163         mem_info->p_driver_data = *ppData;
3164         uint32_t index = mem_info->alloc_info.memoryTypeIndex;
3165         if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
3166             mem_info->shadow_copy = 0;
3167         } else {
3168             if (size == VK_WHOLE_SIZE) {
3169                 size = mem_info->alloc_info.allocationSize - offset;
3170             }
3171             mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3172             assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) ==
3173                    0);
3174             // Ensure start of mapped region reflects hardware alignment constraints
3175             uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
3176
3177             // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
3178             uint64_t start_offset = offset % map_alignment;
3179             // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
3180             mem_info->shadow_copy_base =
3181                 malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
3182
3183             mem_info->shadow_copy =
3184                 reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
3185                                          ~(map_alignment - 1)) +
3186                 start_offset;
3187             assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
3188                               map_alignment) == 0);
3189
3190             memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
3191             *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
3192         }
3193     }
3194 }
3195
3196 // Verify that the state of a fence being waited on is appropriate. That is, the fence
3197 //  should have been submitted on a queue or during acquire-next-image; a fence that was
3198 //  never submitted may never become signaled, so such a wait is flagged as a warning.
3199 static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
3200     bool skip = false;
3201
3202     auto pFence = GetFenceNode(dev_data, fence);
3203     if (pFence && pFence->scope == kSyncScopeInternal) {
3204         if (pFence->state == FENCE_UNSIGNALED) {
3205             skip |=
3206                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3207                         HandleToUint64(fence), MEMTRACK_INVALID_FENCE_STATE,
3208                         "%s called for fence 0x%" PRIx64 " which has not been submitted on a Queue or during acquire next image.",
3209                         apiCall, HandleToUint64(fence));
3210         }
3211     }
3212     return skip;
3213 }
3214
3215 static void RetireFence(layer_data *dev_data, VkFence fence) {
3216     auto pFence = GetFenceNode(dev_data, fence);
3217     if (pFence->scope == kSyncScopeInternal) {
3218         if (pFence->signaler.first != VK_NULL_HANDLE) {
3219             // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
3220             RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
3221         } else {
3222             // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
3223             // the fence as retired.
3224             pFence->state = FENCE_RETIRED;
3225         }
3226     }
3227 }
3228
3229 static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
3230     if (dev_data->instance_data->disabled.wait_for_fences) return false;
3231     bool skip = false;
3232     for (uint32_t i = 0; i < fence_count; i++) {
3233         skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
3234         skip |= VerifyQueueStateToFence(dev_data, fences[i]);
3235     }
3236     return skip;
3237 }
3238
3239 static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
3240     // When we know that all fences are complete we can clean/remove their CBs
3241     if ((VK_TRUE == wait_all) || (1 == fence_count)) {
3242         for (uint32_t i = 0; i < fence_count; i++) {
3243             RetireFence(dev_data, fences[i]);
3244         }
3245     }
3246     // NOTE : Alternate case not handled here is when some fences have completed. In
3247     //  this case for app to guarantee which fences completed it will have to call
3248     //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
3249 }
3250
3251 VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
3252                                              uint64_t timeout) {
3253     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3254     // Verify fence status of submitted fences
3255     unique_lock_t lock(global_lock);
3256     bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
3257     lock.unlock();
3258     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3259
3260     VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
3261
3262     if (result == VK_SUCCESS) {
3263         lock.lock();
3264         PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
3265         lock.unlock();
3266     }
3267     return result;
3268 }
3269
3270 static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
3271     if (dev_data->instance_data->disabled.get_fence_state) return false;
3272     return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
3273 }
3274
3275 static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
3276
3277 VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
3278     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3279     unique_lock_t lock(global_lock);
3280     bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
3281     lock.unlock();
3282     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3283
3284     VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
3285     if (result == VK_SUCCESS) {
3286         lock.lock();
3287         PostCallRecordGetFenceStatus(dev_data, fence);
3288         lock.unlock();
3289     }
3290     return result;
3291 }
3292
3293 static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
3294     // Add queue to tracking set only if it is new
3295     auto result = dev_data->queues.emplace(queue);
3296     if (result.second) {
3297         QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
3298         queue_state->queue = queue;
3299         queue_state->queueFamilyIndex = q_family_index;
3300         queue_state->seq = 0;
3301     }
3302 }
3303
3304 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
3305     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3306     dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
3307     lock_guard_t lock(global_lock);
3308
3309     PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
3310 }
3311
3312 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
3313     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3314     dev_data->dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
3315     lock_guard_t lock(global_lock);
3316
3317     if (*pQueue != VK_NULL_HANDLE) {
3318         PostCallRecordGetDeviceQueue(dev_data, pQueueInfo->queueFamilyIndex, *pQueue);
3319     }
3320 }
3321
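// A queue has gone idle once every submission currently enqueued has retired; the target sequence number checked
// below is the queue's current seq plus the number of outstanding submissions.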
3322 static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
3323     *queue_state = GetQueueState(dev_data, queue);
3324     if (dev_data->instance_data->disabled.queue_wait_idle) return false;
3325     return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
3326 }
3327
3328 static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
3329     RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
3330 }
3331
3332 VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
3333     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
3334     QUEUE_STATE *queue_state = nullptr;
3335     unique_lock_t lock(global_lock);
3336     bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
3337     lock.unlock();
3338     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3339     VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
3340     if (VK_SUCCESS == result) {
3341         lock.lock();
3342         PostCallRecordQueueWaitIdle(dev_data, queue_state);
3343         lock.unlock();
3344     }
3345     return result;
3346 }
3347
3348 static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
3349     if (dev_data->instance_data->disabled.device_wait_idle) return false;
3350     bool skip = false;
3351     for (auto &queue : dev_data->queueMap) {
3352         skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3353     }
3354     return skip;
3355 }
3356
3357 static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
3358     for (auto &queue : dev_data->queueMap) {
3359         RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
3360     }
3361 }
3362
3363 VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
3364     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3365     unique_lock_t lock(global_lock);
3366     bool skip = PreCallValidateDeviceWaitIdle(dev_data);
3367     lock.unlock();
3368     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3369     VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
3370     if (VK_SUCCESS == result) {
3371         lock.lock();
3372         PostCallRecordDeviceWaitIdle(dev_data);
3373         lock.unlock();
3374     }
3375     return result;
3376 }
3377
3378 static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
3379     *fence_node = GetFenceNode(dev_data, fence);
3380     *obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
3381     if (dev_data->instance_data->disabled.destroy_fence) return false;
3382     bool skip = false;
3383     if (*fence_node) {
3384         if ((*fence_node)->scope == kSyncScopeInternal && (*fence_node)->state == FENCE_INFLIGHT) {
3385             skip |=
3386                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
3387                         HandleToUint64(fence), VALIDATION_ERROR_24e008c0, "Fence 0x%" PRIx64 " is in use.", HandleToUint64(fence));
3388         }
3389     }
3390     return skip;
3391 }
3392
3393 static void PreCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
3394
3395 VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
3396     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3397     // Common data objects used pre & post call
3398     FENCE_NODE *fence_node = nullptr;
3399     VK_OBJECT obj_struct;
3400     unique_lock_t lock(global_lock);
3401     bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
3402
3403     if (!skip) {
3404         // Pre-record to avoid Destroy/Create race
3405         PreCallRecordDestroyFence(dev_data, fence);
3406         lock.unlock();
3407         dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
3408     }
3409 }
3410
3411 static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
3412                                             VK_OBJECT *obj_struct) {
3413     *sema_node = GetSemaphoreNode(dev_data, semaphore);
3414     *obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
3415     if (dev_data->instance_data->disabled.destroy_semaphore) return false;
3416     bool skip = false;
3417     if (*sema_node) {
3418         skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, "vkDestroySemaphore", VALIDATION_ERROR_268008e2);
3419     }
3420     return skip;
3421 }
3422
3423 static void PreCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
3424
3425 VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
3426     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3427     SEMAPHORE_NODE *sema_node;
3428     VK_OBJECT obj_struct;
3429     unique_lock_t lock(global_lock);
3430     bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
3431     if (!skip) {
3432         // Pre-record to avoid Destroy/Create race
3433         PreCallRecordDestroySemaphore(dev_data, semaphore);
3434         lock.unlock();
3435         dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
3436     }
3437 }
3438
3439 static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
3440     *event_state = GetEventNode(dev_data, event);
3441     *obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
3442     if (dev_data->instance_data->disabled.destroy_event) return false;
3443     bool skip = false;
3444     if (*event_state) {
3445         skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, "vkDestroyEvent", VALIDATION_ERROR_24c008f2);
3446     }
3447     return skip;
3448 }
3449
3450 static void PreCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
3451     invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
3452     dev_data->eventMap.erase(event);
3453 }
3454
3455 VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
3456     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3457     EVENT_STATE *event_state = nullptr;
3458     VK_OBJECT obj_struct;
3459     unique_lock_t lock(global_lock);
3460     bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
3461     if (!skip) {
3462         if (event != VK_NULL_HANDLE) {
3463             // Pre-record to avoid Destroy/Create race
3464             PreCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
3465         }
3466         lock.unlock();
3467         dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
3468     }
3469 }
3470
3471 static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
3472                                             VK_OBJECT *obj_struct) {
3473     *qp_state = GetQueryPoolNode(dev_data, query_pool);
3474     *obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
3475     if (dev_data->instance_data->disabled.destroy_query_pool) return false;
3476     bool skip = false;
3477     if (*qp_state) {
3478         skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, "vkDestroyQueryPool", VALIDATION_ERROR_26200632);
3479     }
3480     return skip;
3481 }
3482
3483 static void PreCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
3484                                           VK_OBJECT obj_struct) {
3485     invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
3486     dev_data->queryPoolMap.erase(query_pool);
3487 }
3488
3489 VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
3490     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3491     QUERY_POOL_NODE *qp_state = nullptr;
3492     VK_OBJECT obj_struct;
3493     unique_lock_t lock(global_lock);
3494     bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
3495     if (!skip) {
3496         if (queryPool != VK_NULL_HANDLE) {
3497             // Pre-record to avoid Destroy/Create race
3498             PreCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
3499         }
3500         lock.unlock();
3501         dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
3502     }
3503 }

3504 static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3505                                                uint32_t query_count, VkQueryResultFlags flags,
3506                                                unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3507     bool skip = false;
3508     auto query_pool_state = dev_data->queryPoolMap.find(query_pool);
3509     if (query_pool_state != dev_data->queryPoolMap.end()) {
3510         if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
3511             skip |=
3512                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3513                         VALIDATION_ERROR_2fa00664,
3514                         "QueryPool 0x%" PRIx64
3515                         " was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
3516                         HandleToUint64(query_pool));
3517         }
3518     }
3519
3520     // TODO: clean this up, it's insanely wasteful.
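    // Build a map from each query object to every command buffer currently in flight that references it; it is
    // consumed by the checks below and again by PostCallRecordGetQueryPoolResults.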
3521     for (auto cmd_buffer : dev_data->commandBufferMap) {
3522         if (cmd_buffer.second->in_use.load()) {
3523             for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
3524                 (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
3525             }
3526         }
3527     }
3528
3529     if (dev_data->instance_data->disabled.get_query_pool_results) return false;
3530     for (uint32_t i = 0; i < query_count; ++i) {
3531         QueryObject query = {query_pool, first_query + i};
3532         auto qif_pair = queries_in_flight->find(query);
3533         auto query_state_pair = dev_data->queryToStateMap.find(query);
3534         if (query_state_pair != dev_data->queryToStateMap.end()) {
3535             // Available and in flight
3536             if (qif_pair != queries_in_flight->end()) {
3537                 if (query_state_pair->second) {
3538                     for (auto cmd_buffer : qif_pair->second) {
3539                         auto cb = GetCBNode(dev_data, cmd_buffer);
3540                         auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3541                         if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
3542                             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3543                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, DRAWSTATE_INVALID_QUERY,
3544                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
3545                                             HandleToUint64(query_pool), first_query + i);
3546                         }
3547                     }
3548                 }
3549             } else if (!query_state_pair->second) {  // Unavailable and Not in flight
3550                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3551                                 DRAWSTATE_INVALID_QUERY,
3552                                 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
3553                                 HandleToUint64(query_pool), first_query + i);
3554             }
3555         } else {  // Uninitialized
3556             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
3557                             DRAWSTATE_INVALID_QUERY,
3558                             "Cannot get query results on queryPool 0x%" PRIx64
3559                             " with index %d as data has not been collected for this index.",
3560                             HandleToUint64(query_pool), first_query + i);
3561         }
3562     }
3563     return skip;
3564 }
3565
3566 static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
3567                                               uint32_t query_count,
3568                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
3569     for (uint32_t i = 0; i < query_count; ++i) {
3570         QueryObject query = {query_pool, first_query + i};
3571         auto qif_pair = queries_in_flight->find(query);
3572         auto query_state_pair = dev_data->queryToStateMap.find(query);
3573         if (query_state_pair != dev_data->queryToStateMap.end()) {
3574             // Available and in flight
3575             if (qif_pair != queries_in_flight->end() && query_state_pair != dev_data->queryToStateMap.end() &&
3576                 query_state_pair->second) {
3577                 for (auto cmd_buffer : qif_pair->second) {
3578                     auto cb = GetCBNode(dev_data, cmd_buffer);
3579                     auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
3580                     if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
3581                         for (auto event : query_event_pair->second) {
3582                             dev_data->eventMap[event].needsSignaled = true;
3583                         }
3584                     }
3585                 }
3586             }
3587         }
3588     }
3589 }
3590
3591 VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
3592                                                    size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
3593     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3594     unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
3595     unique_lock_t lock(global_lock);
3596     bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
3597     lock.unlock();
3598     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
3599     VkResult result =
3600         dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
3601     lock.lock();
3602     PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
3603     lock.unlock();
3604     return result;
3605 }
3606
3607 // Return true if given ranges intersect, else false
3608 // Prereq : For both ranges, range->end - range->start > 0. A violation of this should already have
3609 //  resulted in an error, so it is not re-checked here.
3610 // When one range is linear and the other is non-linear, the comparison is padded out to bufferImageGranularity.
3611 // In that padded case, if an alias is encountered a warning is reported and skip may be set by the debug
3612 //  callback, so the caller should merge in the returned skip value whenever the padded case is possible.
3613 // This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
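// Illustrative example, assuming a bufferImageGranularity of 0x400: a linear range ending at 0x3ff and a
// non-linear range starting at 0x400 land in different granularity pages and do not intersect, while a linear
// range ending at 0x400 and a non-linear range starting at 0x7ff share the page spanning 0x400..0x7ff and are
// treated as intersecting (and reported as potential aliasing).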
3614 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
3615                             bool skip_checks) {
3616     *skip = false;
3617     auto r1_start = range1->start;
3618     auto r1_end = range1->end;
3619     auto r2_start = range2->start;
3620     auto r2_end = range2->end;
3621     VkDeviceSize pad_align = 1;
3622     if (range1->linear != range2->linear) {
3623         pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
3624     }
3625     if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
3626     if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
3627
3628     if (!skip_checks && (range1->linear != range2->linear)) {
3629         // In linear vs. non-linear case, warn of aliasing
3630         const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
3631         const char *r1_type_str = range1->image ? "image" : "buffer";
3632         const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
3633         const char *r2_type_str = range2->image ? "image" : "buffer";
3634         auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
3635         *skip |= log_msg(
3636             dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, MEMTRACK_INVALID_ALIASING,
3637             "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
3638             " which may indicate a bug. For further info refer to the Buffer-Image Granularity section of the Vulkan "
3639             "specification. "
3640             "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
3641             r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
3642     }
3643     // Ranges intersect
3644     return true;
3645 }
3646 // Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
3647 bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
3648     // Create a local MEMORY_RANGE struct to wrap offset/size
3649     MEMORY_RANGE range_wrap;
3650     // Synch linear with range1 to avoid padding and potential validation error case
3651     range_wrap.linear = range1->linear;
3652     range_wrap.start = offset;
3653     range_wrap.end = end;
3654     bool tmp_bool;
3655     return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
3656 }
3657
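// Validate phase of memory-range insertion: scan the ranges already bound to this allocation for aliasing
// (warning on linear vs. non-linear overlap) and check that memoryOffset falls within the allocation size.
// The corresponding bookkeeping is done by InsertMemoryRange below.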
3658 static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
3659                                       VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
3660                                       bool is_linear, const char *api_name) {
3661     bool skip = false;
3662
3663     MEMORY_RANGE range;
3664     range.image = is_image;
3665     range.handle = handle;
3666     range.linear = is_linear;
3667     range.memory = mem_info->mem;
3668     range.start = memoryOffset;
3669     range.size = memRequirements.size;
3670     range.end = memoryOffset + memRequirements.size - 1;
3671     range.aliases.clear();
3672
3673     // Check for aliasing problems.
3674     for (auto &obj_range_pair : mem_info->bound_ranges) {
3675         auto check_range = &obj_range_pair.second;
3676         bool intersection_error = false;
3677         if (rangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
3678             skip |= intersection_error;
3679             range.aliases.insert(check_range);
3680         }
3681     }
3682
3683     if (memoryOffset >= mem_info->alloc_info.allocationSize) {
3684         UNIQUE_VALIDATION_ERROR_CODE error_code = is_image ? VALIDATION_ERROR_1740082c : VALIDATION_ERROR_1700080e;
3685         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3686                        HandleToUint64(mem_info->mem), error_code,
3687                        "In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64 "), memoryOffset=0x%" PRIxLEAST64
3688                        " must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
3689                        api_name, HandleToUint64(mem_info->mem), HandleToUint64(handle), memoryOffset,
3690                        mem_info->alloc_info.allocationSize);
3691     }
3692
3693     return skip;
3694 }
3695
3696 // Object with given handle is being bound to memory w/ given mem_info struct.
3697 //  Track the newly bound memory range with given memoryOffset
3698 //  Also scan any previous ranges and track aliased ranges with the new range. Error reporting for linear
3699 //  vs. non-linear overlap is handled by ValidateInsertMemoryRange in the validate phase (this function passes
3700 //  skip_checks=true); here we only record state, so nothing is returned.
3701 // is_image indicates an image object, otherwise handle is for a buffer
3702 // is_linear indicates a buffer or linear image
3703 static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
3704                               VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
3705     MEMORY_RANGE range;
3706
3707     range.image = is_image;
3708     range.handle = handle;
3709     range.linear = is_linear;
3710     range.memory = mem_info->mem;
3711     range.start = memoryOffset;
3712     range.size = memRequirements.size;
3713     range.end = memoryOffset + memRequirements.size - 1;
3714     range.aliases.clear();
3715     // Update Memory aliasing
3716     // Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
3717     // inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
3718     std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
3719     for (auto &obj_range_pair : mem_info->bound_ranges) {
3720         auto check_range = &obj_range_pair.second;
3721         bool intersection_error = false;
3722         if (rangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
3723             range.aliases.insert(check_range);
3724             tmp_alias_ranges.insert(check_range);
3725         }
3726     }
3727     mem_info->bound_ranges[handle] = std::move(range);
3728     for (auto tmp_range : tmp_alias_ranges) {
3729         tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
3730     }
3731     if (is_image)
3732         mem_info->bound_images.insert(handle);
3733     else
3734         mem_info->bound_buffers.insert(handle);
3735 }
3736
3737 static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
3738                                            VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
3739                                            const char *api_name) {
3740     return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
3741 }
3742 static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3743                                    VkMemoryRequirements mem_reqs, bool is_linear) {
3744     InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
3745 }
3746
3747 static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
3748                                             VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
3749     return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
3750 }
3751 static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
3752                                     VkMemoryRequirements mem_reqs) {
3753     InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
3754 }
3755
3756 // Remove MEMORY_RANGE struct for given handle from bound_ranges of mem_info
3757 //  is_image indicates if handle is for image or buffer
3758 //  This function will also remove the handle-to-index mapping from the appropriate
3759 //  map and clean up any aliases for range being removed.
3760 static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
3761     auto erase_range = &mem_info->bound_ranges[handle];
3762     for (auto alias_range : erase_range->aliases) {
3763         alias_range->aliases.erase(erase_range);
3764     }
3765     erase_range->aliases.clear();
3766     mem_info->bound_ranges.erase(handle);
3767     if (is_image) {
3768         mem_info->bound_images.erase(handle);
3769     } else {
3770         mem_info->bound_buffers.erase(handle);
3771     }
3772 }
3773
3774 void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
3775
3776 void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
3777
3778 VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
3779     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3780     BUFFER_STATE *buffer_state = nullptr;
3781     VK_OBJECT obj_struct;
3782     unique_lock_t lock(global_lock);
3783     bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
3784     if (!skip) {
3785         if (buffer != VK_NULL_HANDLE) {
3786             // Pre-record to avoid Destroy/Create race
3787             PreCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
3788         }
3789         lock.unlock();
3790         dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
3791     }
3792 }
3793
3794 VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
3795     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3796     // Common data objects used pre & post call
3797     BUFFER_VIEW_STATE *buffer_view_state = nullptr;
3798     VK_OBJECT obj_struct;
3799     unique_lock_t lock(global_lock);
3800     // Validate state before calling down chain, update common data if we'll be calling down chain
3801     bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
3802     if (!skip) {
3803         if (bufferView != VK_NULL_HANDLE) {
3804             // Pre-record to avoid Destroy/Create race
3805             PreCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
3806         }
3807         lock.unlock();
3808         dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
3809     }
3810 }
3811
3812 VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
3813     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3814     IMAGE_STATE *image_state = nullptr;
3815     VK_OBJECT obj_struct;
3816     unique_lock_t lock(global_lock);
3817     bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
3818     if (!skip) {
3819         if (image != VK_NULL_HANDLE) {
3820             // Pre-record to avoid Destroy/Create race
3821             PreCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
3822         }
3823         lock.unlock();
3824         dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
3825     }
3826 }
3827
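// A memory object is compatible with a resource when the bit for its memoryTypeIndex is set in the resource's
// VkMemoryRequirements::memoryTypeBits, e.g. memoryTypeIndex 2 requires bit (1u << 2) = 0x4 to be set.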
3828 static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
3829                                 const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
3830     bool skip = false;
3831     if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
3832         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
3833                        HandleToUint64(mem_info->mem), msgCode,
3834                        "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
3835                        "type (0x%X) of this memory object 0x%" PRIx64 ".",
3836                        funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem));
3837     }
3838     return skip;
3839 }
3840
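// Checks performed below: the mem-binding itself (ValidateSetMemBinding), that vkGetBufferMemoryRequirements()
// has been called, the bound range and memory-type compatibility, memoryOffset alignment and remaining size
// against the buffer's requirements, dedicated-allocation constraints, and the per-usage minimum-offset-alignment
// device limits.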
3841 static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3842                                             VkDeviceSize memoryOffset, const char *api_name) {
3843     bool skip = false;
3844     if (buffer_state) {
3845         unique_lock_t lock(global_lock);
3846         // Track objects tied to memory
3847         uint64_t buffer_handle = HandleToUint64(buffer);
3848         skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
3849         if (!buffer_state->memory_requirements_checked) {
3850             // There's no explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
3851             // BindBufferMemory, but it's implied by the fact that the memory being bound must conform to the
3852             // VkMemoryRequirements returned by vkGetBufferMemoryRequirements().
3853             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3854                             buffer_handle, DRAWSTATE_INVALID_BUFFER,
3855                             "%s: Binding memory to buffer 0x%" PRIx64
3856                             " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
3857                             api_name, HandleToUint64(buffer_handle));
3858             // Make the call for them so we can verify the state
3859             lock.unlock();
3860             dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
3861             lock.lock();
3862         }
3863
3864         // Validate bound memory range information
3865         const auto mem_info = GetMemObjInfo(dev_data, mem);
3866         if (mem_info) {
3867             skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
3868             skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
3869                                         VALIDATION_ERROR_17000816);
3870         }
3871
3872         // Validate memory requirements alignment
3873         if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
3874             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3875                             buffer_handle, VALIDATION_ERROR_17000818,
3876                             "%s: memoryOffset is 0x%" PRIxLEAST64
3877                             " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
3878                             ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3879                             api_name, memoryOffset, buffer_state->requirements.alignment);
3880         }
3881
3882         if (mem_info) {
3883             // Validate memory requirements size
3884             if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
3885                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3886                                 buffer_handle, VALIDATION_ERROR_1700081a,
3887                                 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
3888                                 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
3889                                 ", returned from a call to vkGetBufferMemoryRequirements with buffer.",
3890                                 api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
3891             }
3892
3893             // Validate dedicated allocation
3894             if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
3895                 // TODO: Add vkBindBufferMemory2KHR error message when added to spec.
3896                 auto validation_error = VALIDATION_ERROR_UNDEFINED;
3897                 if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
3898                     validation_error = VALIDATION_ERROR_17000bc8;
3899                 }
3900                 skip |=
3901                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3902                             buffer_handle, validation_error,
3903                             "%s: for dedicated memory allocation 0x%" PRIxLEAST64
3904                             ", VkMemoryDedicatedAllocateInfoKHR::buffer 0x%" PRIxLEAST64 " must be equal to buffer 0x%" PRIxLEAST64
3905                             " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
3906                             api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_buffer), buffer_handle, memoryOffset);
3907             }
3908         }
3909
3910         // Validate device limits alignments
3911         static const VkBufferUsageFlagBits usage_list[3] = {
3912             static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
3913             VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
3914         static const char *memory_type[3] = {"texel", "uniform", "storage"};
3915         static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
3916                                              "minStorageBufferOffsetAlignment"};
3917
3918         // TODO:  vk_validation_stats.py cannot abide braces immediately preceding or following a validation error enum
3919         // clang-format off
3920         static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = { VALIDATION_ERROR_17000810, VALIDATION_ERROR_17000812,
3921             VALIDATION_ERROR_17000814 };
3922         // clang-format on
3923
3924         // These values are read from the device limits at call time (unlike the static tables above).
3925         const VkDeviceSize offset_requirement[3] = {
3926             dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
3927             dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
3928             dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
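        // usage_list, memory_type, offset_name, msgCode and offset_requirement are parallel arrays indexed
        // together in the loop below (texel, uniform, storage).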
3929         VkBufferUsageFlags usage = buffer_state->createInfo.usage;
3930
3931         for (int i = 0; i < 3; i++) {
3932             if (usage & usage_list[i]) {
3933                 if (SafeModulo(memoryOffset, offset_requirement[i]) != 0) {
3934                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3935                                     buffer_handle, msgCode[i],
3936                                     "%s: %s memoryOffset is 0x%" PRIxLEAST64
3937                                     " but must be a multiple of device limit %s 0x%" PRIxLEAST64 ".",
3938                                     api_name, memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
3939                 }
3940             }
3941         }
3942     }
3943     return skip;
3944 }
3945
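// Record bookkeeping for a successful buffer/memory bind: insert the bound range into the memory
// object's range tracking and set the buffer's memory binding.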
3946 static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
3947                                            VkDeviceSize memoryOffset, const char *api_name) {
3948     if (buffer_state) {
3949         unique_lock_t lock(global_lock);
3950         // Track bound memory range information
3951         auto mem_info = GetMemObjInfo(dev_data, mem);
3952         if (mem_info) {
3953             InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
3954         }
3955
3956         // Track objects tied to memory
3957         uint64_t buffer_handle = HandleToUint64(buffer);
3958         SetMemBinding(dev_data, mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer, api_name);
3959     }
3960 }
3961
3962 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
3963     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
3964     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
3965     BUFFER_STATE *buffer_state;
3966     {
3967         unique_lock_t lock(global_lock);
3968         buffer_state = GetBufferState(dev_data, buffer);
3969     }
3970     bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
3971     if (!skip) {
3972         result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
3973         if (result == VK_SUCCESS) {
3974             PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
3975         }
3976     }
3977     return result;
3978 }
3979
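// Validate vkBindBufferMemory2(KHR): resolve each buffer's state under the global lock, then run the
// single-bind validation above on every element of pBindInfos.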
3980 static bool PreCallValidateBindBufferMemory2(layer_data *dev_data, std::vector<BUFFER_STATE *> *buffer_state,
3981                                              uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
3982     {
3983         unique_lock_t lock(global_lock);
3984         for (uint32_t i = 0; i < bindInfoCount; i++) {
3985             (*buffer_state)[i] = GetBufferState(dev_data, pBindInfos[i].buffer);
3986         }
3987     }
3988     bool skip = false;
3989     char api_name[64];
3990     for (uint32_t i = 0; i < bindInfoCount; i++) {
3991         snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
3992         skip |= PreCallValidateBindBufferMemory(dev_data, pBindInfos[i].buffer, (*buffer_state)[i], pBindInfos[i].memory,
3993                                                 pBindInfos[i].memoryOffset, api_name);
3994     }
3995     return skip;
3996 }
3997
3998 static void PostCallRecordBindBufferMemory2(layer_data *dev_data, const std::vector<BUFFER_STATE *> &buffer_state,
3999                                             uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
4000     for (uint32_t i = 0; i < bindInfoCount; i++) {
4001         PostCallRecordBindBufferMemory(dev_data, pBindInfos[i].buffer, buffer_state[i], pBindInfos[i].memory,
4002                                        pBindInfos[i].memoryOffset, "vkBindBufferMemory2()");
4003     }
4004 }
4005
4006 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
4007                                                  const VkBindBufferMemoryInfoKHR *pBindInfos) {
4008     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4009     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4010     std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4011     if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4012         result = dev_data->dispatch_table.BindBufferMemory2(device, bindInfoCount, pBindInfos);
4013         if (result == VK_SUCCESS) {
4014             PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
4015         }
4016     }
4017     return result;
4018 }
4019
4020 VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
4021                                                     const VkBindBufferMemoryInfoKHR *pBindInfos) {
4022     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4023     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4024     std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
4025     if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
4026         result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
4027         if (result == VK_SUCCESS) {
4028             PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
4029         }
4030     }
4031     return result;
4032 }
4033
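// Cache the queried requirements on the buffer state and note that the app has queried them, so
// bind-time validation (PreCallValidateBindBufferMemory above) can check size and alignment.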
4034 static void PostCallRecordGetBufferMemoryRequirements(layer_data *dev_data, VkBuffer buffer,
4035                                                       VkMemoryRequirements *pMemoryRequirements) {
4036     BUFFER_STATE *buffer_state;
4037     {
4038         unique_lock_t lock(global_lock);
4039         buffer_state = GetBufferState(dev_data, buffer);
4040     }
4041     if (buffer_state) {
4042         buffer_state->requirements = *pMemoryRequirements;
4043         buffer_state->memory_requirements_checked = true;
4044     }
4045 }
4046
4047 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
4048                                                        VkMemoryRequirements *pMemoryRequirements) {
4049     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4050     dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
4051     PostCallRecordGetBufferMemoryRequirements(dev_data, buffer, pMemoryRequirements);
4052 }
4053
4054 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4055                                                         VkMemoryRequirements2KHR *pMemoryRequirements) {
4056     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4057     dev_data->dispatch_table.GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
4058     PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
4059 }
4060
4061 VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
4062                                                            VkMemoryRequirements2KHR *pMemoryRequirements) {
4063     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4064     dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4065     PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
4066 }
4067
4068 static void PostCallRecordGetImageMemoryRequirements(layer_data *dev_data, VkImage image,
4069                                                      VkMemoryRequirements *pMemoryRequirements) {
4070     IMAGE_STATE *image_state;
4071     {
4072         unique_lock_t lock(global_lock);
4073         image_state = GetImageState(dev_data, image);
4074     }
4075     if (image_state) {
4076         image_state->requirements = *pMemoryRequirements;
4077         image_state->memory_requirements_checked = true;
4078     }
4079 }
4080
4081 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
4082     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4083     dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
4084     PostCallRecordGetImageMemoryRequirements(dev_data, image, pMemoryRequirements);
4085 }
4086
4087 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4088                                                        VkMemoryRequirements2KHR *pMemoryRequirements) {
4089     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4090     dev_data->dispatch_table.GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
4091     PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
4092 }
4093
4094 VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
4095                                                           VkMemoryRequirements2KHR *pMemoryRequirements) {
4096     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4097     dev_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
4098     PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
4099 }
4100
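// Record the sparse memory requirements reported for an image and note whether any of them require
// metadata (VK_IMAGE_ASPECT_METADATA_BIT).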
4101 static void PostCallRecordGetImageSparseMemoryRequirements(IMAGE_STATE *image_state, uint32_t req_count,
4102                                                            VkSparseImageMemoryRequirements *reqs) {
4103     image_state->get_sparse_reqs_called = true;
4104     image_state->sparse_requirements.resize(req_count);
4105     if (reqs) {
4106         std::copy(reqs, reqs + req_count, image_state->sparse_requirements.begin());
4107     }
4108     for (const auto &req : image_state->sparse_requirements) {
4109         if (req.formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
4110             image_state->sparse_metadata_required = true;
4111         }
4112     }
4113 }
4114
4115 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
4116                                                             VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
4117     // TODO : Expand tracking as needed; basic requirement tracking is done in the PostCallRecord helper below
4118     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4119     dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
4120                                                               pSparseMemoryRequirements);
4121     unique_lock_t lock(global_lock);
4122     auto image_state = GetImageState(dev_data, image);
4123     PostCallRecordGetImageSparseMemoryRequirements(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4124 }
4125
4126 static void PostCallRecordGetImageSparseMemoryRequirements2(IMAGE_STATE *image_state, uint32_t req_count,
4127                                                             VkSparseImageMemoryRequirements2KHR *reqs) {
4128     // If reqs is null, there is nothing to copy or inspect.
4129     if (reqs == nullptr) {
4130         return;
4131     }
4132     std::vector<VkSparseImageMemoryRequirements> sparse_reqs(req_count);
4133     // Migrate to old struct type for common handling with GetImageSparseMemoryRequirements()
4134     for (uint32_t i = 0; i < req_count; ++i) {
4135         assert(!reqs[i].pNext);  // TODO: If an extension is ever added here we need to handle it
4136         sparse_reqs[i] = reqs[i].memoryRequirements;
4137     }
4138     PostCallRecordGetImageSparseMemoryRequirements(image_state, req_count, sparse_reqs.data());
4139 }
4140
4141 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4142                                                              uint32_t *pSparseMemoryRequirementCount,
4143                                                              VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4144     // TODO : Expand tracking as needed; basic requirement tracking is done in the PostCallRecord helper below
4145     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4146     dev_data->dispatch_table.GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount,
4147                                                                pSparseMemoryRequirements);
4148     unique_lock_t lock(global_lock);
4149     auto image_state = GetImageState(dev_data, pInfo->image);
4150     PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4151 }
4152
4153 VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
4154                                                                 const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
4155                                                                 uint32_t *pSparseMemoryRequirementCount,
4156                                                                 VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
4157     // TODO : Expand tracking as needed; basic requirement tracking is done in the PostCallRecord helper below
4158     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4159     dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
4160                                                                   pSparseMemoryRequirements);
4161     unique_lock_t lock(global_lock);
4162     auto image_state = GetImageState(dev_data, pInfo->image);
4163     PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
4164 }
4165
4166 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
4167                                                                         VkImageType type, VkSampleCountFlagBits samples,
4168                                                                         VkImageUsageFlags usage, VkImageTiling tiling,
4169                                                                         uint32_t *pPropertyCount,
4170                                                                         VkSparseImageFormatProperties *pProperties) {
4171     // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4172     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4173     instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling,
4174                                                                                pPropertyCount, pProperties);
4175 }
4176
4177 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(
4178     VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4179     VkSparseImageFormatProperties2KHR *pProperties) {
4180     // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4181     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4182     instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount,
4183                                                                                 pProperties);
4184 }
4185
4186 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
4187     VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
4188     VkSparseImageFormatProperties2KHR *pProperties) {
4189     // TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
4190     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
4191     instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount,
4192                                                                                    pProperties);
4193 }
4194
4195 VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
4196     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4197     // Common data objects used pre & post call
4198     IMAGE_VIEW_STATE *image_view_state = nullptr;
4199     VK_OBJECT obj_struct;
4200     unique_lock_t lock(global_lock);
4201     bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
4202     if (!skip) {
4203         if (imageView != VK_NULL_HANDLE) {
4204             // Pre-record to avoid Destroy/Create race
4205             PreCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
4206         }
4207         lock.unlock();
4208         dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
4209     }
4210 }
4211
4212 VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
4213                                                const VkAllocationCallbacks *pAllocator) {
4214     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4215
4216     unique_lock_t lock(global_lock);
4217     // Pre-record to avoid Destroy/Create race
4218     dev_data->shaderModuleMap.erase(shaderModule);
4219     lock.unlock();
4220
4221     dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
4222 }
4223
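// Look up the pipeline's state for the destroy call and, unless this check is disabled, verify the
// pipeline is not still referenced by an in-flight command buffer.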
4224 static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
4225                                            VK_OBJECT *obj_struct) {
4226     *pipeline_state = getPipelineState(dev_data, pipeline);
4227     *obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
4228     if (dev_data->instance_data->disabled.destroy_pipeline) return false;
4229     bool skip = false;
4230     if (*pipeline_state) {
4231         skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, "vkDestroyPipeline", VALIDATION_ERROR_25c005fa);
4232     }
4233     return skip;
4234 }
4235
4236 static void PreCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
4237                                          VK_OBJECT obj_struct) {
4238     // Any bound cmd buffers are now invalid
4239     invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
4240     dev_data->pipelineMap.erase(pipeline);
4241 }
4242
4243 VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
4244     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4245     PIPELINE_STATE *pipeline_state = nullptr;
4246     VK_OBJECT obj_struct;
4247     unique_lock_t lock(global_lock);
4248     bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
4249     if (!skip) {
4250         if (pipeline != VK_NULL_HANDLE) {
4251             // Pre-record to avoid Destroy/Create race
4252             PreCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
4253         }
4254         lock.unlock();
4255         dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
4256     }
4257 }
4258
4259 VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
4260                                                  const VkAllocationCallbacks *pAllocator) {
4261     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4262     unique_lock_t lock(global_lock);
4263     // Pre-record to avoid Destroy/Create race
4264     dev_data->pipelineLayoutMap.erase(pipelineLayout);
4265     lock.unlock();
4266
4267     dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
4268 }
4269
4270 static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
4271                                           VK_OBJECT *obj_struct) {
4272     *sampler_state = GetSamplerState(dev_data, sampler);
4273     *obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
4274     if (dev_data->instance_data->disabled.destroy_sampler) return false;
4275     bool skip = false;
4276     if (*sampler_state) {
4277         skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, "vkDestroySampler", VALIDATION_ERROR_26600874);
4278     }
4279     return skip;
4280 }
4281
4282 static void PreCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
4283                                         VK_OBJECT obj_struct) {
4284     // Any bound cmd buffers are now invalid
4285     if (sampler_state) invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
4286     dev_data->samplerMap.erase(sampler);
4287 }
4288
4289 VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
4290     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4291     SAMPLER_STATE *sampler_state = nullptr;
4292     VK_OBJECT obj_struct;
4293     unique_lock_t lock(global_lock);
4294     bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
4295     if (!skip) {
4296         if (sampler != VK_NULL_HANDLE) {
4297             // Pre-record to avoid Destroy/Create race
4298             PreCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
4299         }
4300         lock.unlock();
4301         dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
4302     }
4303 }
4304
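// Mark the layout object as destroyed before erasing the map entry so that remaining holders of the
// shared layout object (e.g. descriptor sets) can detect the destruction.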
4305 static void PreCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
4306     auto layout_it = dev_data->descriptorSetLayoutMap.find(ds_layout);
4307     if (layout_it != dev_data->descriptorSetLayoutMap.end()) {
4308         layout_it->second.get()->MarkDestroyed();
4309         dev_data->descriptorSetLayoutMap.erase(layout_it);
4310     }
4311 }
4312
4313 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
4314                                                       const VkAllocationCallbacks *pAllocator) {
4315     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4316     {
4317         lock_guard_t lock(global_lock);
4318         // Pre-record to avoid Destroy/Create race
4319         PreCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
4320     }
4321     dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
4322 }
4323
4324 static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
4325                                                  DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
4326     *desc_pool_state = GetDescriptorPoolState(dev_data, pool);
4327     *obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
4328     if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
4329     bool skip = false;
4330     if (*desc_pool_state) {
4331         skip |=
4332             ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, "vkDestroyDescriptorPool", VALIDATION_ERROR_2440025e);
4333     }
4334     return skip;
4335 }
4336
4337 static void PreCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
4338                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
4339     if (desc_pool_state) {
4340         // Any bound cmd buffers are now invalid
4341         invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
4342         // Free sets that were in this pool
4343         for (auto ds : desc_pool_state->sets) {
4344             freeDescriptorSet(dev_data, ds);
4345         }
4346         dev_data->descriptorPoolMap.erase(descriptorPool);
4347         delete desc_pool_state;
4348     }
4349 }
4350
4351 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
4352                                                  const VkAllocationCallbacks *pAllocator) {
4353     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4354     DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
4355     VK_OBJECT obj_struct;
4356     unique_lock_t lock(global_lock);
4357     bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
4358     if (!skip) {
4359         // Pre-record to avoid Destroy/Create race
4360         PreCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
4361         lock.unlock();
4362         dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
4363     }
4364 }
4365
4366 // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
4367 //  If this is a secondary command buffer, then make sure its primary is also in-flight
4368 //  If primary is not in-flight, then remove secondary from global in-flight set
4369 // This function is only valid at a point when cmdBuffer is being reset or freed
4370 static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
4371                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
4372     bool skip = false;
4373     if (cb_node->in_use.load()) {
4374         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4375                         HandleToUint64(cb_node->commandBuffer), error_code,
4376                         "Attempt to %s command buffer (0x%" PRIx64 ") which is in use.", action,
4377                         HandleToUint64(cb_node->commandBuffer));
4378     }
4379     return skip;
4380 }
4381
4382 // Iterate over all cmdBuffers in given commandPool and verify that each is not in use
4383 static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
4384                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
4385     bool skip = false;
4386     for (auto cmd_buffer : pPool->commandBuffers) {
4387         skip |= checkCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
4388     }
4389     return skip;
4390 }
4391
4392 // Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
4393 static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
4394                                     const VkCommandBuffer *command_buffers) {
4395     for (uint32_t i = 0; i < command_buffer_count; i++) {
4396         auto cb_state = GetCBNode(dev_data, command_buffers[i]);
4397         // Remove references to command buffer's state and delete
4398         if (cb_state) {
4399             // reset prior to delete, removing various references to it.
4400             // TODO: fix this, it's insane.
4401             ResetCommandBufferState(dev_data, cb_state->commandBuffer);
4402             // Remove the cb_state's references from layer_data and COMMAND_POOL_NODE
4403             dev_data->commandBufferMap.erase(cb_state->commandBuffer);
4404             pool_state->commandBuffers.erase(command_buffers[i]);
4405             delete cb_state;
4406         }
4407     }
4408 }
4409
4410 VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
4411                                               const VkCommandBuffer *pCommandBuffers) {
4412     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4413     bool skip = false;
4414     unique_lock_t lock(global_lock);
4415
4416     for (uint32_t i = 0; i < commandBufferCount; i++) {
4417         auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
4418         // Verify the command buffer is not in flight before its state is freed below
4419         if (cb_node) {
4420             skip |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_2840005e);
4421         }
4422     }
4423
4424     if (skip) return;
4425
4426     auto pPool = GetCommandPoolNode(dev_data, commandPool);
4427     FreeCommandBufferStates(dev_data, pPool, commandBufferCount, pCommandBuffers);
4428     lock.unlock();
4429
4430     dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
4431 }
4432
4433 VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
4434                                                  const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
4435     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4436
4437     VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
4438
4439     if (VK_SUCCESS == result) {
4440         lock_guard_t lock(global_lock);
4441         dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
4442         dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
4443     }
4444     return result;
4445 }
4446
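// vkCreateQueryPool: a VK_QUERY_TYPE_PIPELINE_STATISTICS pool requires the pipelineStatisticsQuery
// feature; on success the pool's create info is recorded in queryPoolMap.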
4447 VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
4448                                                const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
4449     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4450     bool skip = false;
4451     if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
4452         if (!dev_data->enabled_features.pipelineStatisticsQuery) {
4453             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
4454                             VALIDATION_ERROR_11c0062e,
4455                             "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
4456                             "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
4457         }
4458     }
4459
4460     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4461     if (!skip) {
4462         result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
4463     }
4464     if (result == VK_SUCCESS) {
4465         lock_guard_t lock(global_lock);
4466         QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
4467         qp_node->createInfo = *pCreateInfo;
4468     }
4469     return result;
4470 }
4471
4472 static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4473     COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4474     if (dev_data->instance_data->disabled.destroy_command_pool) return false;
4475     bool skip = false;
4476     if (cp_state) {
4477         // Verify that command buffers in pool are complete (not in-flight)
4478         skip |= checkCommandBuffersInFlight(dev_data, cp_state, "destroy command pool with", VALIDATION_ERROR_24000052);
4479     }
4480     return skip;
4481 }
4482
4483 static void PreCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
4484     COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
4485     // Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
4486     // "When a pool is destroyed, all command buffers allocated from the pool are freed."
4487     if (cp_state) {
4488         // Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
4489         std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
4490         FreeCommandBufferStates(dev_data, cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
4491         dev_data->commandPoolMap.erase(pool);
4492     }
4493 }
4494
4495 // Destroy commandPool along with all of the commandBuffers allocated from that pool
4496 VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
4497     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4498     unique_lock_t lock(global_lock);
4499     bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool);
4500     if (!skip) {
4501         // Pre-record to avoid Destroy/Create race
4502         PreCallRecordDestroyCommandPool(dev_data, commandPool);
4503         lock.unlock();
4504         dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
4505     }
4506 }
4507
4508 VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
4509     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4510     bool skip = false;
4511
4512     unique_lock_t lock(global_lock);
4513     auto pPool = GetCommandPoolNode(dev_data, commandPool);
4514     skip |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_32800050);
4515     lock.unlock();
4516
4517     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4518
4519     VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
4520
4521     // Reset all of the CBs allocated from this pool
4522     if (VK_SUCCESS == result) {
4523         lock.lock();
4524         for (auto cmdBuffer : pPool->commandBuffers) {
4525             ResetCommandBufferState(dev_data, cmdBuffer);
4526         }
4527         lock.unlock();
4528     }
4529     return result;
4530 }
4531
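// vkResetFences: a fence that is still in flight must not be reset. On success, internally-scoped
// fences return to the unsignaled state and fences with temporary external scope revert to internal
// scope.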
4532 VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
4533     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4534     bool skip = false;
4535     unique_lock_t lock(global_lock);
4536     for (uint32_t i = 0; i < fenceCount; ++i) {
4537         auto pFence = GetFenceNode(dev_data, pFences[i]);
4538         if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
4539             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4540                             HandleToUint64(pFences[i]), VALIDATION_ERROR_32e008c6, "Fence 0x%" PRIx64 " is in use.",
4541                             HandleToUint64(pFences[i]));
4542         }
4543     }
4544     lock.unlock();
4545
4546     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4547
4548     VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
4549
4550     if (result == VK_SUCCESS) {
4551         lock.lock();
4552         for (uint32_t i = 0; i < fenceCount; ++i) {
4553             auto pFence = GetFenceNode(dev_data, pFences[i]);
4554             if (pFence) {
4555                 if (pFence->scope == kSyncScopeInternal) {
4556                     pFence->state = FENCE_UNSIGNALED;
4557                 } else if (pFence->scope == kSyncScopeExternalTemporary) {
4558                     pFence->scope = kSyncScopeInternal;
4559                 }
4560             }
4561         }
4562         lock.unlock();
4563     }
4564
4565     return result;
4566 }
4567
4568 // For given cb_nodes, invalidate them and track object causing invalidation
4569 void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
4570     for (auto cb_node : cb_nodes) {
4571         if (cb_node->state == CB_RECORDING) {
4572             log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4573                     HandleToUint64(cb_node->commandBuffer), DRAWSTATE_INVALID_COMMAND_BUFFER,
4574                     "Invalidating a command buffer that's currently being recorded: 0x%" PRIx64 ".",
4575                     HandleToUint64(cb_node->commandBuffer));
4576             cb_node->state = CB_INVALID_INCOMPLETE;
4577         } else if (cb_node->state == CB_RECORDED) {
4578             cb_node->state = CB_INVALID_COMPLETE;
4579         }
4580         cb_node->broken_bindings.push_back(obj);
4581
4582         // if secondary, then propagate the invalidation to the primaries that will call us.
4583         if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
4584             invalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
4585         }
4586     }
4587 }
4588
4589 static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
4590                                               FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
4591     *framebuffer_state = GetFramebufferState(dev_data, framebuffer);
4592     *obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
4593     if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
4594     bool skip = false;
4595     if (*framebuffer_state) {
4596         skip |=
4597             ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, "vkDestroyFramebuffer", VALIDATION_ERROR_250006f8);
4598     }
4599     return skip;
4600 }
4601
4602 static void PreCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
4603                                             VK_OBJECT obj_struct) {
4604     invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
4605     dev_data->frameBufferMap.erase(framebuffer);
4606 }
4607
4608 VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
4609     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4610     FRAMEBUFFER_STATE *framebuffer_state = nullptr;
4611     VK_OBJECT obj_struct;
4612     unique_lock_t lock(global_lock);
4613     bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
4614     if (!skip) {
4615         if (framebuffer != VK_NULL_HANDLE) {
4616             // Pre-record to avoid Destroy/Create race
4617             PreCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
4618         }
4619         lock.unlock();
4620         dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
4621     }
4622 }
4623
4624 static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
4625                                              VK_OBJECT *obj_struct) {
4626     *rp_state = GetRenderPassState(dev_data, render_pass);
4627     *obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
4628     if (dev_data->instance_data->disabled.destroy_renderpass) return false;
4629     bool skip = false;
4630     if (*rp_state) {
4631         skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, "vkDestroyRenderPass", VALIDATION_ERROR_264006d2);
4632     }
4633     return skip;
4634 }
4635
4636 static void PreCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
4637                                            VK_OBJECT obj_struct) {
4638     invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
4639     dev_data->renderPassMap.erase(render_pass);
4640 }
4641
4642 VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
4643     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4644     RENDER_PASS_STATE *rp_state = nullptr;
4645     VK_OBJECT obj_struct;
4646     unique_lock_t lock(global_lock);
4647     bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
4648     if (!skip) {
4649         if (renderPass != VK_NULL_HANDLE) {
4650             // Pre-record to avoid Destroy/Create race
4651             PreCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
4652         }
4653         lock.unlock();
4654         dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
4655     }
4656 }
4657
4658 VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
4659                                             const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
4660     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4661     unique_lock_t lock(global_lock);
4662     bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
4663     lock.unlock();
4664
4665     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4666     VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
4667
4668     if (VK_SUCCESS == result) {
4669         lock.lock();
4670         PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
4671         lock.unlock();
4672     }
4673     return result;
4674 }
4675
4676 VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
4677                                                 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
4678     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4679     unique_lock_t lock(global_lock);
4680     bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
4681     lock.unlock();
4682     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4683     VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
4684     if (VK_SUCCESS == result) {
4685         lock.lock();
4686         PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
4687         lock.unlock();
4688     }
4689     return result;
4690 }
4691
4692 // Access helper functions for external modules
4693 VkFormatProperties GetFormatProperties(core_validation::layer_data *device_data, VkFormat format) {
4694     VkFormatProperties format_properties;
4695     instance_layer_data *instance_data =
4696         GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4697     instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &format_properties);
4698     return format_properties;
4699 }
4700
4701 VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
4702                                   VkImageFormatProperties *pImageFormatProperties) {
4703     instance_layer_data *instance_data =
4704         GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
4705     return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(
4706         device_data->physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags,
4707         pImageFormatProperties);
4708 }
4709
4710 const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
4711
4712 const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(core_validation::layer_data *device_data) {
4713     return &device_data->phys_dev_props;
4714 }
4715
4716 const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
4717
4718 std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
4719     return &device_data->imageMap;
4720 }
4721
4722 std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
4723     return &device_data->imageSubresourceMap;
4724 }
4725
4726 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
4727     return &device_data->imageLayoutMap;
4728 }
4729
4730 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
4731     return &device_data->imageLayoutMap;
4732 }
4733
4734 std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
4735     return &device_data->bufferMap;
4736 }
4737
4738 std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
4739     return &device_data->bufferViewMap;
4740 }
4741
4742 std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
4743     return &device_data->imageViewMap;
4744 }
4745
4746 const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) { return &device_data->phys_dev_properties; }
4747
4748 const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
4749
4750 const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *GetEnabledDescriptorIndexingFeatures(const layer_data *device_data) {
4751     return &device_data->phys_dev_ext_props.descriptor_indexing_features;
4752 }
4753
4754 const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
4755
4756 VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
4757                                            const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
4758     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4759     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4760     bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
4761     if (!skip) {
4762         result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
4763     }
4764     if (VK_SUCCESS == result) {
4765         lock_guard_t lock(global_lock);
4766         PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
4767     }
4768     return result;
4769 }
4770
4771 VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
4772                                                const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
4773     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4774     unique_lock_t lock(global_lock);
4775     bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
4776     lock.unlock();
4777     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
4778     VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
4779     if (VK_SUCCESS == result) {
4780         lock.lock();
4781         PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
4782         lock.unlock();
4783     }
4784
4785     return result;
4786 }
4787
4788 VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
4789                                            const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
4790     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4791     VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
4792     if (VK_SUCCESS == result) {
4793         lock_guard_t lock(global_lock);
4794         auto &fence_node = dev_data->fenceMap[*pFence];
4795         fence_node.fence = *pFence;
4796         fence_node.createInfo = *pCreateInfo;
4797         fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
4798     }
4799     return result;
4800 }
4801
4802 // TODO handle pipeline caches
4803 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
4804                                                    const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
4805     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4806     VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
4807     return result;
4808 }
4809
4810 VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
4811                                                 const VkAllocationCallbacks *pAllocator) {
4812     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4813     // Pre-record to avoid Destroy/Create race (if/when implemented)
4814     dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
4815 }
4816
4817 VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
4818                                                     void *pData) {
4819     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4820     VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
4821     return result;
4822 }
4823
4824 VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
4825                                                    const VkPipelineCache *pSrcCaches) {
4826     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4827     VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
4828     return result;
4829 }
4830
4831 // Validation cache:
4832 // CV is the bottommost implementor of this extension. Don't pass calls down.
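// The VkValidationCacheEXT handle is simply a pointer to a layer-owned ValidationCache object (see
// the casts in the Destroy/GetData/Merge entry points below).
//
// Illustrative application-side usage of VK_EXT_validation_cache (a sketch only, not exercised by
// the layer; entry points would be fetched with vkGetDeviceProcAddr and error handling is omitted;
// shader_module_ci stands in for an application's VkShaderModuleCreateInfo):
//
//     VkValidationCacheCreateInfoEXT cache_ci = {VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT};
//     VkValidationCacheEXT cache = VK_NULL_HANDLE;
//     vkCreateValidationCacheEXT(device, &cache_ci, nullptr, &cache);
//
//     // Chain the cache into shader module creation so SPIR-V validation results can be reused.
//     VkShaderModuleValidationCacheCreateInfoEXT vc_info = {
//         VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT, nullptr, cache};
//     shader_module_ci.pNext = &vc_info;
//
//     // Later, retrieve the cache contents (e.g. to persist them to disk), then destroy the cache.
//     size_t data_size = 0;
//     vkGetValidationCacheDataEXT(device, cache, &data_size, nullptr);
//     std::vector<uint8_t> data(data_size);
//     vkGetValidationCacheDataEXT(device, cache, &data_size, data.data());
//     vkDestroyValidationCacheEXT(device, cache, nullptr);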
4833 VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
4834                                                         const VkAllocationCallbacks *pAllocator,
4835                                                         VkValidationCacheEXT *pValidationCache) {
4836     *pValidationCache = ValidationCache::Create(pCreateInfo);
4837     return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
4838 }
4839
4840 VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
4841                                                      const VkAllocationCallbacks *pAllocator) {
4842     delete (ValidationCache *)validationCache;
4843 }
4844
4845 VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
4846                                                          void *pData) {
4847     size_t inSize = *pDataSize;
4848     ((ValidationCache *)validationCache)->Write(pDataSize, pData);
4849     return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
4850 }
4851
4852 VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
4853                                                         const VkValidationCacheEXT *pSrcCaches) {
4854     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4855     bool skip = false;
4856     auto dst = (ValidationCache *)dstCache;
4857     auto src = (ValidationCache const *const *)pSrcCaches;
4858     VkResult result = VK_SUCCESS;
4859     for (uint32_t i = 0; i < srcCacheCount; i++) {
4860         if (src[i] == dst) {
4861             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
4862                             0, VALIDATION_ERROR_3e600c00,
4863                             "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
4864                             HandleToUint64(dstCache));
4865             result = VK_ERROR_VALIDATION_FAILED_EXT;
4866         }
4867         if (!skip) {
4868             dst->Merge(src[i]);
4869         }
4870     }
4871
4872     return result;
4873 }
4874
4875 // utility function to set collective state for pipeline
4876 void set_pipeline_state(PIPELINE_STATE *pPipe) {
4877     // If any enabled attachment uses a constant blend factor, flag that blend constants are required
4878     if (pPipe->graphicsPipelineCI.pColorBlendState) {
4879         for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
4880             if (VK_TRUE == pPipe->attachments[i].blendEnable) {
4881                 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4882                      (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4883                     ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4884                      (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4885                     ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4886                      (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
4887                     ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
4888                      (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
4889                     pPipe->blendConstantsEnabled = true;
4890                 }
4891             }
4892         }
4893     }
4894 }
4895
4896 VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4897                                                        const VkGraphicsPipelineCreateInfo *pCreateInfos,
4898                                                        const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4899     // The order of operations here is a little convoluted but gets the job done
4900     //  1. Pipeline create state is first shadowed into PIPELINE_STATE struct
4901     //  2. Create state is then validated (which uses flags setup during shadowing)
4902     //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
4903     bool skip = false;
4904     vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
4905     pipe_state.reserve(count);
4906     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4907
4908     uint32_t i = 0;
4909     unique_lock_t lock(global_lock);
4910
4911     for (i = 0; i < count; i++) {
4912         pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
4913         pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i], GetRenderPassStateSharedPtr(dev_data, pCreateInfos[i].renderPass));
4914         pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4915     }
4916
4917     for (i = 0; i < count; i++) {
4918         skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
4919     }
4920
4921     lock.unlock();
4922
4923     for (i = 0; i < count; i++) {
4924         skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
4925     }
4926
4927     if (skip) {
4928         for (i = 0; i < count; i++) {
4929             pPipelines[i] = VK_NULL_HANDLE;
4930         }
4931         return VK_ERROR_VALIDATION_FAILED_EXT;
4932     }
4933
4934     auto result =
4935         dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4936     lock.lock();
4937     for (i = 0; i < count; i++) {
4938         if (pPipelines[i] != VK_NULL_HANDLE) {
4939             pipe_state[i]->pipeline = pPipelines[i];
4940             dev_data->pipelineMap[pPipelines[i]] = std::move(pipe_state[i]);
4941         }
4942     }
4943
4944     return result;
4945 }
4946
4947 VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
4948                                                       const VkComputePipelineCreateInfo *pCreateInfos,
4949                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
4950     bool skip = false;
4951
4952     vector<std::unique_ptr<PIPELINE_STATE>> pPipeState;
4953     pPipeState.reserve(count);
4954     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4955
4956     uint32_t i = 0;
4957     unique_lock_t lock(global_lock);
4958     for (i = 0; i < count; i++) {
4959         // Create and initialize internal tracking data structure
4960         pPipeState.push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
4961         pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
4962         pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
4963
4964         // TODO: Add Compute Pipeline Verification
4965         skip |= validate_compute_pipeline(dev_data, pPipeState[i].get());
4966     }
4967
4968     if (skip) {
4969         for (i = 0; i < count; i++) {
4970             pPipelines[i] = VK_NULL_HANDLE;
4971         }
4972         return VK_ERROR_VALIDATION_FAILED_EXT;
4973     }
4974
4975     lock.unlock();
4976     auto result =
4977         dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
4978     lock.lock();
4979     for (i = 0; i < count; i++) {
4980         if (pPipelines[i] != VK_NULL_HANDLE) {
4981             pPipeState[i]->pipeline = pPipelines[i];
4982             dev_data->pipelineMap[pPipelines[i]] = std::move(pPipeState[i]);
4983         }
4984     }
4985
4986     return result;
4987 }
4988
4989 VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
4990                                              const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
4991     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
4992     VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
4993     if (VK_SUCCESS == result) {
4994         lock_guard_t lock(global_lock);
4995         dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
4996     }
4997     return result;
4998 }
4999
5000 static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
5001     if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
5002     return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
5003         dev_data->report_data, create_info, dev_data->extensions.vk_khr_push_descriptor,
5004         dev_data->phys_dev_ext_props.max_push_descriptors, dev_data->extensions.vk_ext_descriptor_indexing,
5005         &dev_data->phys_dev_ext_props.descriptor_indexing_features);
5006 }
5007
5008 static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
5009                                                     VkDescriptorSetLayout set_layout) {
5010     dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
5011 }
5012
5013 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5014                                                          const VkAllocationCallbacks *pAllocator,
5015                                                          VkDescriptorSetLayout *pSetLayout) {
5016     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5017     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5018     unique_lock_t lock(global_lock);
5019     bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
5020     if (!skip) {
5021         lock.unlock();
5022         result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5023         if (VK_SUCCESS == result) {
5024             lock.lock();
5025             PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
5026         }
5027     }
5028     return result;
5029 }
5030
5031 // Used by CreatePipelineLayout and CmdPushConstants.
5032 // Note that the index argument is optional and only used by CreatePipelineLayout.
5033 static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5034                                       const char *caller_name, uint32_t index = 0) {
5035     if (dev_data->instance_data->disabled.push_constant_range) return false;
5036     uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5037     bool skip = false;
5038     // Check that offset + size don't exceed the max.
5039     // Prevent arithmetic overflow here by avoiding addition and testing in this order.
5040     if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5041         // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5042         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5043             if (offset >= maxPushConstantsSize) {
5044                 skip |= log_msg(
5045                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5046                     VALIDATION_ERROR_11a0024c,
5047                     "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u.",
5048                     caller_name, index, offset, maxPushConstantsSize);
5049             }
5050             if (size > maxPushConstantsSize - offset) {
5051                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5052                                 VALIDATION_ERROR_11a00254,
5053                                 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
5054                                 "maxPushConstantsSize of %u.",
5055                                 caller_name, index, offset, size, maxPushConstantsSize);
5056             }
5057         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5058             if (offset >= maxPushConstantsSize) {
5059                 skip |= log_msg(
5060                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5061                     VALIDATION_ERROR_1bc002e4,
5062                     "%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantsSize of %u.",
5063                     caller_name, index, offset, maxPushConstantsSize);
5064             }
5065             if (size > maxPushConstantsSize - offset) {
5066                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5067                                 VALIDATION_ERROR_1bc002e6,
5068                                 "%s call has push constants index %u with offset %u and size %u that exceeds this device's "
5069                                 "maxPushConstantsSize of %u.",
5070                                 caller_name, index, offset, size, maxPushConstantsSize);
5071             }
5072         } else {
5073             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5074                             DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
5075         }
5076     }
5077     // size needs to be non-zero and a multiple of 4.
5078     if ((size == 0) || ((size & 0x3) != 0)) {
5079         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5080             if (size == 0) {
5081                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5082                                 VALIDATION_ERROR_11a00250,
5083                                 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
5084                                 index, size);
5085             }
5086             if (size & 0x3) {
5087                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5088                                 VALIDATION_ERROR_11a00252,
5089                                 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
5090                                 index, size);
5091             }
5092         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5093             if (size == 0) {
5094                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5095                                 VALIDATION_ERROR_1bc2c21b,
5096                                 "%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
5097                                 index, size);
5098             }
5099             if (size & 0x3) {
5100                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5101                                 VALIDATION_ERROR_1bc002e2,
5102                                 "%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
5103                                 index, size);
5104             }
5105         } else {
5106             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5107                             DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
5108         }
5109     }
5110     // offset needs to be a multiple of 4.
5111     if ((offset & 0x3) != 0) {
5112         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5113             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5114                             VALIDATION_ERROR_11a0024e,
5115                             "%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
5116                             index, offset);
5117         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5118             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5119                             VALIDATION_ERROR_1bc002e0, "%s call has push constants with offset %u. Offset must be a multiple of 4.",
5120                             caller_name, offset);
5121         } else {
5122             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5123                             DRAWSTATE_INTERNAL_ERROR, "%s caller not supported.", caller_name);
5124         }
5125     }
5126     return skip;
5127 }
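// For instance, assuming a maxPushConstantsSize limit of 128, the checks above would accept a
// range of {offset = 0, size = 64}, reject {offset = 120, size = 16} (120 + 16 exceeds 128),
// reject any zero-sized range, and reject any offset or size that is not a multiple of 4.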
5128
5129 enum DSL_DESCRIPTOR_GROUPS {
5130     DSL_TYPE_SAMPLERS = 0,
5131     DSL_TYPE_UNIFORM_BUFFERS,
5132     DSL_TYPE_STORAGE_BUFFERS,
5133     DSL_TYPE_SAMPLED_IMAGES,
5134     DSL_TYPE_STORAGE_IMAGES,
5135     DSL_TYPE_INPUT_ATTACHMENTS,
5136     DSL_NUM_DESCRIPTOR_GROUPS
5137 };
5138
5139 // Used by PreCallValidateCreatePipelineLayout.
5140 // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS holding the maximum number of descriptors of each group used in any single pipeline stage.
5141 std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
5142     const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
5143     bool skip_update_after_bind) {
5144     // Identify active pipeline stages
5145     std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
5146                                                    VK_SHADER_STAGE_COMPUTE_BIT};
5147     if (dev_data->enabled_features.geometryShader) {
5148         stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
5149     }
5150     if (dev_data->enabled_features.tessellationShader) {
5151         stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
5152         stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
5153     }
5154
5155     // Allow iteration over enum values
5156     std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {DSL_TYPE_SAMPLERS,       DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS,
5157                                                      DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES,  DSL_TYPE_INPUT_ATTACHMENTS};
5158
5159     // Sum by layouts per stage, then pick max of stages per type
5160     std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // max descriptor sum among all pipeline stages
5161     for (auto stage : stage_flags) {
5162         std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS);  // per-stage sums
5163         for (auto dsl : set_layouts) {
5164             if (skip_update_after_bind &&
5165                 (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
5166                 continue;
5167             }
5168
5169             for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5170                 const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5171                 if (0 != (stage & binding->stageFlags)) {
5172                     switch (binding->descriptorType) {
5173                         case VK_DESCRIPTOR_TYPE_SAMPLER:
5174                             stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5175                             break;
5176                         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
5177                         case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
5178                             stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
5179                             break;
5180                         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
5181                         case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
5182                             stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
5183                             break;
5184                         case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
5185                         case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
5186                             stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5187                             break;
5188                         case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
5189                         case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
5190                             stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
5191                             break;
5192                         case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
5193                             stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
5194                             stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
5195                             break;
5196                         case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
5197                             stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
5198                             break;
5199                         default:
5200                             break;
5201                     }
5202                 }
5203             }
5204         }
5205         for (auto type : dsl_groups) {
5206             max_sum[type] = std::max(stage_sum[type], max_sum[type]);
5207         }
5208     }
5209     return max_sum;
5210 }
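// Example: a uniform buffer binding with descriptorCount = 4 that is visible to both the vertex
// and fragment stages contributes 4 to each stage's DSL_TYPE_UNIFORM_BUFFERS sum, so the returned
// per-stage maximum is 4, not 8. A COMBINED_IMAGE_SAMPLER binding is counted toward both
// DSL_TYPE_SAMPLED_IMAGES and DSL_TYPE_SAMPLERS, matching how the per-stage limits count it.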
5211
5212 // Used by PreCallValidateCreatePipelineLayout.
5213 // Returns an array of size VK_DESCRIPTOR_TYPE_RANGE_SIZE of the summed descriptors by type.
5214 // Note: descriptors only count against the limit once even if used by multiple stages.
5215 std::valarray<uint32_t> GetDescriptorSum(
5216     const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
5217     bool skip_update_after_bind) {
5218     std::valarray<uint32_t> sum_by_type(0U, VK_DESCRIPTOR_TYPE_RANGE_SIZE);
5219     for (auto dsl : set_layouts) {
5220         if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
5221             continue;
5222         }
5223
5224         for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
5225             const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
5226             sum_by_type[binding->descriptorType] += binding->descriptorCount;
5227         }
5228     }
5229     return sum_by_type;
5230 }
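// Unlike the per-stage maximum above, this sum ignores stageFlags entirely: a binding visible to
// both the vertex and fragment stages still counts only once toward the maxDescriptorSet* limits.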
5231
5232 static bool PreCallValidateCreatePipelineLayout(const layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo) {
5233     bool skip = false;
5234
5235     // Validate layout count against device physical limit
5236     if (pCreateInfo->setLayoutCount > dev_data->phys_dev_props.limits.maxBoundDescriptorSets) {
5237         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5238                         VALIDATION_ERROR_0fe0023c,
5239                         "vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
5240                         pCreateInfo->setLayoutCount, dev_data->phys_dev_props.limits.maxBoundDescriptorSets);
5241     }
5242
5243     // Validate Push Constant ranges
5244     uint32_t i, j;
5245     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5246         skip |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5247                                           pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5248         if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5249             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5250                             VALIDATION_ERROR_11a2dc03, "vkCreatePipelineLayout() call has no stageFlags set.");
5251         }
5252     }
5253
5254     // As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
5255     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5256         for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5257             if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
5258                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5259                                 VALIDATION_ERROR_0fe00248,
5260                                 "vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
5261             }
5262         }
5263     }
5264
5265     // Early-out
5266     if (skip) return skip;
5267
5268     std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
5269     unsigned int push_descriptor_set_count = 0;
5270     {
5271         unique_lock_t lock(global_lock);  // Lock while accessing global state
5272         for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5273             set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5274             if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
5275         }
5276     }  // Unlock
5277
5278     if (push_descriptor_set_count > 1) {
5279         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5280                         VALIDATION_ERROR_0fe0024a, "vkCreatePipelineLayout() Multiple push descriptor sets found.");
5281     }
5282
5283     // Max descriptors by type, within a single pipeline stage
5284     std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(dev_data, set_layouts, true);
5285     // Samplers
5286     if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
5287         skip |=
5288             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5289                     VALIDATION_ERROR_0fe0023e,
5290                     "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
5291                     "maxPerStageDescriptorSamplers limit (%d).",
5292                     max_descriptors_per_stage[DSL_TYPE_SAMPLERS], dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers);
5293     }
5294
5295     // Uniform buffers
5296     if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
5297         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5298                         VALIDATION_ERROR_0fe00240,
5299                         "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
5300                         "maxPerStageDescriptorUniformBuffers limit (%d).",
5301                         max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
5302                         dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
5303     }
5304
5305     // Storage buffers
5306     if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
5307         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5308                         VALIDATION_ERROR_0fe00242,
5309                         "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
5310                         "maxPerStageDescriptorStorageBuffers limit (%d).",
5311                         max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
5312                         dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
5313     }
5314
5315     // Sampled images
5316     if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
5317         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5318                         VALIDATION_ERROR_0fe00244,
5319                         "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
5320                         "maxPerStageDescriptorSampledImages limit (%d).",
5321                         max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
5322                         dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages);
5323     }
5324
5325     // Storage images
5326     if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
5327         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5328                         VALIDATION_ERROR_0fe00246,
5329                         "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
5330                         "maxPerStageDescriptorStorageImages limit (%d).",
5331                         max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
5332                         dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages);
5333     }
5334
5335     // Input attachments
5336     if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
5337         dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
5338         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5339                         VALIDATION_ERROR_0fe00d18,
5340                         "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
5341                         "maxPerStageDescriptorInputAttachments limit (%d).",
5342                         max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
5343                         dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
5344     }
5345
5346     // Total descriptors by type
5347     //
5348     std::valarray<uint32_t> sum_all_stages = GetDescriptorSum(dev_data, set_layouts, true);
5349     // Samplers
5350     uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
5351     if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
5352         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5353                         VALIDATION_ERROR_0fe00d1a,
5354                         "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
5355                         "maxDescriptorSetSamplers limit (%d).",
5356                         sum, dev_data->phys_dev_props.limits.maxDescriptorSetSamplers);
5357     }
5358
5359     // Uniform buffers
5360     if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
5361         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5362                         VALIDATION_ERROR_0fe00d1c,
5363                         "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
5364                         "maxDescriptorSetUniformBuffers limit (%d).",
5365                         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
5366                         dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers);
5367     }
5368
5369     // Dynamic uniform buffers
5370     if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
5371         dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
5372         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5373                         VALIDATION_ERROR_0fe00d1e,
5374                         "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
5375                         "maxDescriptorSetUniformBuffersDynamic limit (%d).",
5376                         sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
5377                         dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
5378     }
5379
5380     // Storage buffers
5381     if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
5382         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5383                         VALIDATION_ERROR_0fe00d20,
5384                         "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
5385                         "maxDescriptorSetStorageBuffers limit (%d).",
5386                         sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
5387                         dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers);
5388     }
5389
5390     // Dynamic storage buffers
5391     if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
5392         dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
5393         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5394                         VALIDATION_ERROR_0fe00d22,
5395                         "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
5396                         "maxDescriptorSetStorageBuffersDynamic limit (%d).",
5397                         sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
5398                         dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
5399     }
5400
5401     //  Sampled images
5402     sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
5403           sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
5404     if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
5405         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5406                         VALIDATION_ERROR_0fe00d24,
5407                         "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
5408                         "maxDescriptorSetSampledImages limit (%d).",
5409                         sum, dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages);
5410     }
5411
5412     //  Storage images
5413     sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
5414     if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
5415         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5416                         VALIDATION_ERROR_0fe00d26,
5417                         "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
5418                         "maxDescriptorSetStorageImages limit (%d).",
5419                         sum, dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages);
5420     }
5421
5422     // Input attachments
5423     if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
5424         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5425                         VALIDATION_ERROR_0fe00d28,
5426                         "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
5427                         "maxDescriptorSetInputAttachments limit (%d).",
5428                         sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
5429                         dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments);
5430     }
5431
5432     if (dev_data->extensions.vk_ext_descriptor_indexing) {
5433         // XXX TODO: replace with correct VU messages
5434
5435         // Max descriptors by type, within a single pipeline stage
5436         std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
5437             GetDescriptorCountMaxPerStage(dev_data, set_layouts, false);
5438         // Samplers
5439         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
5440             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
5441             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5442                             VALIDATION_ERROR_0fe0179c,
5443                             "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
5444                             "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
5445                             max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
5446                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
5447         }
5448
5449         // Uniform buffers
5450         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
5451             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
5452             skip |=
5453                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5454                         VALIDATION_ERROR_0fe0179e,
5455                         "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
5456                         "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
5457                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
5458                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
5459         }
5460
5461         // Storage buffers
5462         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
5463             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
5464             skip |=
5465                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5466                         VALIDATION_ERROR_0fe017a0,
5467                         "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
5468                         "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
5469                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
5470                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
5471         }
5472
5473         // Sampled images
5474         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
5475             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
5476             skip |=
5477                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5478                         VALIDATION_ERROR_0fe017a2,
5479                         "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
5480                         "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
5481                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
5482                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
5483         }
5484
5485         // Storage images
5486         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
5487             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
5488             skip |=
5489                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5490                         VALIDATION_ERROR_0fe017a4,
5491                         "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
5492                         "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
5493                         max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
5494                         dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
5495         }
5496
5497         // Input attachments
5498         if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
5499             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
5500             skip |= log_msg(
5501                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5502                 VALIDATION_ERROR_0fe017a6,
5503                 "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
5504                 "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
5505                 max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
5506                 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
5507         }
5508
5509         // Total descriptors by type, summed across all pipeline stages
5510         //
5511         std::valarray<uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(dev_data, set_layouts, false);
5512         // Samplers
5513         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
5514               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
5515         if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
5516             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5517                             VALIDATION_ERROR_0fe017b8,
5518                             "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
5519                             "maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
5520                             sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
5521         }
5522
5523         // Uniform buffers
5524         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
5525             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
5526             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5527                             VALIDATION_ERROR_0fe017ba,
5528                             "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
5529                             "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
5530                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
5531                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
5532         }
5533
5534         // Dynamic uniform buffers
5535         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
5536             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
5537             skip |= log_msg(
5538                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5539                 VALIDATION_ERROR_0fe017bc,
5540                 "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
5541                 "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
5542                 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
5543                 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
5544         }
5545
5546         // Storage buffers
5547         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
5548             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
5549             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5550                             VALIDATION_ERROR_0fe017be,
5551                             "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
5552                             "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
5553                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
5554                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
5555         }
5556
5557         // Dynamic storage buffers
5558         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
5559             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
5560             skip |= log_msg(
5561                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5562                 VALIDATION_ERROR_0fe017c0,
5563                 "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
5564                 "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
5565                 sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
5566                 dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
5567         }
5568
5569         //  Sampled images
5570         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
5571               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
5572               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
5573         if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
5574             skip |=
5575                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5576                         VALIDATION_ERROR_0fe017c2,
5577                         "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
5578                         "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
5579                         sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
5580         }
5581
5582         //  Storage images
5583         sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
5584               sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
5585         if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
5586             skip |=
5587                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5588                         VALIDATION_ERROR_0fe017c4,
5589                         "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
5590                         "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
5591                         sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
5592         }
5593
5594         // Input attachments
5595         if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
5596             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
5597             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
5598                             VALIDATION_ERROR_0fe017c6,
5599                             "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
5600                             "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
5601                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
5602                             dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
5603         }
5604     }
5605     return skip;
5606 }
5607
5608 // For repeatable sorting, not very useful for "memory in range" search
5609 struct PushConstantRangeCompare {
5610     bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
5611         if (lhs->offset == rhs->offset) {
5612             if (lhs->size == rhs->size) {
5613                 // The comparison is arbitrary, but avoids false aliasing by comparing all fields.
5614                 return lhs->stageFlags < rhs->stageFlags;
5615             }
5616             // If the offsets are the same then sorting by the end of range is useful for validation
5617             return lhs->size < rhs->size;
5618         }
5619         return lhs->offset < rhs->offset;
5620     }
5621 };
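// With this ordering, ranges sort primarily by offset and then by size, e.g.
// {offset 0, size 16} < {offset 0, size 32} < {offset 16, size 8}.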
5622
5623 static PushConstantRangesDict push_constant_ranges_dict;
5624
5625 PushConstantRangesId get_canonical_id(const VkPipelineLayoutCreateInfo *info) {
5626     if (!info->pPushConstantRanges) {
5627         // Hand back the empty entry (creating as needed)...
5628         return push_constant_ranges_dict.look_up(PushConstantRanges());
5629     }
5630
5631     // Sort the input ranges to ensure equivalent ranges map to the same id
5632     std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
5633     for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
5634         sorted.insert(info->pPushConstantRanges + i);
5635     }
5636
5637     PushConstantRanges ranges;
         ranges.reserve(sorted.size());
5638     for (const auto range : sorted) {
5639         ranges.emplace_back(*range);
5640     }
5641     return push_constant_ranges_dict.look_up(std::move(ranges));
5642 }
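// Consequently, two VkPipelineLayoutCreateInfos whose push constant ranges differ only in order,
// e.g. {{VK_SHADER_STAGE_VERTEX_BIT, 0, 16}, {VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16}} versus the
// reverse listing, are expected to map to the same canonical id.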
5643
5644 // Dictionary of the canonical form of a pipeline layout's list of descriptor set layouts
5645 static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;
5646
5647 // Dictionary of canonical form of the "compatible for set" records
5648 static PipelineLayoutCompatDict pipeline_layout_compat_dict;
5649
5650 static PipelineLayoutCompatId get_canonical_id(const uint32_t set_index, const PushConstantRangesId pcr_id,
5651                                                const PipelineLayoutSetLayoutsId set_layouts_id) {
5652     return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
5653 }
5654
5655 static void PostCallRecordCreatePipelineLayout(layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
5656                                                const VkPipelineLayout *pPipelineLayout) {
5657     unique_lock_t lock(global_lock);  // Lock while accessing state
5658
5659     PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5660     plNode.layout = *pPipelineLayout;
5661     plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
5662     PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
5663     for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5664         plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5665         set_layouts[i] = plNode.set_layouts[i]->get_layout_id();
5666     }
5667
5668     // Get canonical form IDs for the "compatible for set" contents
5669     plNode.push_constant_ranges = get_canonical_id(pCreateInfo);
5670     auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
5671     plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount);
5672
5673     // Create table of "compatible for set N" canonical forms for trivial accept validation
5674     for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5675         plNode.compat_for_set.emplace_back(get_canonical_id(i, plNode.push_constant_ranges, set_layouts_id));
5676     }
5677
5678     // Implicit unlock
5679 }
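// The compat_for_set entries are what make pipeline-layout compatibility checks cheap: two layouts
// built from the same descriptor set layouts through set N and the same push constant ranges are
// intended to receive the same canonical id for set N, so "compatible for set N" can reduce to a
// simple id comparison.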
5680
5681 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5682                                                     const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5683     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5684
5685     bool skip = PreCallValidateCreatePipelineLayout(dev_data, pCreateInfo);
5686     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5687
5688     VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5689
5690     if (VK_SUCCESS == result) {
5691         PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
5692     }
5693     return result;
5694 }
5695
5696 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
5697                                                     const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
5698     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5699     VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
5700     if (VK_SUCCESS == result) {
5701         DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
5702         if (NULL == pNewNode) {
5703             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5704                         HandleToUint64(*pDescriptorPool), DRAWSTATE_OUT_OF_MEMORY,
5705                         "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
5706                 return VK_ERROR_VALIDATION_FAILED_EXT;
5707         } else {
5708             lock_guard_t lock(global_lock);
5709             dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
5710         }
5711     } else {
5712         // Need to do anything if pool create fails?
5713     }
5714     return result;
5715 }
5716
5717 VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
5718                                                    VkDescriptorPoolResetFlags flags) {
5719     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5720
5721     unique_lock_t lock(global_lock);
5722     // Make sure sets being destroyed are not currently in-use
5723     bool skip = validateIdleDescriptorSetForPoolReset(dev_data, descriptorPool);
5724     lock.unlock();
5725
5726     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5727
5728     VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
5729     if (VK_SUCCESS == result) {
5730         lock.lock();
5731         clearDescriptorPool(dev_data, device, descriptorPool, flags);
5732         lock.unlock();
5733     }
5734     return result;
5735 }
5736 // Ensure the pool contains enough descriptors and descriptor sets to satisfy
5737 // an allocation request. Fills common_data with the total number of descriptors of each type required,
5738 // as well as DescriptorSetLayout ptrs used for later update.
5739 static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5740                                                   cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5741     // Always update common data
5742     cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
5743     if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
5744     // All state checks for AllocateDescriptorSets are done in a single function
5745     return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
5746 }
5747 // Allocation state was good and call down chain was made so update state based on allocating descriptor sets
5748 static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5749                                                  VkDescriptorSet *pDescriptorSets,
5750                                                  const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5751     // All the updates are contained in a single cvdescriptorset function
5752     cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
5753                                                    &dev_data->setMap, dev_data);
5754 }
5755
5756 // TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
5757 VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5758                                                       VkDescriptorSet *pDescriptorSets) {
5759     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5760     unique_lock_t lock(global_lock);
5761     cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
5762     bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
5763     lock.unlock();
5764
5765     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
5766
5767     VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
5768
5769     if (VK_SUCCESS == result) {
5770         lock.lock();
5771         PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
5772         lock.unlock();
5773     }
5774     return result;
5775 }
5776 // Verify state before freeing DescriptorSets
5777 static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
5778                                               const VkDescriptorSet *descriptor_sets) {
5779     if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
5780     bool skip = false;
5781     // First make sure sets being destroyed are not currently in-use
5782     for (uint32_t i = 0; i < count; ++i) {
5783         if (descriptor_sets[i] != VK_NULL_HANDLE) {
5784             skip |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
5785         }
5786     }
5787
5788     DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
5789     if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
5790         // Can't Free from a NON_FREE pool
5791         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5792                         HandleToUint64(pool), VALIDATION_ERROR_28600270,
5793                         "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
5794                         "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
5795     }
5796     return skip;
5797 }
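// In other words, sets allocated from a pool created without
// VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT can only be reclaimed with
// vkResetDescriptorPool or vkDestroyDescriptorPool, not with vkFreeDescriptorSets.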
5798 // Sets are being returned to the pool so update the pool state
5799 static void PreCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
5800                                             const VkDescriptorSet *descriptor_sets) {
5801     DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
5802     // Update available descriptor sets in pool
5803     pool_state->availableSets += count;
5804
5805     // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
5806     for (uint32_t i = 0; i < count; ++i) {
5807         if (descriptor_sets[i] != VK_NULL_HANDLE) {
5808             auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
5809             uint32_t type_index = 0, descriptor_count = 0;
5810             for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
5811                 type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
5812                 descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
5813                 pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
5814             }
5815             freeDescriptorSet(dev_data, descriptor_set);
5816             pool_state->sets.erase(descriptor_set);
5817         }
5818     }
5819 }
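// For example, freeing a single set whose layout consumed three UNIFORM_BUFFER descriptors and one
// COMBINED_IMAGE_SAMPLER descriptor returns those counts to availableDescriptorTypeCount and adds one
// slot back to availableSets, mirroring the bookkeeping performed above.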
5820
5821 VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
5822                                                   const VkDescriptorSet *pDescriptorSets) {
5823     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5824     // Make sure that no sets being destroyed are in-flight
5825     unique_lock_t lock(global_lock);
5826     bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
5827
5828     VkResult result;
5829     if (skip) {
5830         result = VK_ERROR_VALIDATION_FAILED_EXT;
5831     } else {
5832         // A race here is invalid (descriptorPool should be externally sync'd), but code defensively against an invalid race
5833         PreCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
5834         lock.unlock();
5835         result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
5836     }
5837     return result;
5838 }
5839 // TODO : This is a Proof-of-concept for core validation architecture
5840 //  Really we'll want to break out these functions to separate files but
5841 //  keeping it all together here to prove out design
5842 // PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
5843 static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5844                                                 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5845                                                 const VkCopyDescriptorSet *pDescriptorCopies) {
5846     if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
5847     // First thing to do is perform map look-ups.
5848     // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
5849     //  so we can't just do a single map look-up up-front, but do them individually in functions below
5850
5851     // Now make call(s) that validate state, but don't perform state updates in this function
5852     // Note: DescriptorSets is unique here in that we don't yet have an instance, so use a helper function in the
5853     //  namespace which will parse params and make calls into specific class instances
5854     return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
5855                                                          descriptorCopyCount, pDescriptorCopies);
5856 }
5857 // PreCallRecord* handles recording state updates prior to calling down the chain to UpdateDescriptorSets()
5858 static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
5859                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5860                                               const VkCopyDescriptorSet *pDescriptorCopies) {
5861     cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5862                                                  pDescriptorCopies);
5863 }
5864
5865 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
5866                                                 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
5867                                                 const VkCopyDescriptorSet *pDescriptorCopies) {
5868     // Only map look-up at top level is for device-level layer_data
5869     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5870     unique_lock_t lock(global_lock);
5871     bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5872                                                     pDescriptorCopies);
5873     if (!skip) {
5874         // Since UpdateDescriptorSets() is void, nothing to check prior to updating state & we can update before call down chain
5875         PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5876                                           pDescriptorCopies);
5877         lock.unlock();
5878         dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
5879                                                       pDescriptorCopies);
5880     }
5881 }
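// Illustrative sketch (not part of the layer's logic): the kind of application update the
// validate/record pair above is guarding. 'device', 'set', and 'buffer_info' (a VkDescriptorBufferInfo)
// are hypothetical.
//     VkWriteDescriptorSet write = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET};
//     write.dstSet = set;
//     write.dstBinding = 0;
//     write.descriptorCount = 1;
//     write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     write.pBufferInfo = &buffer_info;
//     vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);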
5882
5883 VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
5884                                                       VkCommandBuffer *pCommandBuffer) {
5885     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
5886     VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
5887     if (VK_SUCCESS == result) {
5888         unique_lock_t lock(global_lock);
5889         auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
5890
5891         if (pPool) {
5892             for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
5893                 // Add command buffer to its commandPool map
5894                 pPool->commandBuffers.insert(pCommandBuffer[i]);
5895                 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
5896                 // Add command buffer to map
5897                 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
5898                 ResetCommandBufferState(dev_data, pCommandBuffer[i]);
5899                 pCB->createInfo = *pCreateInfo;
5900                 pCB->device = device;
5901             }
5902         }
5903         lock.unlock();
5904     }
5905     return result;
5906 }
5907
5908 // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
5909 static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
5910     addCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
5911                             cb_state);
5912     for (auto attachment : fb_state->attachments) {
5913         auto view_state = attachment.view_state;
5914         if (view_state) {
5915             AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
5916         }
5917     }
5918 }
5919
5920 VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
5921     bool skip = false;
5922     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
5923     unique_lock_t lock(global_lock);
5924     // Validate command buffer level
5925     GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
5926     if (cb_node) {
5927         // This implicitly resets the command buffer, so make sure any fence guarding it has completed and then clear memory references
5928         if (cb_node->in_use.load()) {
5929             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5930                             HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
5931                             "Calling vkBeginCommandBuffer() on active command buffer 0x%" PRIx64
5932                             " before it has completed. You must check the command buffer fence before this call.",
5933                             HandleToUint64(commandBuffer));
5934         }
5935         clear_cmd_buf_and_mem_references(dev_data, cb_node);
5936         if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
5937             // Secondary Command Buffer
5938             const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
5939             if (!pInfo) {
5940                 skip |=
5941                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5942                             HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00066,
5943                             "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64 ") must have inheritance info.",
5944                             HandleToUint64(commandBuffer));
5945             } else {
5946                 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
5947                     assert(pInfo->renderPass);
5948                     string errorString = "";
5949                     auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
5950                     if (framebuffer) {
5951                         if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
5952                             // renderPass that framebuffer was created with must be compatible with local renderPass
5953                             skip |=
5954                                 validateRenderPassCompatibility(dev_data, "framebuffer", framebuffer->rp_state.get(),
5955                                                                 "command buffer", GetRenderPassState(dev_data, pInfo->renderPass),
5956                                                                 "vkBeginCommandBuffer()", VALIDATION_ERROR_0280006e);
5957                         }
5958                         // Connect this framebuffer and its children to this cmdBuffer
5959                         AddFramebufferBinding(dev_data, cb_node, framebuffer);
5960                     }
5961                 }
5962                 if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
5963                     (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
5964                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5965                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
5966                                     VALIDATION_ERROR_16e00068,
5967                                     "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
5968                                     ") must not have VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is disabled or the device "
5969                                     "does not support precise occlusion queries.",
5970                                     HandleToUint64(commandBuffer));
5971                 }
5972             }
5973             if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
5974                 auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
5975                 if (renderPass) {
5976                     if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
5977                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5978                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
5979                                         VALIDATION_ERROR_0280006c,
5980                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
5981                                         ") must have a subpass index (%d) that is less than the number of subpasses (%d).",
5982                                         HandleToUint64(commandBuffer), pInfo->subpass, renderPass->createInfo.subpassCount);
5983                     }
5984                 }
5985             }
5986         }
5987         if (CB_RECORDING == cb_node->state) {
5988             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5989                             HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00062,
5990                             "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIx64
5991                             ") in the RECORDING state. Must first call vkEndCommandBuffer().",
5992                             HandleToUint64(commandBuffer));
5993         } else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
5994             VkCommandPool cmdPool = cb_node->createInfo.commandPool;
5995             auto pPool = GetCommandPoolNode(dev_data, cmdPool);
5996             if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
5997                 skip |=
5998                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5999                             HandleToUint64(commandBuffer), VALIDATION_ERROR_16e00064,
6000                             "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIx64
6001                             ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIx64
6002                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6003                             HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
6004             }
6005             ResetCommandBufferState(dev_data, commandBuffer);
6006         }
6007         // Set updated state here in case implicit reset occurs above
6008         cb_node->state = CB_RECORDING;
6009         cb_node->beginInfo = *pBeginInfo;
6010         if (cb_node->beginInfo.pInheritanceInfo) {
6011             cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
6012             cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
6013             // If we are a secondary command buffer and inheriting, update the items we should inherit.
6014             if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6015                 (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6016                 cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
6017                 cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
6018                 cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
6019                 cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
6020             }
6021         }
6022     }
6023     lock.unlock();
6024     if (skip) {
6025         return VK_ERROR_VALIDATION_FAILED_EXT;
6026     }
6027     VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
6028
6029     return result;
6030 }
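// Illustrative sketch (not part of the layer's logic): beginning a secondary command buffer with the
// inheritance info the checks above expect. 'render_pass', 'framebuffer', and 'secondary_cb' are hypothetical.
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = render_pass;   // must be compatible with the render pass the framebuffer was created with
//     inherit.subpass = 0;                // must be less than render_pass's subpassCount
//     inherit.framebuffer = framebuffer;  // optional; may be VK_NULL_HANDLE if not known yet
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);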
6031 static void PostCallRecordEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
6032     // Cached validation is specific to a specific recording of a specific command buffer.
6033     for (auto descriptor_set : cb_state->validated_descriptor_sets) {
6034         descriptor_set->ClearCachedValidation(cb_state);
6035     }
6036     cb_state->validated_descriptor_sets.clear();
6037 }
6038
6039 VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6040     bool skip = false;
6041     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6042     unique_lock_t lock(global_lock);
6043     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6044     if (pCB) {
6045         if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
6046             !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6047             // This needs spec clarification to update valid usage, see comments in PR:
6048             // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6049             skip |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_27400078);
6050         }
6051         skip |= ValidateCmd(dev_data, pCB, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
6052         for (auto query : pCB->activeQueries) {
6053             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6054                             HandleToUint64(commandBuffer), VALIDATION_ERROR_2740007a,
6055                             "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d.",
6056                             HandleToUint64(query.pool), query.index);
6057         }
6058     }
6059     if (!skip) {
6060         lock.unlock();
6061         auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
6062         lock.lock();
6063         PostCallRecordEndCommandBuffer(dev_data, pCB);
6064         if (VK_SUCCESS == result) {
6065             pCB->state = CB_RECORDED;
6066         }
6067         return result;
6068     } else {
6069         return VK_ERROR_VALIDATION_FAILED_EXT;
6070     }
6071 }
6072
6073 VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6074     bool skip = false;
6075     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6076     unique_lock_t lock(global_lock);
6077     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6078     VkCommandPool cmdPool = pCB->createInfo.commandPool;
6079     auto pPool = GetCommandPoolNode(dev_data, cmdPool);
6080     if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6081         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6082                         HandleToUint64(commandBuffer), VALIDATION_ERROR_3260005c,
6083                         "Attempt to reset command buffer (0x%" PRIx64 ") created from command pool (0x%" PRIx64
6084                         ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6085                         HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
6086     }
6087     skip |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_3260005a);
6088     lock.unlock();
6089     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
6090     VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
6091     if (VK_SUCCESS == result) {
6092         lock.lock();
6093         ResetCommandBufferState(dev_data, commandBuffer);
6094         lock.unlock();
6095     }
6096     return result;
6097 }
6098
6099 VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
6100                                            VkPipeline pipeline) {
6101     bool skip = false;
6102     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6103     unique_lock_t lock(global_lock);
6104     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
6105     if (cb_state) {
6106         skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6107                                       VALIDATION_ERROR_18002415);
6108         skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6109         // TODO: VALIDATION_ERROR_18000612 VALIDATION_ERROR_18000616  -- using ValidatePipelineBindPoint
6110
6111         auto pipe_state = getPipelineState(dev_data, pipeline);
6112         if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
6113             cb_state->status &= ~cb_state->static_status;
6114             cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
6115             cb_state->status |= cb_state->static_status;
6116         }
6117         cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
6118         set_pipeline_state(pipe_state);
6119         addCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
6120     }
6121     lock.unlock();
6122     if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6123 }
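// Note on the static_status bookkeeping above: states the bound graphics pipeline does not list in
// pDynamicState are treated as static and pre-marked as "set", so the vkCmdSet* entry points below can
// flag attempts to change them dynamically. For example, a pipeline created without
// VK_DYNAMIC_STATE_VIEWPORT puts CBSTATUS_VIEWPORT_SET into static_status, and a subsequent
// vkCmdSetViewport() on this command buffer is reported as an error.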
6124
6125 VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
6126                                           const VkViewport *pViewports) {
6127     bool skip = false;
6128     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6129     unique_lock_t lock(global_lock);
6130     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6131     if (pCB) {
6132         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1e002415);
6133         skip |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORT, "vkCmdSetViewport()");
6134         if (pCB->static_status & CBSTATUS_VIEWPORT_SET) {
6135             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6136                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1e00098a,
6137                             "vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag.");
6138         }
6139         if (!skip) {
6140             pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
6141             pCB->status |= CBSTATUS_VIEWPORT_SET;
6142         }
6143     }
6144     lock.unlock();
6145     if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6146 }
6147
6148 VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
6149                                          const VkRect2D *pScissors) {
6150     bool skip = false;
6151     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6152     unique_lock_t lock(global_lock);
6153     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6154     if (pCB) {
6155         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d802415);
6156         skip |= ValidateCmd(dev_data, pCB, CMD_SETSCISSOR, "vkCmdSetScissor()");
6157         if (pCB->static_status & CBSTATUS_SCISSOR_SET) {
6158             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6159                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1d80049c,
6160                             "vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag.");
6161         }
6162         if (!skip) {
6163             pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
6164             pCB->status |= CBSTATUS_SCISSOR_SET;
6165         }
6166     }
6167     lock.unlock();
6168     if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6169 }
6170
6171 VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6172     bool skip = false;
6173     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6174     unique_lock_t lock(global_lock);
6175     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6176     if (pCB) {
6177         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1d602415);
6178         skip |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
6179
6180         if (pCB->static_status & CBSTATUS_LINE_WIDTH_SET) {
6181             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6182                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1d600626,
6183                             "vkCmdSetLineWidth(): pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
6184         }
6185         if (!skip) {
6186             pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6187         }
6188     }
6189     lock.unlock();
6190     if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
6191 }
6192
6193 VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
6194                                            float depthBiasSlopeFactor) {
6195     bool skip = false;
6196     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6197     unique_lock_t lock(global_lock);
6198     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6199     if (pCB) {
6200         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1cc02415);
6201         skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
6202         if (pCB->static_status & CBSTATUS_DEPTH_BIAS_SET) {
6203             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6204                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062a,
6205                             "vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag.");
6206         }
6207         if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.depthBiasClamp)) {
6208             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6209                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1cc0062c,
6210                             "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
6211                             "be set to 0.0.");
6212         }
6213         if (!skip) {
6214             pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6215         }
6216     }
6217     lock.unlock();
6218     if (!skip)
6219         dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
6220 }
6221
6222 VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6223     bool skip = false;
6224     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6225     unique_lock_t lock(global_lock);
6226     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6227     if (pCB) {
6228         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ca02415);
6229         skip |= ValidateCmd(dev_data, pCB, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
6230         if (pCB->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
6231             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6232                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1ca004c8,
6233                             "vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag.");
6234         }
6235         if (!skip) {
6236             pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6237         }
6238     }
6239     lock.unlock();
6240     if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
6241 }
6242
6243 VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6244     bool skip = false;
6245     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6246     unique_lock_t lock(global_lock);
6247     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6248     if (pCB) {
6249         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1ce02415);
6250         skip |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
6251         if (pCB->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
6252             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6253                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1ce004ae,
6254                             "vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag.");
6255         }
6256         if (!skip) {
6257             pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6258         }
6259     }
6260     lock.unlock();
6261     if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6262 }
6263
6264 VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
6265                                                     uint32_t compareMask) {
6266     bool skip = false;
6267     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6268     unique_lock_t lock(global_lock);
6269     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6270     if (pCB) {
6271         skip |=
6272             ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1da02415);
6273         skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
6274         if (pCB->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
6275             skip |=
6276                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6277                         HandleToUint64(commandBuffer), VALIDATION_ERROR_1da004b4,
6278                         "vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag.");
6279         }
6280         if (!skip) {
6281             pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6282         }
6283     }
6284     lock.unlock();
6285     if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6286 }
6287
6288 VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6289     bool skip = false;
6290     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6291     unique_lock_t lock(global_lock);
6292     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6293     if (pCB) {
6294         skip |=
6295             ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1de02415);
6296         skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
6297         if (pCB->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
6298             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6299                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1de004b6,
6300                             "vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag.");
6301         }
6302         if (!skip) {
6303             pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6304         }
6305     }
6306     lock.unlock();
6307     if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6308 }
6309
6310 VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6311     bool skip = false;
6312     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6313     unique_lock_t lock(global_lock);
6314     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
6315     if (pCB) {
6316         skip |=
6317             ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1dc02415);
6318         skip |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
6319         if (pCB->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
6320             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6321                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1dc004b8,
6322                             "vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag.");
6323         }
6324         if (!skip) {
6325             pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6326         }
6327     }
6328     lock.unlock();
6329     if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
6330 }
6331
6332 // Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
6333 static void UpdateLastBoundDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6334                                           VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout,
6335                                           uint32_t first_set, uint32_t set_count,
6336                                           const std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets,
6337                                           uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
6338     // Defensive
6339     assert(set_count);
6340     if (0 == set_count) return;
6341     assert(pipeline_layout);
6342     if (!pipeline_layout) return;
6343
6344     uint32_t required_size = first_set + set_count;
6345     const uint32_t last_binding_index = required_size - 1;
6346     assert(last_binding_index < pipeline_layout->compat_for_set.size());
6347
6348     // Some useful shorthand
6349     auto &last_bound = cb_state->lastBound[pipeline_bind_point];
6350
6351     auto &bound_sets = last_bound.boundDescriptorSets;
6352     auto &dynamic_offsets = last_bound.dynamicOffsets;
6353     auto &bound_compat_ids = last_bound.compat_id_for_set;
6354     auto &pipe_compat_ids = pipeline_layout->compat_for_set;
6355
6356     const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
6357     assert(current_size == dynamic_offsets.size());
6358     assert(current_size == bound_compat_ids.size());
6359
6360     // We need this three times in this function, but nowhere else
6361     auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
6362         if (ds && ds->IsPushDescriptor()) {
6363             assert(ds == last_bound.push_descriptor_set.get());
6364             last_bound.push_descriptor_set = nullptr;
6365             return true;
6366         }
6367         return false;
6368     };
6369
6370     // Clean up the "disturbed" before and after the range to be set
6371     if (required_size < current_size) {
6372         if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
6373             // We're disturbing those after last; we'll shrink below, but first need to check for and clean up the push_descriptor
6374             for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
6375                 if (push_descriptor_cleanup(bound_sets[set_idx])) break;
6376             }
6377         } else {
6378             // We're not disturbing past last, so leave the upper binding data alone.
6379             required_size = current_size;
6380         }
6381     }
6382
6383     // We resize if we need more set entries or if those past "last" are disturbed
6384     if (required_size != current_size) {
6385         // TODO: put these size tied things in a struct (touches many lines)
6386         bound_sets.resize(required_size);
6387         dynamic_offsets.resize(required_size);
6388         bound_compat_ids.resize(required_size);
6389     }
6390
6391     // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6392     for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
6393         if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) {
6394             push_descriptor_cleanup(bound_sets[set_idx]);
6395             bound_sets[set_idx] = nullptr;
6396             dynamic_offsets[set_idx].clear();
6397             bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];
6398         }
6399     }
6400
6401     // Now update the bound sets with the input sets
6402     const uint32_t *input_dynamic_offsets = p_dynamic_offsets;  // "read" pointer for dynamic offset data
6403     for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
6404         auto set_idx = input_idx + first_set;  // set_idx is index within layout, input_idx is index within input descriptor sets
6405         cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
6406
6407         // Record binding (or push)
6408         push_descriptor_cleanup(bound_sets[set_idx]);
6409         bound_sets[set_idx] = descriptor_set;
6410         bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];  // compat ids are canonical *per* set index
6411
6412         if (descriptor_set) {
6413             auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
6414             // TODO: Add logic for tracking push_descriptor offsets (here or in caller)
6415             if (set_dynamic_descriptor_count && input_dynamic_offsets) {
6416                 const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
6417                 dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
6418                 input_dynamic_offsets = end_offset;
6419                 assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
6420             } else {
6421                 dynamic_offsets[set_idx].clear();
6422             }
6423             if (!descriptor_set->IsPushDescriptor()) {
6424                 // Can't cache validation of push_descriptors
6425                 cb_state->validated_descriptor_sets.insert(descriptor_set);
6426             }
6427         }
6428     }
6429 }
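// Worked example of the compatibility handling above: suppose sets 0..3 are currently bound and this
// call binds sets 1..2 from a new pipeline layout. If the new layout's compat id for set 3 differs from
// the bound one, everything past the newly bound range is "disturbed" and dropped (with any push
// descriptor set cleaned up); if the compat id for set 0 differs, set 0 is invalidated even though it is
// below the bound range. Sets whose per-index compat ids match are left bound and stay valid for draws.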
6430
6431 // Update the bound state for the bind point, including the effects of incompatible pipeline layouts
6432 static void PreCallRecordCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6433                                                VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
6434                                                uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
6435                                                uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
6436     auto pipeline_layout = getPipelineLayout(device_data, layout);
6437     std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets;
6438     descriptor_sets.reserve(setCount);
6439
6440     // Construct a list of the descriptors
6441     bool found_non_null = false;
6442     for (uint32_t i = 0; i < setCount; i++) {
6443         cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[i]);
6444         descriptor_sets.emplace_back(descriptor_set);
6445         found_non_null |= descriptor_set != nullptr;
6446     }
6447     if (found_non_null) {  // which implies setCount > 0
6448         UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount,
6449                                       descriptor_sets, dynamicOffsetCount, pDynamicOffsets);
6450         cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
6451     }
6452 }
6453
6454 static bool PreCallValidateCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6455                                                  VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
6456                                                  uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
6457                                                  uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
6458     bool skip = false;
6459     skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
6460                                   VALIDATION_ERROR_17c02415);
6461     skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6462     // Track total count of dynamic descriptor types to make sure we have an offset for each one
6463     uint32_t total_dynamic_descriptors = 0;
6464     string error_string = "";
6465     uint32_t last_set_index = firstSet + setCount - 1;
6466
6467     if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6468         cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
6469         cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
6470         cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1);
6471     }
6472     auto pipeline_layout = getPipelineLayout(device_data, layout);
6473     for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
6474         cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
6475         if (descriptor_set) {
6476             if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
6477                 skip |= log_msg(
6478                     device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6479                     HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED,
6480                     "Descriptor Set 0x%" PRIx64 " bound but it was never updated. You may want to either update it or not bind it.",
6481                     HandleToUint64(pDescriptorSets[set_idx]));
6482             }
6483             // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6484             if (!verify_set_layout_compatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
6485                 skip |=
6486                     log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6487                             HandleToUint64(pDescriptorSets[set_idx]), VALIDATION_ERROR_17c002cc,
6488                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
6489                             "pipelineLayout 0x%" PRIx64 " due to: %s.",
6490                             set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str());
6491             }
6492
6493             auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
6494
6495             if (set_dynamic_descriptor_count) {
6496                 // First make sure we won't overstep bounds of pDynamicOffsets array
6497                 if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
6498                     skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6499                                     VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
6500                                     DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT,
6501                                     "descriptorSet #%u (0x%" PRIx64
6502                                     ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. "
6503                                     "There must be one dynamic offset for each dynamic descriptor being bound.",
6504                                     set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
6505                                     (dynamicOffsetCount - total_dynamic_descriptors));
6506                 } else {  // Validate dynamic offsets and Dynamic Offset Minimums
6507                     uint32_t cur_dyn_offset = total_dynamic_descriptors;
6508                     for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
6509                         if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6510                             if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
6511                                            device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
6512                                 0) {
6513                                 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6514                                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
6515                                                 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6516                                                 "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
6517                                                 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6518                                                 device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6519                             }
6520                             cur_dyn_offset++;
6521                         } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6522                             if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
6523                                            device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
6524                                 0) {
6525                                 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6526                                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, VALIDATION_ERROR_17c002d4,
6527                                                 "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6528                                                 "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
6529                                                 cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6530                                                 device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6531                             }
6532                             cur_dyn_offset++;
6533                         }
6534                     }
6535                     // Keep running total of dynamic descriptor count to verify at the end
6536                     total_dynamic_descriptors += set_dynamic_descriptor_count;
6537                 }
6538             }
6539         } else {
6540             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6541                             HandleToUint64(pDescriptorSets[set_idx]), DRAWSTATE_INVALID_SET,
6542                             "Attempt to bind descriptor set 0x%" PRIx64 " that doesn't exist!",
6543                             HandleToUint64(pDescriptorSets[set_idx]));
6544         }
6545     }
6546     //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6547     if (total_dynamic_descriptors != dynamicOffsetCount) {
6548         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6549                         HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_17c002ce,
6550                         "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
6551                         "exactly match the number of dynamic descriptors.",
6552                         setCount, total_dynamic_descriptors, dynamicOffsetCount);
6553     }
6554     return skip;
6555 }
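// Worked example of the dynamic-offset accounting above: binding two sets where the first contains two
// UNIFORM_BUFFER_DYNAMIC descriptors and the second contains one STORAGE_BUFFER_DYNAMIC descriptor
// requires dynamicOffsetCount == 3. pDynamicOffsets[0..1] apply to the first set and must each be a
// multiple of minUniformBufferOffsetAlignment; pDynamicOffsets[2] applies to the second set and must be
// a multiple of minStorageBufferOffsetAlignment.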
6556
6557 VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
6558                                                  VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
6559                                                  const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6560                                                  const uint32_t *pDynamicOffsets) {
6561     bool skip = false;
6562     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6563     unique_lock_t lock(global_lock);
6564     GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
6565     assert(cb_state);
6566     skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
6567                                                 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6568     if (!skip) {
6569         PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
6570                                            dynamicOffsetCount, pDynamicOffsets);
6571         lock.unlock();
6572         device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6573                                                           pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6574     } else {
6575         lock.unlock();
6576     }
6577 }
6578
6579 // Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
6580 // Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
6581 // TODO add vkCmdBindPipeline bind_point validation using this call.
6582 bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6583                                const char *func_name,
6584                                const std::array<UNIQUE_VALIDATION_ERROR_CODE, VK_PIPELINE_BIND_POINT_RANGE_SIZE> &bind_errors) {
6585     bool skip = false;
6586     auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
6587     if (pool) {  // The loss of a pool in a recording cmd is reported in DestroyCommandPool
6588         static const VkQueueFlags flag_mask[VK_PIPELINE_BIND_POINT_RANGE_SIZE] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT};
6589         const auto bind_point_index = bind_point - VK_PIPELINE_BIND_POINT_BEGIN_RANGE;  // typeof enum is not defined, use auto
6590         const auto &qfp = GetPhysDevProperties(device_data)->queue_family_properties[pool->queueFamilyIndex];
6591         if (0 == (qfp.queueFlags & flag_mask[bind_point_index])) {
6592             const UNIQUE_VALIDATION_ERROR_CODE error = bind_errors[bind_point_index];
6593             auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
6594             auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
6595             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6596                             cb_u64, error,
6597                             "%s: CommandBuffer 0x%" PRIxLEAST64 " was allocated from VkCommandPool 0x%" PRIxLEAST64
6598                             " that does not support bindpoint %s.",
6599                             func_name, cb_u64, cp_u64, string_VkPipelineBindPoint(bind_point));
6600         }
6601     }
6602     return skip;
6603 }
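// For example, a command buffer allocated from a pool whose queue family advertises only
// VK_QUEUE_TRANSFER_BIT fails this check for both VK_PIPELINE_BIND_POINT_GRAPHICS and
// VK_PIPELINE_BIND_POINT_COMPUTE, since neither VK_QUEUE_GRAPHICS_BIT nor VK_QUEUE_COMPUTE_BIT is
// present in its queueFlags.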
6604
6605 static bool PreCallValidateCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6606                                                    const VkPipelineBindPoint bind_point, const VkPipelineLayout layout,
6607                                                    const uint32_t set, const uint32_t descriptor_write_count,
6608                                                    const VkWriteDescriptorSet *descriptor_writes, const char *func_name) {
6609     bool skip = false;
6610     skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
6611     skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
6612                                   VALIDATION_ERROR_1be02415);
6613     skip |= ValidatePipelineBindPoint(device_data, cb_state, bind_point, func_name,
6614                                       {{VALIDATION_ERROR_1be002d6, VALIDATION_ERROR_1be002d6}});
6615     auto layout_data = getPipelineLayout(device_data, layout);
6616
6617     // Validate the set index points to a push descriptor set and is in range
6618     if (layout_data) {
6619         const auto &set_layouts = layout_data->set_layouts;
6620         const auto layout_u64 = HandleToUint64(layout);
6621         if (set < set_layouts.size()) {
6622             const auto *dsl = set_layouts[set].get();
6623             if (dsl && (0 == (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))) {
6624                 skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6625                                VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, VALIDATION_ERROR_1be002da,
6626                                "%s: Set index %" PRIu32
6627                                " does not match push descriptor set layout index for VkPipelineLayout 0x%" PRIxLEAST64 ".",
6628                                func_name, set, layout_u64);
6629             }
6630         } else {
6631             skip = log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
6632                            layout_u64, VALIDATION_ERROR_1be002d8,
6633                            "%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout 0x%" PRIxLEAST64 " (set < %" PRIu32
6634                            ").",
6635                            func_name, set, layout_u64, static_cast<uint32_t>(set_layouts.size()));
6636         }
6637     }
6638
6639     return skip;
6640 }
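// Illustrative sketch (not part of the layer's logic): a set layout that satisfies the push-descriptor
// check above. 'device', 'binding', and 'push_set_layout' are hypothetical.
//     VkDescriptorSetLayoutCreateInfo dsl_ci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO};
//     dsl_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;  // needed for vkCmdPushDescriptorSetKHR()
//     dsl_ci.bindingCount = 1;
//     dsl_ci.pBindings = &binding;
//     vkCreateDescriptorSetLayout(device, &dsl_ci, nullptr, &push_set_layout);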
6641 static void PreCallRecordCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
6642                                                  VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
6643                                                  uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
6644     const auto &pipeline_layout = getPipelineLayout(device_data, layout);
6645     if (!pipeline_layout) return;
6646     std::unique_ptr<cvdescriptorset::DescriptorSet> new_desc{
6647         new cvdescriptorset::DescriptorSet(0, 0, pipeline_layout->set_layouts[set], 0, device_data)};
6648
6649     std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {new_desc.get()};
6650     UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
6651     cb_state->lastBound[pipelineBindPoint].push_descriptor_set = std::move(new_desc);
6652     cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
6653 }
6654
6655 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
6656                                                    VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
6657                                                    const VkWriteDescriptorSet *pDescriptorWrites) {
6658     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6659     unique_lock_t lock(global_lock);
6660     auto cb_state = GetCBNode(device_data, commandBuffer);
6661     bool skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
6662                                                        pDescriptorWrites, "vkCmdPushDescriptorSetKHR()");
6663     if (!skip) {
6664         PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
6665                                              pDescriptorWrites);
6666         lock.unlock();
6667         device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
6668                                                             pDescriptorWrites);
6669     }
6670 }
6671
6672 static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
6673     switch (indexType) {
6674         case VK_INDEX_TYPE_UINT16:
6675             return 2;
6676         case VK_INDEX_TYPE_UINT32:
6677             return 4;
6678         default:
6679             // Not a real index type. Express no alignment requirement here; we expect upper layer
6680             // to have already picked up on the enum being nonsense.
6681             return 1;
6682     }
6683 }
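// Minimal sketch (hypothetical values) of how the alignment above is consumed by CmdBindIndexBuffer() below:
//
//     VkDeviceSize offset = 6;                                          // caller-provided bind offset
//     VkDeviceSize align  = GetIndexAlignment(VK_INDEX_TYPE_UINT32);    // -> 4
//     bool misaligned     = (offset % align) != 0;                      // 6 % 4 == 2, so VALIDATION_ERROR_17e00360 is reported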
6684
6685 VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
6686                                               VkIndexType indexType) {
6687     bool skip = false;
6688     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6689     unique_lock_t lock(global_lock);
6690
6691     auto buffer_state = GetBufferState(dev_data, buffer);
6692     auto cb_node = GetCBNode(dev_data, commandBuffer);
6693     assert(cb_node);
6694     assert(buffer_state);
6695
6696     skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, VALIDATION_ERROR_17e00362,
6697                                      "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
6698     skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_17e02415);
6699     skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6700     skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_17e00364);
6701     auto offset_align = GetIndexAlignment(indexType);
6702     if (offset % offset_align) {
6703         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6704                         HandleToUint64(commandBuffer), VALIDATION_ERROR_17e00360,
6705                         "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not satisfy the alignment required by index type %s.", offset,
6706                         string_VkIndexType(indexType));
6707     }
6708
6709     if (skip) return;
6710
6711     cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6712     cb_node->index_buffer_binding.buffer = buffer;
6713     cb_node->index_buffer_binding.size = buffer_state->createInfo.size;
6714     cb_node->index_buffer_binding.offset = offset;
6715     cb_node->index_buffer_binding.index_type = indexType;
6716
6717     lock.unlock();
6718     dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6719 }
6720
6721 void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6722     uint32_t end = firstBinding + bindingCount;
6723     if (pCB->currentDrawData.buffers.size() < end) {
6724         pCB->currentDrawData.buffers.resize(end);
6725     }
6726     for (uint32_t i = 0; i < bindingCount; ++i) {
6727         pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6728     }
6729 }
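// Worked example (hypothetical values): with firstBinding = 2 and bindingCount = 3, 'end' is 5, so
// currentDrawData.buffers is grown to at least 5 entries and pBuffers[0..2] land in buffers[2..4];
// bindings 0 and 1 keep whatever buffers were recorded previously.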
6730
6731 static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6732
6733 VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
6734                                                 const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
6735     bool skip = false;
6736     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6737     unique_lock_t lock(global_lock);
6738
6739     auto cb_node = GetCBNode(dev_data, commandBuffer);
6740     assert(cb_node);
6741
6742     skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_18202415);
6743     skip |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
6744     for (uint32_t i = 0; i < bindingCount; ++i) {
6745         auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
6746         assert(buffer_state);
6747         skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, VALIDATION_ERROR_182004e6,
6748                                          "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
6749         skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_182004e8);
6750         if (pOffsets[i] >= buffer_state->createInfo.size) {
6751             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
6752                             HandleToUint64(buffer_state->buffer), VALIDATION_ERROR_182004e4,
6753                             "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
6754         }
6755     }
6756
6757     if (skip) return;
6758
6759     updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
6760
6761     lock.unlock();
6762     dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6763 }
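// Illustrative note: the per-binding offset check above is exclusive of the buffer size, so binding a
// 256-byte vertex buffer with pOffsets[i] = 256 is flagged with VALIDATION_ERROR_182004e4, while
// pOffsets[i] = 255 passes this particular check.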
6764
6765 // Generic function to handle validation for all CmdDraw* type functions
6766 static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
6767                                 CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
6768                                 UNIQUE_VALIDATION_ERROR_CODE queue_flag_code, UNIQUE_VALIDATION_ERROR_CODE msg_code,
6769                                 UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
6770     bool skip = false;
6771     *cb_state = GetCBNode(dev_data, cmd_buffer);
6772     if (*cb_state) {
6773         skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
6774         skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
6775         skip |= ValidateDrawState(dev_data, *cb_state, cmd_type, indexed, bind_point, caller, dynamic_state_msg_code);
6776         skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
6777                                                                 : insideRenderPass(dev_data, *cb_state, caller, msg_code);
6778     }
6779     return skip;
6780 }
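// Note on the render pass containment check above: for the graphics bind point, a draw recorded outside a
// render pass instance is the error (hence outsideRenderPass()), while for the compute bind point, a dispatch
// recorded inside a render pass instance is the error (hence insideRenderPass()).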
6781
6782 // Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
6783 static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6784     UpdateDrawState(dev_data, cb_state, bind_point);
6785 }
6786
6787 // Generic function to handle state update for all CmdDraw* type functions
6788 static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6789     UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6790     updateResourceTrackingOnDraw(cb_state);
6791     cb_state->hasDrawCmd = true;
6792 }
6793
6794 static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
6795                                    GLOBAL_CB_NODE **cb_state, const char *caller) {
6796     return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6797                                VALIDATION_ERROR_1a202415, VALIDATION_ERROR_1a200017, VALIDATION_ERROR_1a200376);
6798 }
6799
6800 static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6801     UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6802 }
6803
6804 VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6805                                    uint32_t firstVertex, uint32_t firstInstance) {
6806     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6807     GLOBAL_CB_NODE *cb_state = nullptr;
6808     unique_lock_t lock(global_lock);
6809     bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
6810     lock.unlock();
6811     if (!skip) {
6812         dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6813         lock.lock();
6814         PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
6815         lock.unlock();
6816     }
6817 }
6818
6819 static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
6820                                           VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller,
6821                                           uint32_t indexCount, uint32_t firstIndex) {
6822     bool skip =
6823         ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6824                             VALIDATION_ERROR_1a402415, VALIDATION_ERROR_1a400017, VALIDATION_ERROR_1a40039c);
6825     if (!skip && (*cb_state) && ((*cb_state)->status & CBSTATUS_INDEX_BUFFER_BOUND)) {
6826         unsigned int index_size = 0;
6827         const auto &index_buffer_binding = (*cb_state)->index_buffer_binding;
6828         if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT16) {
6829             index_size = 2;
6830         } else if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT32) {
6831             index_size = 4;
6832         }
6833         VkDeviceSize end_offset = (index_size * ((VkDeviceSize)firstIndex + indexCount)) + index_buffer_binding.offset;
6834         if (end_offset > index_buffer_binding.size) {
6835             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
6836                             HandleToUint64(index_buffer_binding.buffer), VALIDATION_ERROR_1a40039e,
6837                             "vkCmdDrawIndexed() index size (%d) * (firstIndex (%d) + indexCount (%d)) "
6838                             "+ binding offset (%" PRIuLEAST64 ") = an ending offset of %" PRIuLEAST64
6839                             " bytes, "
6840                             "which is greater than the index buffer size (%" PRIuLEAST64 ").",
6841                             index_size, firstIndex, indexCount, index_buffer_binding.offset, end_offset, index_buffer_binding.size);
6842         }
6843     }
6844     return skip;
6845 }
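// Worked example (hypothetical values) of the bound check above: with VK_INDEX_TYPE_UINT16 (index_size = 2),
// firstIndex = 10, indexCount = 100, and a binding offset of 4, the draw reads up through
// 2 * (10 + 100) + 4 = 224 bytes of the bound index buffer; if index_buffer_binding.size is less than 224,
// VALIDATION_ERROR_1a40039e is reported.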
6846
6847 static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6848     UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6849 }
6850
6851 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
6852                                           uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
6853     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6854     GLOBAL_CB_NODE *cb_state = nullptr;
6855     unique_lock_t lock(global_lock);
6856     bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
6857                                               "vkCmdDrawIndexed()", indexCount, firstIndex);
6858     lock.unlock();
6859     if (!skip) {
6860         dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
6861         lock.lock();
6862         PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
6863         lock.unlock();
6864     }
6865 }
6866
6867 static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6868                                            VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
6869                                            const char *caller) {
6870     bool skip =
6871         ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
6872                             VALIDATION_ERROR_1aa02415, VALIDATION_ERROR_1aa00017, VALIDATION_ERROR_1aa003cc);
6873     *buffer_state = GetBufferState(dev_data, buffer);
6874     skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1aa003b4);
6875     // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6876     // VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
6877     return skip;
6878 }
6879
6880 static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6881                                           BUFFER_STATE *buffer_state) {
6882     UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6883     AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
6884 }
6885
6886 VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
6887                                            uint32_t stride) {
6888     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6889     GLOBAL_CB_NODE *cb_state = nullptr;
6890     BUFFER_STATE *buffer_state = nullptr;
6891     unique_lock_t lock(global_lock);
6892     bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
6893                                                &buffer_state, "vkCmdDrawIndirect()");
6894     lock.unlock();
6895     if (!skip) {
6896         dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6897         lock.lock();
6898         PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
6899         lock.unlock();
6900     }
6901 }
6902
6903 static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6904                                                   VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
6905                                                   BUFFER_STATE **buffer_state, const char *caller) {
6906     bool skip =
6907         ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
6908                             VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1a602415, VALIDATION_ERROR_1a600017, VALIDATION_ERROR_1a600434);
6909     *buffer_state = GetBufferState(dev_data, buffer);
6910     skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a60041c);
6911     // TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
6912     // VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
6913     // 'buffer'.
6914     return skip;
6915 }
6916
6917 static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6918                                                  BUFFER_STATE *buffer_state) {
6919     UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
6920     AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
6921 }
6922
6923 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
6924                                                   uint32_t count, uint32_t stride) {
6925     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6926     GLOBAL_CB_NODE *cb_state = nullptr;
6927     BUFFER_STATE *buffer_state = nullptr;
6928     unique_lock_t lock(global_lock);
6929     bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
6930                                                       &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
6931     lock.unlock();
6932     if (!skip) {
6933         dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6934         lock.lock();
6935         PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
6936         lock.unlock();
6937     }
6938 }
6939
6940 static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
6941                                        VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
6942     return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
6943                                VALIDATION_ERROR_19c02415, VALIDATION_ERROR_19c00017, VALIDATION_ERROR_UNDEFINED);
6944 }
6945
6946 static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
6947     UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6948 }
6949
6950 VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6951     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6952     GLOBAL_CB_NODE *cb_state = nullptr;
6953     unique_lock_t lock(global_lock);
6954     bool skip =
6955         PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
6956     lock.unlock();
6957     if (!skip) {
6958         dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
6959         lock.lock();
6960         PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
6961         lock.unlock();
6962     }
6963 }
6964
6965 static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
6966                                                VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
6967                                                BUFFER_STATE **buffer_state, const char *caller) {
6968     bool skip =
6969         ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
6970                             VALIDATION_ERROR_1a002415, VALIDATION_ERROR_1a000017, VALIDATION_ERROR_UNDEFINED);
6971     *buffer_state = GetBufferState(dev_data, buffer);
6972     skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_1a000322);
6973     return skip;
6974 }
6975
6976 static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
6977                                               BUFFER_STATE *buffer_state) {
6978     UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
6979     AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
6980 }
6981
6982 VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6983     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
6984     GLOBAL_CB_NODE *cb_state = nullptr;
6985     BUFFER_STATE *buffer_state = nullptr;
6986     unique_lock_t lock(global_lock);
6987     bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
6988                                                    &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
6989     lock.unlock();
6990     if (!skip) {
6991         dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
6992         lock.lock();
6993         PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
6994         lock.unlock();
6995     }
6996 }
6997
6998 VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6999                                          uint32_t regionCount, const VkBufferCopy *pRegions) {
7000     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7001     unique_lock_t lock(global_lock);
7002
7003     auto cb_node = GetCBNode(device_data, commandBuffer);
7004     auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7005     auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7006
7007     if (cb_node && src_buffer_state && dst_buffer_state) {
7008         bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
7009         if (!skip) {
7010             PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
7011             lock.unlock();
7012             device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7013         }
7014     } else {
7015         lock.unlock();
7016         assert(0);
7017     }
7018 }
7019
7020 VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7021                                         VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7022                                         const VkImageCopy *pRegions) {
7023     bool skip = false;
7024     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7025     unique_lock_t lock(global_lock);
7026
7027     auto cb_node = GetCBNode(device_data, commandBuffer);
7028     auto src_image_state = GetImageState(device_data, srcImage);
7029     auto dst_image_state = GetImageState(device_data, dstImage);
7030     if (cb_node && src_image_state && dst_image_state) {
7031         skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
7032                                            srcImageLayout, dstImageLayout);
7033         if (!skip) {
7034             PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
7035                                       dstImageLayout);
7036             lock.unlock();
7037             device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7038                                                      pRegions);
7039         }
7040     } else {
7041         lock.unlock();
7042         assert(0);
7043     }
7044 }
7045
7046 // Validate that an image's sampleCount matches the requirement for a specific API call
7047 bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
7048                               const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
7049     bool skip = false;
7050     if (image_state->createInfo.samples != sample_count) {
7051         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
7052                        HandleToUint64(image_state->image), msgCode,
7053                        "%s for image 0x%" PRIx64 " was created with a sample count of %s but must be %s.", location,
7054                        HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
7055                        string_VkSampleCountFlagBits(sample_count));
7056     }
7057     return skip;
7058 }
7059
7060 VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7061                                         VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7062                                         const VkImageBlit *pRegions, VkFilter filter) {
7063     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7064     unique_lock_t lock(global_lock);
7065
7066     auto cb_node = GetCBNode(dev_data, commandBuffer);
7067     auto src_image_state = GetImageState(dev_data, srcImage);
7068     auto dst_image_state = GetImageState(dev_data, dstImage);
7069
7070     bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
7071                                             srcImageLayout, dstImageLayout, filter);
7072
7073     if (!skip) {
7074         PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
7075                                   dstImageLayout);
7076         lock.unlock();
7077         dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7078                                               pRegions, filter);
7079     }
7080 }
7081
7082 VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
7083                                                 VkImageLayout dstImageLayout, uint32_t regionCount,
7084                                                 const VkBufferImageCopy *pRegions) {
7085     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7086     unique_lock_t lock(global_lock);
7087     bool skip = false;
7088     auto cb_node = GetCBNode(device_data, commandBuffer);
7089     auto src_buffer_state = GetBufferState(device_data, srcBuffer);
7090     auto dst_image_state = GetImageState(device_data, dstImage);
7091     if (cb_node && src_buffer_state && dst_image_state) {
7092         skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
7093                                                    regionCount, pRegions, "vkCmdCopyBufferToImage()");
7094     } else {
7095         lock.unlock();
7096         assert(0);
7097         // TODO: report VU01244 here, or put in object tracker?
7098     }
7099     if (!skip) {
7100         PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
7101                                           dstImageLayout);
7102         lock.unlock();
7103         device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
7104     }
7105 }
7106
7107 VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7108                                                 VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7109     bool skip = false;
7110     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7111     unique_lock_t lock(global_lock);
7112
7113     auto cb_node = GetCBNode(device_data, commandBuffer);
7114     auto src_image_state = GetImageState(device_data, srcImage);
7115     auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
7116     if (cb_node && src_image_state && dst_buffer_state) {
7117         skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
7118                                                    regionCount, pRegions, "vkCmdCopyImageToBuffer()");
7119     } else {
7120         lock.unlock();
7121         assert(0);
7122         // TODO: report VU01262 here, or put in object tracker?
7123     }
7124     if (!skip) {
7125         PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
7126                                           srcImageLayout);
7127         lock.unlock();
7128         device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
7129     }
7130 }
7131
7132 static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
7133     bool skip = false;
7134     skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400046);
7135     // Validate that DST buffer has correct usage flags set
7136     skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7137                                      VALIDATION_ERROR_1e400044, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7138     skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
7139                                   VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1e402415);
7140     skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7141     skip |= insideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_1e400017);
7142     return skip;
7143 }
7144
7145 static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
7146     // Update bindings between buffer and cmd buffer
7147     AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
7148 }
7149
7150 VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7151                                            VkDeviceSize dataSize, const uint32_t *pData) {
7152     bool skip = false;
7153     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7154     unique_lock_t lock(global_lock);
7155
7156     auto cb_state = GetCBNode(dev_data, commandBuffer);
7157     assert(cb_state);
7158     auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
7159     assert(dst_buff_state);
7160     skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
7161     lock.unlock();
7162     if (!skip) {
7163         dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7164         lock.lock();
7165         PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
7166         lock.unlock();
7167     }
7168 }
7169
7170 VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
7171                                          VkDeviceSize size, uint32_t data) {
7172     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7173     unique_lock_t lock(global_lock);
7174     auto cb_node = GetCBNode(device_data, commandBuffer);
7175     auto buffer_state = GetBufferState(device_data, dstBuffer);
7176
7177     if (cb_node && buffer_state) {
7178         bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
7179         if (!skip) {
7180             PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
7181             lock.unlock();
7182             device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7183         }
7184     } else {
7185         lock.unlock();
7186         assert(0);
7187     }
7188 }
7189
7190 VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7191                                                const VkClearAttachment *pAttachments, uint32_t rectCount,
7192                                                const VkClearRect *pRects) {
7193     bool skip = false;
7194     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7195     {
7196         lock_guard_t lock(global_lock);
7197         skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7198     }
7199     if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7200 }
7201
7202 VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7203                                               const VkClearColorValue *pColor, uint32_t rangeCount,
7204                                               const VkImageSubresourceRange *pRanges) {
7205     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7206     unique_lock_t lock(global_lock);
7207
7208     bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7209     if (!skip) {
7210         PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7211         lock.unlock();
7212         dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7213     }
7214 }
7215
7216 VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7217                                                      const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7218                                                      const VkImageSubresourceRange *pRanges) {
7219     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7220     unique_lock_t lock(global_lock);
7221
7222     bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7223     if (!skip) {
7224         PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
7225         lock.unlock();
7226         dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
7227     }
7228 }
7229
7230 VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
7231                                            VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
7232                                            const VkImageResolve *pRegions) {
7233     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7234     unique_lock_t lock(global_lock);
7235
7236     auto cb_node = GetCBNode(dev_data, commandBuffer);
7237     auto src_image_state = GetImageState(dev_data, srcImage);
7238     auto dst_image_state = GetImageState(dev_data, dstImage);
7239
7240     bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions);
7241
7242     if (!skip) {
7243         PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
7244         lock.unlock();
7245         dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
7246                                                  pRegions);
7247     }
7248 }
7249
7250 VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
7251                                                      VkSubresourceLayout *pLayout) {
7252     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
7253     unique_lock_t lock(global_lock);
7254
7255     bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
7256     if (!skip) {
7257         lock.unlock();
7258         device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
7259     }
7260 }
7261
7262 bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7263     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7264     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7265     if (pCB) {
7266         pCB->eventToStageMap[event] = stageMask;
7267     }
7268     auto queue_data = dev_data->queueMap.find(queue);
7269     if (queue_data != dev_data->queueMap.end()) {
7270         queue_data->second.eventToStageMap[event] = stageMask;
7271     }
7272     return false;
7273 }
7274
7275 VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7276     bool skip = false;
7277     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7278     unique_lock_t lock(global_lock);
7279     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7280     if (pCB) {
7281         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7282                                       VALIDATION_ERROR_1d402415);
7283         skip |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7284         skip |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_1d400017);
7285         skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", VALIDATION_ERROR_1d4008fc,
7286                                              VALIDATION_ERROR_1d4008fe);
7287         auto event_state = GetEventNode(dev_data, event);
7288         if (event_state) {
7289             addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
7290             event_state->cb_bindings.insert(pCB);
7291         }
7292         pCB->events.push_back(event);
7293         if (!pCB->waitedEvents.count(event)) {
7294             pCB->writeEventsBeforeWait.push_back(event);
7295         }
7296         pCB->eventUpdates.emplace_back([=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, stageMask); });
7297     }
7298     lock.unlock();
7299     if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
7300 }
7301
7302 VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7303     bool skip = false;
7304     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
7305     unique_lock_t lock(global_lock);
7306     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
7307     if (pCB) {
7308         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
7309                                       VALIDATION_ERROR_1c402415);
7310         skip |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7311         skip |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_1c400017);
7312         skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", VALIDATION_ERROR_1c400904,
7313                                              VALIDATION_ERROR_1c400906);
7314         auto event_state = GetEventNode(dev_data, event);
7315         if (event_state) {
7316             addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, pCB);
7317             event_state->cb_bindings.insert(pCB);
7318         }
7319         pCB->events.push_back(event);
7320         if (!pCB->waitedEvents.count(event)) {
7321             pCB->writeEventsBeforeWait.push_back(event);
7322         }
7323         // TODO : Add check for VALIDATION_ERROR_32c008f8
7324         pCB->eventUpdates.emplace_back(
7325             [=](VkQueue q) { return setEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
7326     }
7327     lock.unlock();
7328     if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
7329 }
7330
7331 // Return the input pipeline stage flags, expanded to the individual stage bits when the input is exactly VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT
7332 static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
7333     return (inflags != VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
7334                ? inflags
7335                : (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
7336                   VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7337                   VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7338                   VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
7339                   VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
7340                   VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
7341 }
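// Example (hypothetical values): a stage mask that is exactly VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is rewritten
// to the OR of the individual graphics stages listed above before the subset comparisons below; any other mask,
// including one that merely contains the ALL_GRAPHICS bit among other bits, is passed through unchanged.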
7342
7343 // Verify image barrier image state and that the image is consistent with FB image
7344 static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
7345                                       VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
7346                                       uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
7347     bool skip = false;
7348     const auto &fb_state = GetFramebufferState(device_data, framebuffer);
7349     assert(fb_state);
7350     const auto img_bar_image = img_barrier.image;
7351     bool image_match = false;
7352     bool sub_image_found = false;  // Was a corresponding attachment reference found in the subpass description?
7353     VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
7354     uint32_t attach_index = 0;
7355     uint32_t index_count = 0;
7356     // Verify that a framebuffer image matches barrier image
7357     for (const auto &fb_attach : fb_state->attachments) {
7358         if (img_bar_image == fb_attach.image) {
7359             image_match = true;
7360             attach_index = index_count;
7361             break;
7362         }
7363         index_count++;
7364     }
7365     if (image_match) {  // Make sure subpass is referring to matching attachment
7366         if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
7367             sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
7368             sub_image_found = true;
7369         } else {
7370             for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
7371                 if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
7372                     sub_image_layout = sub_desc.pColorAttachments[j].layout;
7373                     sub_image_found = true;
7374                     break;
7375                 } else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
7376                     sub_image_layout = sub_desc.pResolveAttachments[j].layout;
7377                     sub_image_found = true;
7378                     break;
7379                 }
7380             }
7381         }
7382         if (!sub_image_found) {
7383             skip |= log_msg(
7384                 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
7385                 VALIDATION_ERROR_1b800936,
7386                 "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7387                 ") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 ").",
7388                 funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle);
7389         }
7390     } else {  // !image_match
7391         auto const fb_handle = HandleToUint64(fb_state->framebuffer);
7392         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
7393                         fb_handle, VALIDATION_ERROR_1b800936,
7394                         "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7395                         ") does not match an image from the current framebuffer (0x%" PRIx64 ").",
7396                         funcName, img_index, HandleToUint64(img_bar_image), fb_handle);
7397     }
7398     if (img_barrier.oldLayout != img_barrier.newLayout) {
7399         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7400                         HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b80093a,
7401                         "%s: As the Image Barrier for image 0x%" PRIx64
7402                         " is being executed within a render pass instance, oldLayout must equal newLayout yet they are %s and %s.",
7403                         funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
7404                         string_VkImageLayout(img_barrier.newLayout));
7405     } else {
7406         if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
7407             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7408                             rp_handle, VALIDATION_ERROR_1b800938,
7409                             "%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
7410                             ") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
7411                             ") as having layout %s, but image barrier has layout %s.",
7412                             funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
7413                             string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(sub_image_layout));
7414         }
7415     }
7416     return skip;
7417 }
7418
7419 // Validate image barriers within a renderPass
7420 static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7421                                             uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
7422                                             VkAccessFlags sub_src_access_mask, VkAccessFlags sub_dst_access_mask,
7423                                             uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
7424     bool skip = false;
7425     for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
7426         const auto &img_barrier = image_barriers[i];
7427         const auto &img_src_access_mask = img_barrier.srcAccessMask;
7428         if (img_src_access_mask != (sub_src_access_mask & img_src_access_mask)) {
7429             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7430                             rp_handle, VALIDATION_ERROR_1b80092e,
7431                             "%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
7432                             "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7433                             funcName, i, img_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
7434         }
7435         const auto &img_dst_access_mask = img_barrier.dstAccessMask;
7436         if (img_dst_access_mask != (sub_dst_access_mask & img_dst_access_mask)) {
7437             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7438                             rp_handle, VALIDATION_ERROR_1b800930,
7439                             "%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
7440                             "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7441                             funcName, i, img_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
7442         }
7443         if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
7444             VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
7445             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7446                             rp_handle, VALIDATION_ERROR_1b80093c,
7447                             "%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
7448                             "pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
7449                             funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
7450         }
7451         // Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
7452         if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
7453             assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
7454             // Secondary CB with no framebuffer specified: delay this validation until the framebuffer is known
7455             cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) {
7456                 return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
7457                                                  img_barrier);
7458             });
7459         } else {
7460             skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
7461                                               sub_desc, rp_handle, i, img_barrier);
7462         }
7463     }
7464     return skip;
7465 }
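// Note on the subset idiom used above and below: "mask != (allowed & mask)" is true exactly when 'mask'
// contains a bit that 'allowed' does not. For example (hypothetical values), srcAccessMask = 0x6 against an
// allowed sub_src_access_mask = 0x2 gives 0x6 & 0x2 == 0x2 != 0x6, so the barrier requests an access (the 0x4
// bit) that the subpass self-dependency never declared, and the error is reported.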
7466
7467 // Validate VUs for Pipeline Barriers that are within a renderPass
7468 // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
7469 static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7470                                                VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
7471                                                VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
7472                                                const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
7473                                                const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
7474                                                const VkImageMemoryBarrier *image_barriers) {
7475     bool skip = false;
7476     auto rp_state = cb_state->activeRenderPass;
7477     const auto active_subpass = cb_state->activeSubpass;
7478     auto rp_handle = HandleToUint64(rp_state->renderPass);
7479     if (!rp_state->hasSelfDependency[active_subpass]) {
7480         skip |=
7481             log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
7482                     VALIDATION_ERROR_1b800928,
7483                     "%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64 " with no self-dependency specified.",
7484                     funcName, active_subpass, rp_handle);
7485     } else {
7486         assert(rp_state->subpass_to_dependency_index[cb_state->activeSubpass] != -1);
7487         // Grab ref to current subpassDescription up-front for use below
7488         const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
7489         const auto &sub_dep = rp_state->createInfo.pDependencies[rp_state->subpass_to_dependency_index[active_subpass]];
7490         const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
7491         const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
7492         if ((sub_src_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
7493             (src_stage_mask != (sub_src_stage_mask & src_stage_mask))) {
7494             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7495                             rp_handle, VALIDATION_ERROR_1b80092a,
7496                             "%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask(0x%X) of subpass "
7497                             "%d of renderPass 0x%" PRIx64 ".",
7498                             funcName, src_stage_mask, sub_src_stage_mask, active_subpass, rp_handle);
7499         }
7500         if ((sub_dst_stage_mask != VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) &&
7501             (dst_stage_mask != (sub_dst_stage_mask & dst_stage_mask))) {
7502             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7503                             rp_handle, VALIDATION_ERROR_1b80092c,
7504                             "%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask(0x%X) of subpass "
7505                             "%d of renderPass 0x%" PRIx64 ".",
7506                             funcName, dst_stage_mask, sub_dst_stage_mask, active_subpass, rp_handle);
7507         }
7508         if (0 != buffer_mem_barrier_count) {
7509             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7510                             rp_handle, VALIDATION_ERROR_1b800934,
7511                             "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass 0x%" PRIx64 ".", funcName,
7512                             buffer_mem_barrier_count, active_subpass, rp_handle);
7513         }
7514         const auto &sub_src_access_mask = sub_dep.srcAccessMask;
7515         const auto &sub_dst_access_mask = sub_dep.dstAccessMask;
7516         for (uint32_t i = 0; i < mem_barrier_count; ++i) {
7517             const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
7518             if (mb_src_access_mask != (sub_src_access_mask & mb_src_access_mask)) {
7519                 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7520                                 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b80092e,
7521                                 "%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
7522                                 "srcAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7523                                 funcName, i, mb_src_access_mask, sub_src_access_mask, active_subpass, rp_handle);
7524             }
7525             const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
7526             if (mb_dst_access_mask != (sub_dst_access_mask & mb_dst_access_mask)) {
7527                 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7528                                 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle, VALIDATION_ERROR_1b800930,
7529                                 "%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
7530                                 "dstAccessMask(0x%X) of subpass %d of renderPass 0x%" PRIx64 ".",
7531                                 funcName, i, mb_dst_access_mask, sub_dst_access_mask, active_subpass, rp_handle);
7532             }
7533         }
7534         skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle,
7535                                                 sub_src_access_mask, sub_dst_access_mask, image_mem_barrier_count, image_barriers);
7536         if (sub_dep.dependencyFlags != dependency_flags) {
7537             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
7538                             rp_handle, VALIDATION_ERROR_1b800932,
7539                             "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value (0x%X) for "
7540                             "subpass %d of renderPass 0x%" PRIx64 ".",
7541                             funcName, dependency_flags, sub_dep.dependencyFlags, cb_state->activeSubpass, rp_handle);
7542         }
7543     }
7544     return skip;
7545 }
7546
7547 // Array to map each accessMask bit to its corresponding stageMask
7548 //  accessMask active bit position (0-31) maps to index
7549 const static VkPipelineStageFlags AccessMaskToPipeStage[20] = {
7550     // VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
7551     VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
7552     // VK_ACCESS_INDEX_READ_BIT = 1
7553     VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
7554     // VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
7555     VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
7556     // VK_ACCESS_UNIFORM_READ_BIT = 3
7557     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7558         VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7559         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
7560     // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
7561     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
7562     // VK_ACCESS_SHADER_READ_BIT = 5
7563     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7564         VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7565         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
7566     // VK_ACCESS_SHADER_WRITE_BIT = 6
7567     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
7568         VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
7569         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
7570     // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
7571     VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
7572     // VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
7573     VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
7574     // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
7575     VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
7576     // VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
7577     VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
7578     // VK_ACCESS_TRANSFER_READ_BIT = 11
7579     VK_PIPELINE_STAGE_TRANSFER_BIT,
7580     // VK_ACCESS_TRANSFER_WRITE_BIT = 12
7581     VK_PIPELINE_STAGE_TRANSFER_BIT,
7582     // VK_ACCESS_HOST_READ_BIT = 13
7583     VK_PIPELINE_STAGE_HOST_BIT,
7584     // VK_ACCESS_HOST_WRITE_BIT = 14
7585     VK_PIPELINE_STAGE_HOST_BIT,
7586     // VK_ACCESS_MEMORY_READ_BIT = 15
7587     VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
7588     // VK_ACCESS_MEMORY_WRITE_BIT = 16
7589     VK_ACCESS_FLAG_BITS_MAX_ENUM,  // Always match
7590     // VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
7591     VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
7592     // VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
7593     VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
7594 };
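// Maintenance note (assumption based on how the table is indexed): ValidateAccessMaskPipelineStage
// below looks entries up directly by access-bit position, so when an extension introduces a new
// VkAccessFlagBits value, a matching stage-mask entry (and, if needed, a larger array size) must be
// added here.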
7595
7596 // Verify that all bits of access_mask are supported by the src_stage_mask
7597 static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
7598     // Early out if all commands set, or access_mask is 0
7599     if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
7600
7601     stage_mask = ExpandPipelineStageFlags(stage_mask);
7602     int index = 0;
7603     // for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
7604     while (access_mask) {
7605         index = (u_ffs(access_mask) - 1);
7606         assert(index >= 0);
7607         // Explicitly compare against 0 (rather than relying on implicit conversion to bool) to prevent warning from MSVC
7608         if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false;  // early out
7609         access_mask &= ~(1 << index);                                        // Mask off bit that's been checked
7610     }
7611     return true;
7612 }
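// Illustrative example (values chosen for exposition only): a barrier with
// srcAccessMask = VK_ACCESS_SHADER_READ_BIT and srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT fails
// this check, because the table entry for bit 5 lists only shader stages; including
// VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT (or VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) would make it pass.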
7613
7614 namespace barrier_queue_families {
7615 enum VuIndex {
7616     kSrcOrDstMustBeIgnore,
7617     kSpecialOrIgnoreOnly,
7618     kSrcIgnoreRequiresDstIgnore,
7619     kDstValidOrSpecialIfNotIgnore,
7620     kSrcValidOrSpecialIfNotIgnore,
7621     kSrcAndDestMustBeIgnore,
7622     kBothIgnoreOrBothValid,
7623     kSubmitQueueMustMatchSrcOrDst
7624 };
7625 static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
7626                                    "Source or destination queue family must be special or ignored.",
7627                                    "Destination queue family must be ignored if source queue family is.",
7628                                    "Destination queue family must be valid, ignored, or special.",
7629                                    "Source queue family must be valid, ignored, or special.",
7630                                    "Source and destination queue family must both be ignored.",
7631                                    "Source and destination queue family must both be ignore or both valid.",
7632                                    "Source or destination queue family must match submit queue family, if not ignored."};
7633
7634 static const UNIQUE_VALIDATION_ERROR_CODE image_error_codes[] = {
7635     VALIDATION_ERROR_0a000aca,  //  VUID-VkImageMemoryBarrier-image-01381 -- kSrcOrDstMustBeIgnore
7636     VALIDATION_ERROR_0a000dcc,  //  VUID-VkImageMemoryBarrier-image-01766 -- kSpecialOrIgnoreOnly
7637     VALIDATION_ERROR_0a000962,  //  VUID-VkImageMemoryBarrier-image-01201 -- kSrcIgnoreRequiresDstIgnore
7638     VALIDATION_ERROR_0a000dd0,  //  VUID-VkImageMemoryBarrier-image-01768 -- kDstValidOrSpecialIfNotIgnore
7639     VALIDATION_ERROR_0a000dce,  //  VUID-VkImageMemoryBarrier-image-01767 -- kSrcValidOrSpecialIfNotIgnore
7640     VALIDATION_ERROR_0a00095e,  //  VUID-VkImageMemoryBarrier-image-01199 -- kSrcAndDestMustBeIgnore
7641     VALIDATION_ERROR_0a000960,  //  VUID-VkImageMemoryBarrier-image-01200 -- kBothIgnoreOrBothValid
7642     VALIDATION_ERROR_0a00096a,  //  VUID-VkImageMemoryBarrier-image-01205 -- kSubmitQueueMustMatchSrcOrDst
7643 };
7644
7645 static const UNIQUE_VALIDATION_ERROR_CODE buffer_error_codes[] = {
7646     VALIDATION_ERROR_0180094e,  //  VUID-VkBufferMemoryBarrier-buffer-01191 -- kSrcOrDstMustBeIgnore
7647     VALIDATION_ERROR_01800dc6,  //  VUID-VkBufferMemoryBarrier-buffer-01763 -- kSpecialOrIgnoreOnly
7648     VALIDATION_ERROR_01800952,  //  VUID-VkBufferMemoryBarrier-buffer-01193 -- kSrcIgnoreRequiresDstIgnore
7649     VALIDATION_ERROR_01800dca,  //  VUID-VkBufferMemoryBarrier-buffer-01765 -- kDstValidOrSpecialIfNotIgnore
7650     VALIDATION_ERROR_01800dc8,  //  VUID-VkBufferMemoryBarrier-buffer-01764 -- kSrcValidOrSpecialIfNotIgnore
7651     VALIDATION_ERROR_0180094c,  //  VUID-VkBufferMemoryBarrier-buffer-01190 -- kSrcAndDestMustBeIgnore
7652     VALIDATION_ERROR_01800950,  //  VUID-VkBufferMemoryBarrier-buffer-01192 -- kBothIgnoreOrBothValid
7653     VALIDATION_ERROR_01800958,  //  VUID-VkBufferMemoryBarrier-buffer-01196 -- kSubmitQueueMustMatchSrcOrDst
7654 };
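// Note: vu_summary, image_error_codes, and buffer_error_codes are all indexed by VuIndex, so the
// three arrays must be kept in the same order as the enum when new VUs are added.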
7655
7656 class ValidatorState {
7657    public:
7658     ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7659                    const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
7660                    const UNIQUE_VALIDATION_ERROR_CODE *val_codes)
7661         : report_data_(device_data->report_data),
7662           func_name_(func_name),
7663           cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
7664           barrier_handle64_(barrier_handle64),
7665           sharing_mode_(sharing_mode),
7666           object_type_(object_type),
7667           val_codes_(val_codes),
7668           limit_(static_cast<uint32_t>(device_data->phys_dev_properties.queue_family_properties.size())),
7669           mem_ext_(device_data->extensions.vk_khr_external_memory) {}
7670
7671     // Create a validator state from an image state... reducing the image-specific data to the generic version.
7672     ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7673                    const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
7674         : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
7675                          kVulkanObjectTypeImage, image_error_codes) {}
7676
7677     // Create a validator state from a buffer state... reducing the buffer-specific data to the generic version.
7678     ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
7679                    const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
7680         : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
7681                          kVulkanObjectTypeBuffer, buffer_error_codes) {}
7682
7683     // Log the message using boilerplate from the object state, plus VU-specific information selected by vu_index.
7684     // One- and two-family versions; in the single-family version, param_name holds the name of the passed parameter.
7685     bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
7686         const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
7687         const char *annotation = GetFamilyAnnotation(family);
7688         return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
7689                        val_code, "%s: Barrier using %s 0x%" PRIx64 " created with sharingMode %s, has %s %u%s. %s", func_name_,
7690                        GetTypeString(), barrier_handle64_, GetModeString(), param_name, family, annotation, vu_summary[vu_index]);
7691     }
7692
7693     bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
7694         const UNIQUE_VALIDATION_ERROR_CODE val_code = val_codes_[vu_index];
7695         const char *src_annotation = GetFamilyAnnotation(src_family);
7696         const char *dst_annotation = GetFamilyAnnotation(dst_family);
7697         return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
7698                        val_code,
7699                        "%s: Barrier using %s 0x%" PRIx64
7700                        " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
7701                        func_name_, GetTypeString(), barrier_handle64_, GetModeString(), src_family, src_annotation, dst_family,
7702                        dst_annotation, vu_summary[vu_index]);
7703     }
7704
7705     // This abstract VU can only be tested at submit time, thus we need a callback from the closure containing the needed
7706     // data. Note that the mem_barrier data is copied into the closure, as the lambda's lifespan exceeds the guarantees of
7707     // validity for application input.
7708     static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
7709                                       const ValidatorState &val) {
7710         auto queue_data_it = device_data->queueMap.find(queue);
7711         if (queue_data_it == device_data->queueMap.end()) return false;
7712
7713         uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
7714         if ((src_family != queue_family) && (dst_family != queue_family)) {
7715             const UNIQUE_VALIDATION_ERROR_CODE val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
7716             const char *src_annotation = val.GetFamilyAnnotation(src_family);
7717             const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
7718             return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
7719                            HandleToUint64(queue), val_code,
7720                            "%s: Barrier submitted to queue with family index %u, using %s 0x%" PRIx64
7721                            " created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
7722                            "vkQueueSubmit", queue_family, val.GetTypeString(), val.barrier_handle64_, val.GetModeString(),
7723                            src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
7724         }
7725         return false;
7726     }
7727     // Logical helpers for semantic clarity
7728     inline bool KhrExternalMem() const { return mem_ext_; }
7729     inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
7730     inline bool IsSpecial(uint32_t queue_family) const {
7731         return (queue_family == VK_QUEUE_FAMILY_EXTERNAL_KHR) || (queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT);
7732     }
7733     inline bool IsValidOrSpecial(uint32_t queue_family) const {
7734         return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
7735     }
7736     inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }
7737
7738     // Helpers for LogMsg (and log_msg)
7739     const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
7740
7741     // Descriptive text for the various types of queue family index
7742     const char *GetFamilyAnnotation(uint32_t family) const {
7743         const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
7744         const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
7745         const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
7746         const char *valid = " (VALID)";
7747         const char *invalid = " (INVALID)";
7748         switch (family) {
7749             case VK_QUEUE_FAMILY_EXTERNAL_KHR:
7750                 return external;
7751             case VK_QUEUE_FAMILY_FOREIGN_EXT:
7752                 return foreign;
7753             case VK_QUEUE_FAMILY_IGNORED:
7754                 return ignored;
7755             default:
7756                 if (IsValid(family)) {
7757                     return valid;
7758                 }
7759                 return invalid;
7760         }
7761     }
7762     const char *GetTypeString() const { return object_string[object_type_]; }
7763     VkSharingMode GetSharingMode() const { return sharing_mode_; }
7764
7765    protected:
7766     const debug_report_data *const report_data_;
7767     const char *const func_name_;
7768     const uint64_t cb_handle64_;
7769     const uint64_t barrier_handle64_;
7770     const VkSharingMode sharing_mode_;
7771     const VulkanObjectType object_type_;
7772     const UNIQUE_VALIDATION_ERROR_CODE *val_codes_;
7773     const uint32_t limit_;
7774     const bool mem_ext_;
7775 };
7776
7777 bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
7778               const uint32_t src_queue_family, const uint32_t dst_queue_family) {
7779     bool skip = false;
7780
7781     const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
7782     const bool src_ignored = val.IsIgnored(src_queue_family);
7783     const bool dst_ignored = val.IsIgnored(dst_queue_family);
7784     if (val.KhrExternalMem()) {
7785         if (mode_concurrent) {
7786             if (!(src_ignored || dst_ignored)) {
7787                 skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
7788             }
7789             if ((src_ignored && !(dst_ignored || val.IsSpecial(dst_queue_family))) ||
7790                 (dst_ignored && !(src_ignored || val.IsSpecial(src_queue_family)))) {
7791                 skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
7792             }
7793         } else {
7794             // VK_SHARING_MODE_EXCLUSIVE
7795             if (src_ignored && !dst_ignored) {
7796                 skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
7797             }
7798             if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
7799                 skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
7800             }
7801             if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
7802                 skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
7803             }
7804         }
7805     } else {
7806         // No memory extension
7807         if (mode_concurrent) {
7808             if (!src_ignored || !dst_ignored) {
7809                 skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
7810             }
7811         } else {
7812             // VK_SHARING_MODE_EXCLUSIVE
7813             if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
7814                 skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
7815             }
7816         }
7817     }
7818     if (!mode_concurrent && !src_ignored && !dst_ignored) {
7819         // Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
7820         // TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
7821         // Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
7822         // to a local queue of update_state_actions or something.
7823         cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
7824             return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
7825         });
7826     }
7827     return skip;
7828 }
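// Illustrative example (assumed values): without VK_KHR_external_memory enabled, an exclusive-sharing
// image barrier with srcQueueFamilyIndex = 0 and dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED takes
// the kBothIgnoreOrBothValid path above, since the two families are neither both ignored nor both valid.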
7829 }  // namespace barrier_queue_families
7830
7831 // Type specific wrapper for image barriers
7832 bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
7833                                   const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
7834     // State data is required
7835     if (!state_data) {
7836         return false;
7837     }
7838
7839     // Create the validator state from the image state
7840     barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
7841     const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
7842     const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
7843     return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
7844 }
7845
7846 // Type specific wrapper for buffer barriers
7847 bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
7848                                   const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
7849     // State data is required
7850     if (!state_data) {
7851         return false;
7852     }
7853
7854     // Create the validator state from the buffer state
7855     barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
7856     const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
7857     const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
7858     return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
7859 }
7860
7861 static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
7862                              VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
7863                              const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7864                              const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7865                              const VkImageMemoryBarrier *pImageMemBarriers) {
7866     bool skip = false;
7867     for (uint32_t i = 0; i < memBarrierCount; ++i) {
7868         const auto &mem_barrier = pMemBarriers[i];
7869         if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
7870             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7871                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7872                             "%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7873                             mem_barrier.srcAccessMask, src_stage_mask);
7874         }
7875         if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
7876             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7877                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7878                             "%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7879                             mem_barrier.dstAccessMask, dst_stage_mask);
7880         }
7881     }
7882     for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7883         auto mem_barrier = &pImageMemBarriers[i];
7884         if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
7885             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7886                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7887                             "%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7888                             mem_barrier->srcAccessMask, src_stage_mask);
7889         }
7890         if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
7891             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7892                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7893                             "%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7894                             mem_barrier->dstAccessMask, dst_stage_mask);
7895         }
7896
7897         auto image_data = GetImageState(device_data, mem_barrier->image);
7898         skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);
7899
7900         if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7901             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7902                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0a00095c,
7903                             "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
7904         }
7905
7906         if (image_data) {
7907             // There is no VUID for this, but there is blanket text:
7908             //     "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
7909             //     recording commands in a command buffer."
7910             // TODO: Update this when VUID is defined
7911             skip |= ValidateMemoryIsBoundToImage(device_data, image_data, funcName, VALIDATION_ERROR_UNDEFINED);
7912
7913             auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
7914             skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
7915
7916             std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
7917             skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
7918                                                          param_name.c_str());
7919         }
7920     }
7921
7922     for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7923         auto mem_barrier = &pBufferMemBarriers[i];
7924         if (!mem_barrier) continue;
7925
7926         if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
7927             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7928                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800940,
7929                             "%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
7930                             mem_barrier->srcAccessMask, src_stage_mask);
7931         }
7932         if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
7933             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7934                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_1b800942,
7935                             "%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
7936                             mem_barrier->dstAccessMask, dst_stage_mask);
7937         }
7938         // Validate buffer barrier queue family indices
7939         auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
7940         skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);
7941
7942         if (buffer_state) {
7943             // There is no VUID for this, but there is blanket text:
7944             //     "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
7945             //     recording commands in a command buffer"
7946             // TODO: Update this when VUID is defined
7947             skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, funcName, VALIDATION_ERROR_UNDEFINED);
7948
7949             auto buffer_size = buffer_state->createInfo.size;
7950             if (mem_barrier->offset >= buffer_size) {
7951                 skip |= log_msg(
7952                     device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7953                     HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_01800946,
7954                     "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7955                     funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
7956                     HandleToUint64(buffer_size));
7957             } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7958                 skip |=
7959                     log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7960                             HandleToUint64(cb_state->commandBuffer), VALIDATION_ERROR_0180094a,
7961                             "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7962                             " whose sum is greater than total size 0x%" PRIx64 ".",
7963                             funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
7964                             HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
7965             }
7966         }
7967     }
7968     return skip;
7969 }
7970
7971 bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
7972                             VkPipelineStageFlags sourceStageMask) {
7973     bool skip = false;
7974     VkPipelineStageFlags stageMask = 0;
7975     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
7976     for (uint32_t i = 0; i < eventCount; ++i) {
7977         auto event = pCB->events[firstEventIndex + i];
7978         auto queue_data = dev_data->queueMap.find(queue);
7979         if (queue_data == dev_data->queueMap.end()) return false;
7980         auto event_data = queue_data->second.eventToStageMap.find(event);
7981         if (event_data != queue_data->second.eventToStageMap.end()) {
7982             stageMask |= event_data->second;
7983         } else {
7984             auto global_event_data = GetEventNode(dev_data, event);
7985             if (!global_event_data) {
7986                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7987                                 HandleToUint64(event), DRAWSTATE_INVALID_EVENT,
7988                                 "Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
7989             } else {
7990                 stageMask |= global_event_data->stageMask;
7991             }
7992         }
7993     }
7994     // TODO: Need to validate that host_bit is only set if set event is called
7995     // but set event can be called at any time.
7996     if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7997         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7998                         HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1e62d401,
7999                         "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
8000                         "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
8001                         "vkSetEvent but instead is 0x%X.",
8002                         sourceStageMask, stageMask);
8003     }
8004     return skip;
8005 }
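// Note: validateEventStageMask executes at queue submit time; CmdWaitEvents (below) registers it via
// cb_state->eventUpdates so that the eventToStageMap of the queue actually submitted to is consulted.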
8006
8007 // Note that we only check bits that HAVE required queue flags -- don't-care entries are skipped
8008 static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
8009     {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8010     {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
8011     {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8012     {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8013     {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8014     {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8015     {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8016     {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
8017     {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8018     {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
8019     {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
8020     {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
8021     {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
8022     {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
8023
8024 static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
8025                                                             VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
8026                                                             VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
8027                                                             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
8028                                                             VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
8029                                                             VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
8030                                                             VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
8031                                                             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
8032                                                             VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
8033                                                             VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
8034                                                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
8035                                                             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
8036                                                             VK_PIPELINE_STAGE_TRANSFER_BIT,
8037                                                             VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
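// Note (assumption from usage below): stage_flag_bit_array is expected to list exactly the keys of
// supported_pipeline_stages_table, since CheckStageMaskQueueCompatibility walks the array and looks
// each set bit up in the table; keep the two in sync when new stages are added.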
8038
8039 bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
8040                                       VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
8041                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
8042     bool skip = false;
8043     // Look up each bit in stage_mask and check for overlap between its table entry and queue_flags
8044     for (const auto &item : stage_flag_bit_array) {
8045         if (stage_mask & item) {
8046             if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
8047                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8048                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code,
8049                                 "%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
8050                                 function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
8051             }
8052         }
8053     }
8054     return skip;
8055 }
8056
8057 // Check if all barriers are of a given operation type.
8058 template <typename Barrier, typename OpCheck>
8059 static bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
8060     if (!pool) return false;
8061
8062     for (uint32_t b = 0; b < count; b++) {
8063         if (!op_check(pool, barriers + b)) return false;
8064     }
8065     return true;
8066 }
8067
8068 enum BarrierOperationsType {
8069     kAllAcquire,  // All Barrier operations are "ownership acquire" operations
8070     kAllRelease,  // All Barrier operations are "ownership release" operations
8071     kGeneral,     // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
8072 };
8073
8074 // Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
8075 BarrierOperationsType ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
8076                                                    const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count,
8077                                                    const VkImageMemoryBarrier *image_barriers) {
8078     auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
8079     BarrierOperationsType op_type = kGeneral;
8080
8081     // Look at the barrier details only if they exist
8082     // Note: AllTransferOp returns true for count == 0
8083     if ((buffer_barrier_count + image_barrier_count) != 0) {
8084         if (AllTransferOp(pool, IsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
8085             AllTransferOp(pool, IsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
8086             op_type = kAllRelease;
8087         } else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
8088                    AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
8089             op_type = kAllAcquire;
8090         }
8091     }
8092
8093     return op_type;
8094 }
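// The classification feeds ValidateStageMasksAgainstQueueCapabilities below: an all-acquire barrier set
// skips the srcStageMask queue-capability check and an all-release set skips the dstStageMask check,
// presumably because the other half of the ownership transfer executes on a different queue family.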
8095
8096 bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
8097                                                 VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
8098                                                 BarrierOperationsType barrier_op_type, const char *function,
8099                                                 UNIQUE_VALIDATION_ERROR_CODE error_code) {
8100     bool skip = false;
8101     uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
8102     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
8103     auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
8104
8105     // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
8106     // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
8107     // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
8108
8109     if (queue_family_index < physical_device_state->queue_family_properties.size()) {
8110         VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
8111
8112         // Only check the source stage mask if any barriers aren't "acquire ownership"
8113         if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8114             skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
8115                                                      function, "srcStageMask", error_code);
8116         }
8117         // Only check the dest stage mask if any barriers aren't "release ownership"
8118         if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
8119             skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
8120                                                      function, "dstStageMask", error_code);
8121         }
8122     }
8123     return skip;
8124 }
8125
8126 VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
8127                                          VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
8128                                          uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8129                                          uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8130                                          uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8131     bool skip = false;
8132     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8133     unique_lock_t lock(global_lock);
8134     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8135     if (cb_state) {
8136         auto barrier_op_type = ComputeBarrierOperationsType(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8137                                                             imageMemoryBarrierCount, pImageMemoryBarriers);
8138         skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, barrier_op_type,
8139                                                            "vkCmdWaitEvents", VALIDATION_ERROR_1e600918);
8140         skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e60090e,
8141                                              VALIDATION_ERROR_1e600912);
8142         skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", VALIDATION_ERROR_1e600910,
8143                                              VALIDATION_ERROR_1e600914);
8144         skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8145                                       VALIDATION_ERROR_1e602415);
8146         skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8147         skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
8148         skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
8149                                  pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
8150                                  pImageMemoryBarriers);
8151         if (!skip) {
8152             auto first_event_index = cb_state->events.size();
8153             for (uint32_t i = 0; i < eventCount; ++i) {
8154                 auto event_state = GetEventNode(dev_data, pEvents[i]);
8155                 if (event_state) {
8156                     addCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent},
8157                                             cb_state);
8158                     event_state->cb_bindings.insert(cb_state);
8159                 }
8160                 cb_state->waitedEvents.insert(pEvents[i]);
8161                 cb_state->events.push_back(pEvents[i]);
8162             }
8163             cb_state->eventUpdates.emplace_back(
8164                 [=](VkQueue q) { return validateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
8165             TransitionImageLayouts(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
8166         }
8167     }
8168     lock.unlock();
8169     if (!skip)
8170         dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8171                                                memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8172                                                imageMemoryBarrierCount, pImageMemoryBarriers);
8173 }
8174
8175 static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
8176                                               VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8177                                               uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8178                                               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8179                                               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8180     bool skip = false;
8181     auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
8182                                                         imageMemoryBarrierCount, pImageMemoryBarriers);
8183     skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, barrier_op_type,
8184                                                        "vkCmdPipelineBarrier", VALIDATION_ERROR_1b80093e);
8185     skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
8186                                   VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b802415);
8187     skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8188     skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800920,
8189                                          VALIDATION_ERROR_1b800924);
8190     skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()", VALIDATION_ERROR_1b800922,
8191                                          VALIDATION_ERROR_1b800926);
8192     if (cb_state->activeRenderPass) {
8193         skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
8194                                                    dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8195                                                    pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8196         if (skip) return true;  // Early return to avoid redundant errors from below calls
8197     }
8198     skip |=
8199         ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
8200     skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
8201                              pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
8202                              pImageMemoryBarriers);
8203     return skip;
8204 }
8205
8206 static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
8207                                             uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8208     TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
8209 }
8210
8211 VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
8212                                               VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
8213                                               uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8214                                               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8215                                               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8216     bool skip = false;
8217     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8218     unique_lock_t lock(global_lock);
8219     GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
8220     if (cb_state) {
8221         skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
8222                                                   memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8223                                                   pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8224         if (!skip) {
8225             PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8226         }
8227     } else {
8228         assert(0);
8229     }
8230     lock.unlock();
8231     if (!skip) {
8232         device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8233                                                        memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8234                                                        pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8235     }
8236 }
8237
8238 static bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8239     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8240     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8241     if (pCB) {
8242         pCB->queryToStateMap[object] = value;
8243     }
8244     auto queue_data = dev_data->queueMap.find(queue);
8245     if (queue_data != dev_data->queueMap.end()) {
8246         queue_data->second.queryToStateMap[object] = value;
8247     }
8248     return false;
8249 }
8250
8251 VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8252     bool skip = false;
8253     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8254     unique_lock_t lock(global_lock);
8255     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
8256     if (pCB) {
8257         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8258                                       VALIDATION_ERROR_17802415);
8259         skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8260     }
8261     lock.unlock();
8262
8263     if (skip) return;
8264
8265     dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8266
8267     lock.lock();
8268     if (pCB) {
8269         QueryObject query = {queryPool, slot};
8270         pCB->activeQueries.insert(query);
8271         pCB->startedQueries.insert(query);
8272         addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8273                                 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
8274     }
8275 }
8276
8277 VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8278     bool skip = false;
8279     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8280     unique_lock_t lock(global_lock);
8281     QueryObject query = {queryPool, slot};
8282     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8283     if (cb_state) {
8284         if (!cb_state->activeQueries.count(query)) {
8285             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8286                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1ae00f06,
8287                             "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d.", HandleToUint64(queryPool),
8288                             slot);
8289         }
8290         skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8291                                       VALIDATION_ERROR_1ae02415);
8292         skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "vkCmdEndQuery()");
8293     }
8294     lock.unlock();
8295
8296     if (skip) return;
8297
8298     dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
8299
8300     lock.lock();
8301     if (cb_state) {
8302         cb_state->activeQueries.erase(query);
8303         cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
8304         addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8305                                 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
8306     }
8307 }
8308
8309 VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8310                                              uint32_t queryCount) {
8311     bool skip = false;
8312     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8313     unique_lock_t lock(global_lock);
8314     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8315     skip |= insideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", VALIDATION_ERROR_1c600017);
8316     skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8317     skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8318                                   VALIDATION_ERROR_1c602415);
8319     lock.unlock();
8320
8321     if (skip) return;
8322
8323     dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8324
8325     lock.lock();
8326     for (uint32_t i = 0; i < queryCount; i++) {
8327         QueryObject query = {queryPool, firstQuery + i};
8328         cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
8329         cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, false); });
8330     }
8331     addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8332                             {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
8333 }
8334
8335 static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
8336     QueryObject query = {queryPool, queryIndex};
8337     auto query_data = queue_data->queryToStateMap.find(query);
8338     if (query_data != queue_data->queryToStateMap.end()) {
8339         if (!query_data->second) return true;
8340     } else {
8341         auto it = dev_data->queryToStateMap.find(query);
8342         if (it == dev_data->queryToStateMap.end() || !it->second) return true;
8343     }
8344
8345     return false;
8346 }
8347
8348 static bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8349     bool skip = false;
8350     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8351     auto queue_data = GetQueueState(dev_data, queue);
8352     if (!queue_data) return false;
8353     for (uint32_t i = 0; i < queryCount; i++) {
8354         if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
8355             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8356                             HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_QUERY,
8357                             "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8358                             HandleToUint64(queryPool), firstQuery + i);
8359         }
8360     }
8361     return skip;
8362 }
8363
8364 VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
8365                                                    uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
8366                                                    VkDeviceSize stride, VkQueryResultFlags flags) {
8367     bool skip = false;
8368     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8369     unique_lock_t lock(global_lock);
8370
8371     auto cb_node = GetCBNode(dev_data, commandBuffer);
8372     auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
8373     if (cb_node && dst_buff_state) {
8374         skip |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400674);
8375         // Validate that DST buffer has correct usage flags set
8376         skip |=
8377             ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_19400672,
8378                                      "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8379         skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdCopyQueryPoolResults()",
8380                                       VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_19402415);
8381         skip |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8382         skip |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_19400017);
8383     }
8384     lock.unlock();
8385
8386     if (skip) return;
8387
8388     dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
8389                                                      flags);
8390
8391     lock.lock();
8392     if (cb_node && dst_buff_state) {
8393         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
8394         cb_node->queryUpdates.emplace_back([=](VkQueue q) { return validateQuery(q, cb_node, queryPool, firstQuery, queryCount); });
8395         addCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
8396                                 {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_node);
8397     }
8398 }
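
// Illustrative sketch only (not part of the layer): an application-side recording that satisfies the checks above, assuming
// 'results_buffer' was created with VK_BUFFER_USAGE_TRANSFER_DST_BIT and bound to memory, and 'cmd' is recording on a
// graphics or compute queue family outside of a render pass. All handles and values here are hypothetical.
//
//     vkCmdCopyQueryPoolResults(cmd, pool, 0 /*firstQuery*/, 4 /*queryCount*/, results_buffer, 0 /*dstOffset*/,
//                               sizeof(uint64_t) /*stride*/, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);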
8399
8400 VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
8401                                             uint32_t offset, uint32_t size, const void *pValues) {
8402     bool skip = false;
8403     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8404     unique_lock_t lock(global_lock);
8405     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8406     if (cb_state) {
8407         skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
8408                                       VALIDATION_ERROR_1bc02415);
8409         skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8410     }
8411     skip |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8412     if (0 == stageFlags) {
8413         skip |=
8414             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8415                     HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc2dc03, "vkCmdPushConstants() call has no stageFlags set.");
8416     }
8417
8418     // Check that every stage in the command's stageFlags is covered by a VkPushConstantRange of pipeline_layout that
8419     // overlaps [offset, offset + size), *and* that the command's stageFlags includes every stage of each such overlapping range.
8420     if (!skip) {
8421         const auto &ranges = *getPipelineLayout(dev_data, layout)->push_constant_ranges;
8422         VkShaderStageFlags found_stages = 0;
8423         for (const auto &range : ranges) {
8424             if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
8425                 VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
8426                 if (matching_stages != range.stageFlags) {
8427                     // VALIDATION_ERROR_1bc00e08 VUID-vkCmdPushConstants-offset-01796
8428                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8429                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
8430                                     VALIDATION_ERROR_1bc00e08,
8431                                     "vkCmdPushConstants(): stageFlags (0x%" PRIx32 "), offset (%" PRIu32 "), and size (%" PRIu32
8432                                     ") "
8433                                     "must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
8434                                     "), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout 0x%" PRIx64 ".",
8435                                     (uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
8436                                     HandleToUint64(layout));
8437                 }
8438
8439                 // Accumulate all stages we've found
8440                 found_stages = matching_stages | found_stages;
8441             }
8442         }
8443         if (found_stages != stageFlags) {
8444             // VALIDATION_ERROR_1bc00e06 VUID-vkCmdPushConstants-offset-01795
8445             uint32_t missing_stages = ~found_stages & stageFlags;
8446             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8447                             HandleToUint64(commandBuffer), VALIDATION_ERROR_1bc00e06,
8448                             "vkCmdPushConstants(): stageFlags = 0x%" PRIx32 ", but the VkPushConstantRanges in pipeline layout 0x%" PRIx64
8449                             " overlapping offset = %u and size = %u do not contain stageFlags 0x%" PRIx32 ".",
8450                             (uint32_t)stageFlags, HandleToUint64(layout), offset, size, missing_stages);
8451         }
8452     }
8453     lock.unlock();
8454     if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8455 }
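
// Illustrative sketch only of the stageFlags/range matching rule enforced above, with hypothetical handles. If the pipeline
// layout was created with a single range covering bytes [0, 64) for the vertex and fragment stages:
//
//     VkPushConstantRange range = {VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 64};
//
// then an update falling inside that range must name both stages; naming only VK_SHADER_STAGE_VERTEX_BIT would trigger
// VALIDATION_ERROR_1bc00e08 above, while the following call passes:
//
//     vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 64, data);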
8456
8457 VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
8458                                              VkQueryPool queryPool, uint32_t slot) {
8459     bool skip = false;
8460     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
8461     unique_lock_t lock(global_lock);
8462     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
8463     if (cb_state) {
8464         skip |=
8465             ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()",
8466                                   VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT, VALIDATION_ERROR_1e802415);
8467         skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8468     }
8469     lock.unlock();
8470
8471     if (skip) return;
8472
8473     dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8474
8475     lock.lock();
8476     if (cb_state) {
8477         QueryObject query = {queryPool, slot};
8478         cb_state->queryUpdates.emplace_back([=](VkQueue q) { return setQueryState(q, commandBuffer, query, true); });
8479     }
8480 }
8481
8482 static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8483                        const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
8484                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
8485     bool skip = false;
8486
8487     for (uint32_t attach = 0; attach < count; attach++) {
8488         if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8489             // Attachment counts are verified elsewhere, but prevent an invalid access
8490             if (attachments[attach].attachment < fbci->attachmentCount) {
8491                 const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8492                 auto view_state = GetImageViewState(dev_data, *image_view);
8493                 if (view_state) {
8494                     const VkImageCreateInfo *ici = &GetImageState(dev_data, view_state->create_info.image)->createInfo;
8495                     if (ici != nullptr) {
8496                         if ((ici->usage & usage_flag) == 0) {
8497                             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8498                                             VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
8499                                             "vkCreateFramebuffer(): Framebuffer Attachment (%d) conflicts with the image's "
8500                                             "IMAGE_USAGE flags (%s).",
8501                                             attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
8502                         }
8503                     }
8504                 }
8505             }
8506         }
8507     }
8508     return skip;
8509 }
8510
8511 // Validate VkFramebufferCreateInfo which includes:
8512 // 1. attachmentCount equals renderPass attachmentCount
8513 // 2. corresponding framebuffer and renderpass attachments have matching formats
8514 // 3. corresponding framebuffer and renderpass attachments have matching sample counts
8515 // 4. fb attachments only have a single mip level
8516 // 5. fb attachment dimensions are each at least as large as the fb
8517 // 6. fb attachments use identity swizzle
8518 // 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8519 // 8. fb dimensions are within physical device limits
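//
// As an illustrative sketch only (all names below are hypothetical), a create info that satisfies the rules above could be
// built as follows, given single-mip, identity-swizzle views whose formats and sample counts match the render pass:
//
//     VkImageView attachments[] = {color_view, depth_view};
//     VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0,
//                                     render_pass, 2, attachments,
//                                     1920 /*width*/, 1080 /*height*/, 1 /*layers*/};
//     vkCreateFramebuffer(device, &fbci, nullptr, &framebuffer);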
8520 static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8521     bool skip = false;
8522
8523     auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
8524     if (rp_state) {
8525         const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
8526         if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8527             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8528                             HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006d8,
8529                             "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
8530                             "of %u of renderPass (0x%" PRIx64 ") being used to create Framebuffer.",
8531                             pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass));
8532         } else {
8533             // attachmentCounts match, so make sure corresponding attachment details line up
8534             const VkImageView *image_views = pCreateInfo->pAttachments;
8535             for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8536                 auto view_state = GetImageViewState(dev_data, image_views[i]);
8537                 auto &ivci = view_state->create_info;
8538                 if (ivci.format != rpci->pAttachments[i].format) {
8539                     skip |=
8540                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8541                                 HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e0,
8542                                 "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
8543                                 "match the format of %s used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
8544                                 i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
8545                                 HandleToUint64(pCreateInfo->renderPass));
8546                 }
8547                 const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
8548                 if (ici->samples != rpci->pAttachments[i].samples) {
8549                     skip |= log_msg(
8550                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8551                         HandleToUint64(pCreateInfo->renderPass), VALIDATION_ERROR_094006e2,
8552                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
8553                         "samples used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
8554                         i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8555                         HandleToUint64(pCreateInfo->renderPass));
8556                 }
8557                 // Verify that view only has a single mip level
8558                 if (ivci.subresourceRange.levelCount != 1) {
8559                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8560                                     0, VALIDATION_ERROR_094006e6,
8561                                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
8562                                     "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
8563                                     i, ivci.subresourceRange.levelCount);
8564                 }
8565                 const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
8566                 uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8567                 uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
8568                 if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8569                     (mip_height < pCreateInfo->height)) {
8570                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8571                                     0, VALIDATION_ERROR_094006e4,
8572                                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
8573                                     "smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
8574                                     "attachment #%u, framebuffer:\n"
8575                                     "width: %u, %u\n"
8576                                     "height: %u, %u\n"
8577                                     "layerCount: %u, %u\n",
8578                                     i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8579                                     pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
8580                 }
8581                 if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
8582                     ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
8583                     ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
8584                     ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
8585                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8586                                     0, VALIDATION_ERROR_094006e8,
8587                                     "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All "
8588                                     "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
8589                                     "swizzle values:\n"
8590                                     "r swizzle = %s\n"
8591                                     "g swizzle = %s\n"
8592                                     "b swizzle = %s\n"
8593                                     "a swizzle = %s\n",
8594                                     i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
8595                                     string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
8596                 }
8597             }
8598         }
8599         // Verify correct attachment usage flags
8600         for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8601             // Verify input attachments:
8602             skip |=
8603                 MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
8604                            pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_094006de);
8605             // Verify color attachments:
8606             skip |=
8607                 MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
8608                            pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_094006da);
8609             // Verify depth/stencil attachments:
8610             if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8611                 skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8612                                    VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_094006dc);
8613             }
8614         }
8615     }
8616     // Verify FB dimensions are within physical device limits
8617     if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
8618         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8619                         VALIDATION_ERROR_094006ec,
8620                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
8621                         "width: %u, device max: %u\n",
8622                         pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth);
8623     }
8624     if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
8625         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8626                         VALIDATION_ERROR_094006f0,
8627                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
8628                         "height: %u, device max: %u\n",
8629                         pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight);
8630     }
8631     if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
8632         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8633                         VALIDATION_ERROR_094006f4,
8634                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
8635                         "layers: %u, device max: %u\n",
8636                         pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
8637     }
8638     // Verify FB dimensions are greater than zero
8639     if (pCreateInfo->width <= 0) {
8640         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8641                         VALIDATION_ERROR_094006ea,
8642                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
8643     }
8644     if (pCreateInfo->height <= 0) {
8645         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8646                         VALIDATION_ERROR_094006ee,
8647                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
8648     }
8649     if (pCreateInfo->layers <= 0) {
8650         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8651                         VALIDATION_ERROR_094006f2,
8652                         "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
8653     }
8654     return skip;
8655 }
8656
8657 // Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8658 //  Return true if an error is encountered and callback returns true to skip call down chain
8659 //   false indicates that call down chain should proceed
8660 static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8661     // TODO : Verify that renderPass FB is created with is compatible with FB
8662     bool skip = false;
8663     skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8664     return skip;
8665 }
8666
8667 // CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
8668 static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
8669     // Shadow create info and store in map
8670     std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
8671         new FRAMEBUFFER_STATE(fb, pCreateInfo, GetRenderPassStateSharedPtr(dev_data, pCreateInfo->renderPass)));
8672
8673     for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8674         VkImageView view = pCreateInfo->pAttachments[i];
8675         auto view_state = GetImageViewState(dev_data, view);
8676         if (!view_state) {
8677             continue;
8678         }
8679         MT_FB_ATTACHMENT_INFO fb_info;
8680         fb_info.view_state = view_state;
8681         fb_info.image = view_state->create_info.image;
8682         fb_state->attachments.push_back(fb_info);
8683     }
8684     dev_data->frameBufferMap[fb] = std::move(fb_state);
8685 }
8686
8687 VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8688                                                  const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
8689     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8690     unique_lock_t lock(global_lock);
8691     bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
8692     lock.unlock();
8693
8694     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
8695
8696     VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8697
8698     if (VK_SUCCESS == result) {
8699         lock.lock();
8700         PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
8701         lock.unlock();
8702     }
8703     return result;
8704 }
8705
8706 static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
8707                            std::unordered_set<uint32_t> &processed_nodes) {
8708     // If we have already checked this node we have not found a dependency path so return false.
8709     if (processed_nodes.count(index)) return false;
8710     processed_nodes.insert(index);
8711     const DAGNode &node = subpass_to_node[index];
8712     // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8713     if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8714         for (auto elem : node.prev) {
8715             if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
8716         }
8717     } else {
8718         return true;
8719     }
8720     return false;
8721 }
8722
8723 static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
8724                                   const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
8725                                   bool &skip) {
8726     bool result = true;
8727     // Loop through all subpasses that share the same attachment and make sure a dependency exists
8728     for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8729         if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
8730         const DAGNode &node = subpass_to_node[subpass];
8731         // Check for a specified dependency between the two nodes. If one exists we are done.
8732         auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8733         auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8734         if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8735             // If no explicit dependency exists, an implicit one still might. If not, report an error.
8736             std::unordered_set<uint32_t> processed_nodes;
8737             if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8738                   FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8739                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8740                                 DRAWSTATE_INVALID_RENDERPASS,
8741                                 "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8742                                 dependent_subpasses[k]);
8743                 result = false;
8744             }
8745         }
8746     }
8747     return result;
8748 }
8749
8750 static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8751                            const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
8752     const DAGNode &node = subpass_to_node[index];
8753     // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8754     const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8755     for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8756         if (attachment == subpass.pColorAttachments[j].attachment) return true;
8757     }
8758     for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8759         if (attachment == subpass.pInputAttachments[j].attachment) return true;
8760     }
8761     if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8762         if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
8763     }
8764     bool result = false;
8765     // Loop through previous nodes and see if any of them write to the attachment.
8766     for (auto elem : node.prev) {
8767         result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
8768     }
8769     // If the attachment was written to by a previous node, then this node needs to preserve it.
8770     if (result && depth > 0) {
8771         bool has_preserved = false;
8772         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8773             if (subpass.pPreserveAttachments[j] == attachment) {
8774                 has_preserved = true;
8775                 break;
8776             }
8777         }
8778         if (!has_preserved) {
8779             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8780                             DRAWSTATE_INVALID_RENDERPASS,
8781                             "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8782         }
8783     }
8784     return result;
8785 }
8786
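// Range/region overlap helpers used by the attachment aliasing checks below. For example, isRangeOverlapping(0u, 8u, 4u, 8u)
// is true (elements 4..7 are shared), while isRangeOverlapping(0u, 4u, 4u, 4u) is false (the ranges only touch).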
8787 template <class T>
8788 bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8789     return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
8790            ((offset1 > offset2) && (offset1 < (offset2 + size2)));
8791 }
8792
8793 bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8794     return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8795             isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8796 }
8797
8798 static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
8799                                  RENDER_PASS_STATE const *renderPass) {
8800     bool skip = false;
8801     auto const pFramebufferInfo = framebuffer->createInfo.ptr();
8802     auto const pCreateInfo = renderPass->createInfo.ptr();
8803     auto const &subpass_to_node = renderPass->subpassToNode;
8804     std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8805     std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8806     std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8807     // Find overlapping attachments
8808     for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8809         for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8810             VkImageView viewi = pFramebufferInfo->pAttachments[i];
8811             VkImageView viewj = pFramebufferInfo->pAttachments[j];
8812             if (viewi == viewj) {
8813                 overlapping_attachments[i].push_back(j);
8814                 overlapping_attachments[j].push_back(i);
8815                 continue;
8816             }
8817             auto view_state_i = GetImageViewState(dev_data, viewi);
8818             auto view_state_j = GetImageViewState(dev_data, viewj);
8819             if (!view_state_i || !view_state_j) {
8820                 continue;
8821             }
8822             auto view_ci_i = view_state_i->create_info;
8823             auto view_ci_j = view_state_j->create_info;
8824             if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
8825                 overlapping_attachments[i].push_back(j);
8826                 overlapping_attachments[j].push_back(i);
8827                 continue;
8828             }
8829             auto image_data_i = GetImageState(dev_data, view_ci_i.image);
8830             auto image_data_j = GetImageState(dev_data, view_ci_j.image);
8831             if (!image_data_i || !image_data_j) {
8832                 continue;
8833             }
8834             if (image_data_i->binding.mem == image_data_j->binding.mem &&
8835                 isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
8836                                    image_data_j->binding.size)) {
8837                 overlapping_attachments[i].push_back(j);
8838                 overlapping_attachments[j].push_back(i);
8839             }
8840         }
8841     }
8842     for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8843         uint32_t attachment = i;
8844         for (auto other_attachment : overlapping_attachments[i]) {
8845             if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8846                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8847                                 HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
8848                                 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8849                                 attachment, other_attachment);
8850             }
8851             if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8852                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
8853                                 HandleToUint64(framebuffer->framebuffer), VALIDATION_ERROR_12200682,
8854                                 "Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8855                                 other_attachment, attachment);
8856             }
8857         }
8858     }
8859     // For each attachment, find the subpasses that use it.
8860     unordered_set<uint32_t> attachmentIndices;
8861     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8862         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8863         attachmentIndices.clear();
8864         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8865             uint32_t attachment = subpass.pInputAttachments[j].attachment;
8866             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8867             input_attachment_to_subpass[attachment].push_back(i);
8868             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8869                 input_attachment_to_subpass[overlapping_attachment].push_back(i);
8870             }
8871         }
8872         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8873             uint32_t attachment = subpass.pColorAttachments[j].attachment;
8874             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8875             output_attachment_to_subpass[attachment].push_back(i);
8876             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8877                 output_attachment_to_subpass[overlapping_attachment].push_back(i);
8878             }
8879             attachmentIndices.insert(attachment);
8880         }
8881         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8882             uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8883             output_attachment_to_subpass[attachment].push_back(i);
8884             for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8885                 output_attachment_to_subpass[overlapping_attachment].push_back(i);
8886             }
8887
8888             if (attachmentIndices.count(attachment)) {
8889                 skip |=
8890                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8891                             DRAWSTATE_INVALID_RENDERPASS,
8892                             "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
8893             }
8894         }
8895     }
8896     // If a dependency is needed, make sure one exists
8897     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8898         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8899         // If the attachment is an input, then all subpasses that write to it must have a dependency relationship
8900         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8901             uint32_t attachment = subpass.pInputAttachments[j].attachment;
8902             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8903             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8904         }
8905         // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8906         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8907             uint32_t attachment = subpass.pColorAttachments[j].attachment;
8908             if (attachment == VK_ATTACHMENT_UNUSED) continue;
8909             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8910             CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8911         }
8912         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8913             const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8914             CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
8915             CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
8916         }
8917     }
8918     // Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment is preserved in every
8919     // subpass between the one that wrote it and this read.
8920     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8921         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8922         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8923             CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
8924         }
8925     }
8926     return skip;
8927 }
8928
8929 static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
8930                           std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency,
8931                           std::vector<int32_t> &subpass_to_dep_index) {
8932     bool skip = false;
8933     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8934         DAGNode &subpass_node = subpass_to_node[i];
8935         subpass_node.pass = i;
8936         subpass_to_dep_index[i] = -1;  // Default to no dependency and overwrite below as needed
8937     }
8938     for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8939         const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8940         if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8941             if (dependency.srcSubpass == dependency.dstSubpass) {
8942                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8943                                 DRAWSTATE_INVALID_RENDERPASS, "The src and dest subpasses cannot both be external.");
8944             }
8945         } else if (dependency.srcSubpass > dependency.dstSubpass) {
8946             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8947                             DRAWSTATE_INVALID_RENDERPASS,
8948                             "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
8949         } else if (dependency.srcSubpass == dependency.dstSubpass) {
8950             has_self_dependency[dependency.srcSubpass] = true;
8951             subpass_to_dep_index[dependency.srcSubpass] = i;
8952         } else {
8953             subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8954             subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8955         }
8956     }
8957     return skip;
8958 }
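
// Illustrative sketch only (not part of the layer): a dependency declared between two subpasses as below yields a two-node
// DAG in which subpass_to_node[1].prev contains 0 and subpass_to_node[0].next contains 1. The stage/access masks shown are
// one valid pairing; all values are hypothetical.
//
//     VkSubpassDependency dep = {};
//     dep.srcSubpass = 0;
//     dep.dstSubpass = 1;
//     dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;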
8959
8960 VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8961                                                   const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
8962     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
8963     bool spirv_valid;
8964
8965     if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;
8966
8967     VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8968
8969     if (res == VK_SUCCESS) {
8970         lock_guard_t lock(global_lock);
8971         unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo) : new shader_module());
8972         dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
8973     }
8974     return res;
8975 }
8976
8977 static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
8978     bool skip = false;
8979     if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
8980         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8981                         VALIDATION_ERROR_12200684,
8982                         "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.", type,
8983                         attachment, attachment_count);
8984     }
8985     return skip;
8986 }
8987
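// True when exactly one bit of x is set. Used below to confirm that OR-ing together the single-bit VkSampleCountFlagBits of a
// subpass's color and depth/stencil attachments still yields a single bit, i.e. the attachments agree on sample count
// (unless VK_AMD_mixed_attachment_samples is enabled).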
8988 static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
8989
8990 static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
8991     bool skip = false;
8992     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8993         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8994         if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8995             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
8996                             VALIDATION_ERROR_14000698,
8997                             "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
8998         }
8999
9000         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9001             uint32_t attachment = subpass.pPreserveAttachments[j];
9002             if (attachment == VK_ATTACHMENT_UNUSED) {
9003                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9004                                 VALIDATION_ERROR_140006aa,
9005                                 "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
9006             } else {
9007                 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9008
9009                 bool found = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment);
9010                 for (uint32_t r = 0; !found && r < subpass.inputAttachmentCount; ++r) {
9011                     found = (subpass.pInputAttachments[r].attachment == attachment);
9012                 }
9013                 for (uint32_t r = 0; !found && r < subpass.colorAttachmentCount; ++r) {
9014                     found = (subpass.pColorAttachments[r].attachment == attachment) ||
9015                             (subpass.pResolveAttachments != NULL && subpass.pResolveAttachments[r].attachment == attachment);
9016                 }
9017                 if (found) {
9018                     skip |= log_msg(
9019                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9020                         VALIDATION_ERROR_140006ac,
9021                         "CreateRenderPass: subpass %u pPreserveAttachments[%u] (%u) must not be used elsewhere in the subpass.", i,
9022                         j, attachment);
9023                 }
9024             }
9025         }
9026
9027         auto subpass_performs_resolve =
9028             subpass.pResolveAttachments &&
9029             std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9030                         [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
9031
9032         unsigned sample_count = 0;
9033
9034         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9035             uint32_t attachment;
9036             if (subpass.pResolveAttachments) {
9037                 attachment = subpass.pResolveAttachments[j].attachment;
9038                 skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
9039
9040                 if (!skip && attachment != VK_ATTACHMENT_UNUSED &&
9041                     pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9042                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9043                                     0, VALIDATION_ERROR_140006a2,
9044                                     "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, which must "
9045                                     "have VK_SAMPLE_COUNT_1_BIT but has %s.",
9046                                     i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
9047                 }
9048
9049                 if (!skip && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
9050                     subpass.pColorAttachments[j].attachment == VK_ATTACHMENT_UNUSED) {
9051                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9052                                     0, VALIDATION_ERROR_1400069e,
9053                                     "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, but the "
9054                                     "corresponding color attachment is VK_ATTACHMENT_UNUSED.",
9055                                     i, attachment);
9056                 }
9057             }
9058             attachment = subpass.pColorAttachments[j].attachment;
9059             skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9060
9061             if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
9062                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9063
9064                 if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9065                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9066                                     0, VALIDATION_ERROR_140006a0,
9067                                     "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u which has "
9068                                     "VK_SAMPLE_COUNT_1_BIT.",
9069                                     i, attachment);
9070                 }
9071
9072                 if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
9073                     const auto &color_desc = pCreateInfo->pAttachments[attachment];
9074                     const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
9075                     if (color_desc.format != resolve_desc.format) {
9076                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9077                                         VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_140006a4,
9078                                         "CreateRenderPass:  Subpass %u pColorAttachments[%u] resolves to an attachment with a "
9079                                         "different format. color format: %u, resolve format: %u.",
9080                                         i, j, color_desc.format, resolve_desc.format);
9081                     }
9082                 }
9083
9084                 if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass.pDepthStencilAttachment &&
9085                     subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9086                     const auto depth_stencil_sample_count =
9087                         pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
9088                     if (pCreateInfo->pAttachments[attachment].samples > depth_stencil_sample_count) {
9089                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9090                                         VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_14000bc4,
9091                                         "CreateRenderPass:  Subpass %u pColorAttachments[%u] has %s which is larger than "
9092                                         "depth/stencil attachment %s.",
9093                                         i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
9094                                         string_VkSampleCountFlagBits(depth_stencil_sample_count));
9095                     }
9096                 }
9097             }
9098         }
9099
9100         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9101             uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9102             skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9103
9104             if (!skip && attachment != VK_ATTACHMENT_UNUSED) {
9105                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9106             }
9107         }
9108
9109         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9110             uint32_t attachment = subpass.pInputAttachments[j].attachment;
9111             skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
9112         }
9113
9114         if (!dev_data->extensions.vk_amd_mixed_attachment_samples && sample_count && !IsPowerOfTwo(sample_count)) {
9115             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9116                             VALIDATION_ERROR_0082b401,
9117                             "CreateRenderPass:  Subpass %u attempts to render to attachments with inconsistent sample counts.", i);
9118         }
9119     }
9120     return skip;
9121 }
9122
9123 static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
9124     if (index == VK_ATTACHMENT_UNUSED) return;
9125
9126     if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
9127 }
9128
9129 VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9130                                                 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
9131     bool skip = false;
9132     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9133
9134     unique_lock_t lock(global_lock);
9135     // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
9136     //       ValidateLayouts.
9137     skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
9138     for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9139         auto const &dependency = pCreateInfo->pDependencies[i];
9140         skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.srcStageMask, "vkCreateRenderPass()", VALIDATION_ERROR_13e006b8,
9141                                              VALIDATION_ERROR_13e006bc);
9142         skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.dstStageMask, "vkCreateRenderPass()", VALIDATION_ERROR_13e006ba,
9143                                              VALIDATION_ERROR_13e006be);
9144
9145         if (!ValidateAccessMaskPipelineStage(dependency.srcAccessMask, dependency.srcStageMask)) {
9146             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9147                             VALIDATION_ERROR_13e006c8,
9148                             "CreateRenderPass: pDependencies[%u].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", i,
9149                             dependency.srcAccessMask, dependency.srcStageMask);
9150         }
9151
9152         if (!ValidateAccessMaskPipelineStage(dependency.dstAccessMask, dependency.dstStageMask)) {
9153             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9154                             VALIDATION_ERROR_13e006ca,
9155                             "CreateRenderPass: pDependencies[%u].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", i,
9156                             dependency.dstAccessMask, dependency.dstStageMask);
9157         }
9158     }
9159     if (!skip) {
9160         skip |= ValidateLayouts(dev_data, device, pCreateInfo);
9161     }
9162     lock.unlock();
9163
9164     if (skip) {
9165         return VK_ERROR_VALIDATION_FAILED_EXT;
9166     }
9167
9168     VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9169
9170     if (VK_SUCCESS == result) {
9171         lock.lock();
9172
9173         std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9174         std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9175         std::vector<int32_t> subpass_to_dep_index(pCreateInfo->subpassCount);
9176         skip |= CreatePassDAG(dev_data, pCreateInfo, subpass_to_node, has_self_dependency, subpass_to_dep_index);
9177
9178         auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
9179         render_pass->renderPass = *pRenderPass;
9180         render_pass->hasSelfDependency = has_self_dependency;
9181         render_pass->subpassToNode = subpass_to_node;
9182         render_pass->subpass_to_dependency_index = subpass_to_dep_index;
9183
9184         for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9185             const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9186             for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9187                 MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
9188
9189                 // resolve attachments are considered to be written
9190                 if (subpass.pResolveAttachments) {
9191                     MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
9192                 }
9193             }
9194             if (subpass.pDepthStencilAttachment) {
9195                 MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
9196             }
9197             for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9198                 MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
9199             }
9200         }
9201
9202         dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
9203     }
9204     return result;
9205 }
9206
9207 static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
9208                                          UNIQUE_VALIDATION_ERROR_CODE error_code) {
9209     bool skip = false;
9210     if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9211         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9212                         HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
9213                         cmd_name);
9214     }
9215     return skip;
9216 }
9217
9218 static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9219     bool skip = false;
9220     const safe_VkFramebufferCreateInfo *pFramebufferInfo =
9221         &GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
9222     if (pRenderPassBegin->renderArea.offset.x < 0 ||
9223         (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9224         pRenderPassBegin->renderArea.offset.y < 0 ||
9225         (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9226         skip |= static_cast<bool>(log_msg(
9227             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9228             DRAWSTATE_INVALID_RENDER_AREA,
9229             "Cannot execute a render pass with renderArea not within the bounds of the framebuffer. RenderArea: x %d, y %d, width "
9230             "%d, height %d. Framebuffer: width %d, height %d.",
9231             pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9232             pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9233     }
9234     return skip;
9235 }
9236
// Determine whether the given [load|store]Op applies to an attachment of the given format: stencil-only formats check only the
// stencil[Load|Store]Op, color and depth-only formats check only the [load|store]Op, and combined depth/stencil formats check both.
9239 // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
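// For example (illustrative): with a VK_FORMAT_D24_UNORM_S8_UINT attachment and op == VK_ATTACHMENT_LOAD_OP_CLEAR, this returns
// true if either loadOp or stencilLoadOp is VK_ATTACHMENT_LOAD_OP_CLEAR, since both aspects are present in that format.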
9240 template <typename T>
9241 static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
9242     if (color_depth_op != op && stencil_op != op) {
9243         return false;
9244     }
9245     bool check_color_depth_load_op = !FormatIsStencilOnly(format);
9246     bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
9247
9248     return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
9249 }
9250
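// Validate and record state for vkCmdBeginRenderPass: clear-value coverage, render area bounds, render pass/framebuffer
// compatibility, and the attachment layout transitions for the first subpass.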
9251 VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
9252                                               VkSubpassContents contents) {
9253     bool skip = false;
9254     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9255     unique_lock_t lock(global_lock);
9256     GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
9257     auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
9258     auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9259     if (cb_node) {
9260         if (render_pass_state) {
9261             uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
9262             cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
9263
9264             for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
9265                 auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
9266                 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
9267                                                          VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9268                     clear_op_size = static_cast<uint32_t>(i) + 1;
9269                 }
9270             }
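            // For example (illustrative): if the highest-index attachment using VK_ATTACHMENT_LOAD_OP_CLEAR is attachment 3,
            // clear_op_size becomes 4 and pClearValues must contain at least 4 entries.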
9271
9272             if (clear_op_size > pRenderPassBegin->clearValueCount) {
9273                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9274                                 HandleToUint64(render_pass_state->renderPass), VALIDATION_ERROR_1200070c,
                                "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
                                "must be at least %u entries in the pClearValues array to cover the highest-index attachment in "
                                "renderPass 0x%" PRIx64
                                " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which requires %u entries. Note that the pClearValues "
                                "array is indexed by attachment number, so even if some pClearValues entries between 0 and %u "
                                "correspond to attachments that aren't cleared, they will be ignored.",
                                pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
                                clear_op_size, clear_op_size - 1);
9283             }
9284             skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9285             skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
9286                                                           GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
9287             if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
9288                 skip |= validateRenderPassCompatibility(dev_data, "render pass", render_pass_state, "framebuffer",
9289                                                         framebuffer->rp_state.get(), "vkCmdBeginRenderPass()",
9290                                                         VALIDATION_ERROR_12000710);
9291             }
9292             skip |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00017);
9293             skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
9294             skip |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_17a00019);
9295             skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
9296                                           VALIDATION_ERROR_17a02415);
9297             skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9298             cb_node->activeRenderPass = render_pass_state;
9299             // This is a shallow copy as that is all that is needed for now
9300             cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
9301             cb_node->activeSubpass = 0;
9302             cb_node->activeSubpassContents = contents;
9303             cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
9304             // Connect this framebuffer and its children to this cmdBuffer
9305             AddFramebufferBinding(dev_data, cb_node, framebuffer);
9306             // Connect this RP to cmdBuffer
9307             addCommandBufferBinding(&render_pass_state->cb_bindings,
9308                                     {HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_node);
9309             // transition attachments to the correct layouts for beginning of renderPass and first subpass
9310             TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
9311         }
9312     }
9313     lock.unlock();
9314     if (!skip) {
9315         dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9316     }
9317 }
9318
9319 VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9320     bool skip = false;
9321     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9322     unique_lock_t lock(global_lock);
9323     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9324     if (pCB) {
9325         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600019);
9326         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b602415);
9327         skip |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9328         skip |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_1b600017);
9329
        if (pCB->activeRenderPass) {  // Only check the subpass index when a render pass is actually active
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                HandleToUint64(commandBuffer), VALIDATION_ERROR_1b60071a,
                                "vkCmdNextSubpass(): Attempted to advance beyond final subpass.");
            }
        }
9336     }
9337     lock.unlock();
9338
9339     if (skip) return;
9340
9341     dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
9342
9343     if (pCB) {
9344         lock.lock();
9345         pCB->activeSubpass++;
9346         pCB->activeSubpassContents = contents;
9347         TransitionSubpassLayouts(dev_data, pCB, pCB->activeRenderPass, pCB->activeSubpass,
9348                                  GetFramebufferState(dev_data, pCB->activeRenderPassBeginInfo.framebuffer));
9349     }
9350 }
9351
9352 VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9353     bool skip = false;
9354     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9355     unique_lock_t lock(global_lock);
9356     auto pCB = GetCBNode(dev_data, commandBuffer);
9357     FRAMEBUFFER_STATE *framebuffer = NULL;
9358     if (pCB) {
9359         RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
9360         framebuffer = GetFramebufferState(dev_data, pCB->activeFramebuffer);
9361         if (rp_state) {
9362             if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
9363                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9364                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
9365                                 VALIDATION_ERROR_1b00071c, "vkCmdEndRenderPass(): Called before reaching final subpass.");
9366             }
9367         }
        skip |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000017);
9369         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_1b000019);
9370         skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT, VALIDATION_ERROR_1b002415);
9371         skip |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9372     }
9373     lock.unlock();
9374
9375     if (skip) return;
9376
9377     dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
9378
9379     if (pCB) {
9380         lock.lock();
9381         TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, framebuffer);
9382         pCB->activeRenderPass = nullptr;
9383         pCB->activeSubpass = 0;
9384         pCB->activeFramebuffer = VK_NULL_HANDLE;
9385     }
9386 }
9387
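// If the secondary command buffer's inheritance info names a framebuffer, it must match the primary command buffer's currently
// active framebuffer and must refer to a valid framebuffer object.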
9388 static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9389                                 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
9390     bool skip = false;
9391     if (!pSubCB->beginInfo.pInheritanceInfo) {
9392         return skip;
9393     }
9394     VkFramebuffer primary_fb = pCB->activeFramebuffer;
9395     VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9396     if (secondary_fb != VK_NULL_HANDLE) {
9397         if (primary_fb != secondary_fb) {
9398             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9399                             HandleToUint64(primaryBuffer), VALIDATION_ERROR_1b2000c6,
9400                             "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
9401                             " which has a framebuffer 0x%" PRIx64
9402                             " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
9403                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb));
9404         }
9405         auto fb = GetFramebufferState(dev_data, secondary_fb);
9406         if (!fb) {
9407             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9408                             HandleToUint64(primaryBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9409                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9410                             " which has invalid framebuffer 0x%" PRIx64 ".",
9411                             HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb));
9412             return skip;
9413         }
9414     }
9415     return skip;
9416 }
9417
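// Check that a secondary command buffer is compatible with the primary that will execute it: pipeline-statistics queries must be
// covered by the secondary's inherited statistics, no query type may be active in both buffers, and both command pools must come
// from the same queue family.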
9418 static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9419     bool skip = false;
9420     unordered_set<int> activeTypes;
9421     for (auto queryObject : pCB->activeQueries) {
9422         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9423         if (queryPoolData != dev_data->queryPoolMap.end()) {
9424             if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9425                 pSubCB->beginInfo.pInheritanceInfo) {
9426                 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9427                 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9428                     skip |= log_msg(
9429                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9430                         HandleToUint64(pCB->commandBuffer), VALIDATION_ERROR_1b2000d0,
9431                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9432                         " which has invalid active query pool 0x%" PRIx64
9433                         ". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
9434                         HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first));
9435                 }
9436             }
9437             activeTypes.insert(queryPoolData->second.createInfo.queryType);
9438         }
9439     }
9440     for (auto queryObject : pSubCB->startedQueries) {
9441         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9442         if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9443             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9444                             HandleToUint64(pCB->commandBuffer), DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER,
9445                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
9446                             " which has invalid active query pool 0x%" PRIx64
9447                             " of type %d but a query of that type has been started on secondary Cmd Buffer 0x%" PRIx64 ".",
9448                             HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first),
9449                             queryPoolData->second.createInfo.queryType, HandleToUint64(pSubCB->commandBuffer));
9450         }
9451     }
9452
9453     auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
9454     auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
9455     if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
9456         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9457                         HandleToUint64(pSubCB->commandBuffer), DRAWSTATE_INVALID_QUEUE_FAMILY,
9458                         "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIx64
9459                         " created in queue family %d has secondary command buffer 0x%" PRIx64 " created in queue family %d.",
9460                         HandleToUint64(pCB->commandBuffer), primary_pool->queueFamilyIndex, HandleToUint64(pSubCB->commandBuffer),
9461                         secondary_pool->queueFamilyIndex);
9462     }
9463
9464     return skip;
9465 }
9466
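// Validate and record vkCmdExecuteCommands: every element of pCommandBuffers must be a secondary command buffer, must be
// render-pass compatible when executed inside a render pass, and must honor the simultaneous-use rules; image layouts and query
// state recorded in the secondaries are propagated to the primary.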
9467 VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
9468                                               const VkCommandBuffer *pCommandBuffers) {
9469     bool skip = false;
9470     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
9471     unique_lock_t lock(global_lock);
9472     GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
9473     if (pCB) {
9474         GLOBAL_CB_NODE *pSubCB = NULL;
9475         for (uint32_t i = 0; i < commandBuffersCount; i++) {
9476             pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
9477             assert(pSubCB);
9478             if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9479                 skip |=
9480                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9481                             HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000b0,
9482                             "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%" PRIx64
9483                             " in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.",
9484                             HandleToUint64(pCommandBuffers[i]), i);
9485             } else if (pCB->activeRenderPass) {  // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9486                 if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
9487                     auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9488                     if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9489                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9490                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9491                                         VALIDATION_ERROR_1b2000c0,
9492                                         "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9493                                         ") executed within render pass (0x%" PRIx64
9494                                         ") must have had vkBeginCommandBuffer() called w/ "
9495                                         "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9496                                         HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->activeRenderPass->renderPass));
9497                     } else {
                        // Make sure the render pass is compatible with the parent command buffer's pass if the continue bit is set
9499                         if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
9500                             skip |= validateRenderPassCompatibility(dev_data, "primary command buffer", pCB->activeRenderPass,
9501                                                                     "secondary command buffer", secondary_rp_state,
9502                                                                     "vkCmdExecuteCommands()", VALIDATION_ERROR_1b2000c4);
9503                         }
9504                         //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
9505                         skip |=
9506                             validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB, "vkCmdExecuteCommands()");
9507                         if (!pSubCB->cmd_execute_commands_functions.empty()) {
                            //  Inherit the primary's activeFramebuffer while running the recorded validate functions
9509                             for (auto &function : pSubCB->cmd_execute_commands_functions) {
9510                                 skip |= function(pCB, pCB->activeFramebuffer);
9511                             }
9512                         }
9513                     }
9514                 }
9515             }
9516             // TODO(mlentine): Move more logic into this method
9517             skip |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9518             skip |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0, VALIDATION_ERROR_1b2000b2);
9519             if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9520                 if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
9521                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9522                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
9523                                     VALIDATION_ERROR_1b2000b4,
9524                                     "Attempt to simultaneously execute command buffer 0x%" PRIx64
9525                                     " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
9526                                     HandleToUint64(pCB->commandBuffer));
9527                 }
9528                 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that a secondary cmd buffer lacking SIMULTANEOUS_USE causes the primary to be treated as lacking it too
9530                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
9531                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
9532                                     DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE,
9533                                     "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9534                                     ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
9535                                     "command buffer (0x%" PRIx64
9536                                     ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even "
9537                                     "though it does.",
9538                                     HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->commandBuffer));
9539                     pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9540                 }
9541             }
9542             if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
9543                 skip |=
9544                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9545                             HandleToUint64(pCommandBuffers[i]), VALIDATION_ERROR_1b2000ca,
9546                             "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
9547                             ") cannot be submitted with a query in flight and inherited queries not supported on this device.",
9548                             HandleToUint64(pCommandBuffers[i]));
9549             }
9550             // TODO: separate validate from update! This is very tangled.
9551             // Propagate layout transitions to the primary cmd buffer
9552             for (auto ilm_entry : pSubCB->imageLayoutMap) {
9553                 if (pCB->imageLayoutMap.find(ilm_entry.first) != pCB->imageLayoutMap.end()) {
9554                     pCB->imageLayoutMap[ilm_entry.first].layout = ilm_entry.second.layout;
9555                 } else {
9556                     assert(ilm_entry.first.hasSubresource);
9557                     IMAGE_CMD_BUF_LAYOUT_NODE node;
9558                     if (!FindCmdBufLayout(dev_data, pCB, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
9559                         node.initialLayout = ilm_entry.second.initialLayout;
9560                     }
9561                     node.layout = ilm_entry.second.layout;
9562                     SetLayout(dev_data, pCB, ilm_entry.first, node);
9563                 }
9564             }
9565             pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9566             pCB->linkedCommandBuffers.insert(pSubCB);
9567             pSubCB->linkedCommandBuffers.insert(pCB);
9568             for (auto &function : pSubCB->queryUpdates) {
9569                 pCB->queryUpdates.push_back(function);
9570             }
9571             for (auto &function : pSubCB->queue_submit_functions) {
9572                 pCB->queue_submit_functions.push_back(function);
9573             }
9574         }
9575         skip |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", VALIDATION_ERROR_1b200019);
9576         skip |=
9577             ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
9578                                   VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, VALIDATION_ERROR_1b202415);
9579         skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9580     }
9581     lock.unlock();
9582     if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9583 }
9584
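// Validate vkMapMemory: the memory must be allocated from a HOST_VISIBLE memory type and the requested range must be valid; on
// success the mapped range is recorded and tracking data for the mapping is initialized.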
9585 VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
9586                                          void **ppData) {
9587     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9588
9589     bool skip = false;
9590     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9591     unique_lock_t lock(global_lock);
9592     DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
9593     if (mem_info) {
9594         auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
9595         skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
9596         if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
9597              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9599                            HandleToUint64(mem), VALIDATION_ERROR_31200554,
9600                            "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIx64 ".",
9601                            HandleToUint64(mem));
9602         }
9603     }
9604     skip |= ValidateMapMemRange(dev_data, mem, offset, size);
9605     lock.unlock();
9606
9607     if (!skip) {
9608         result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
9609         if (VK_SUCCESS == result) {
9610             lock.lock();
9611             // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
9612             storeMemRanges(dev_data, mem, offset, size);
9613             initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
9614             lock.unlock();
9615         }
9616     }
9617     return result;
9618 }
9619
9620 VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9621     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9622     bool skip = false;
9623
9624     unique_lock_t lock(global_lock);
9625     skip |= deleteMemRanges(dev_data, mem);
9626     lock.unlock();
9627     if (!skip) {
9628         dev_data->dispatch_table.UnmapMemory(device, mem);
9629     }
9630 }
9631
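// Verify that each flushed/invalidated range lies within the range that is currently mapped for its memory object.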
9632 static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
9633                                    const VkMappedMemoryRange *pMemRanges) {
9634     bool skip = false;
9635     for (uint32_t i = 0; i < memRangeCount; ++i) {
9636         auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
9637         if (mem_info) {
9638             if (pMemRanges[i].size == VK_WHOLE_SIZE) {
9639                 if (mem_info->mem_range.offset > pMemRanges[i].offset) {
9640                     skip |= log_msg(
9641                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9642                         HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055c,
9643                         "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
9644                         ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
9645                         funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
9646                 }
9647             } else {
9648                 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
9649                                               ? mem_info->alloc_info.allocationSize
9650                                               : (mem_info->mem_range.offset + mem_info->mem_range.size);
9651                 if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
9652                     (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
9653                     skip |=
9654                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9655                                 HandleToUint64(pMemRanges[i].memory), VALIDATION_ERROR_0c20055a,
9656                                 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
9657                                 ") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
9658                                 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9659                                 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
9660                 }
9661             }
9662         }
9663     }
9664     return skip;
9665 }
9666
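// For mappings backed by a shadow copy, verify the guard bytes on either side of the mapped range for underflow/overflow before
// copying the application's writes from the shadow copy back to the driver's mapping.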
9667 static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
9668                                                      const VkMappedMemoryRange *mem_ranges) {
9669     bool skip = false;
9670     for (uint32_t i = 0; i < mem_range_count; ++i) {
9671         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9672         if (mem_info) {
9673             if (mem_info->shadow_copy) {
9674                 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9675                                         ? mem_info->mem_range.size
9676                                         : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
9677                 char *data = static_cast<char *>(mem_info->shadow_copy);
9678                 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
9679                     if (data[j] != NoncoherentMemoryFillValue) {
9680                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9681                                         VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9682                                         MEMTRACK_INVALID_MAP, "Memory underflow was detected on mem obj 0x%" PRIx64,
9683                                         HandleToUint64(mem_ranges[i].memory));
9684                     }
9685                 }
9686                 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
9687                     if (data[j] != NoncoherentMemoryFillValue) {
9688                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9689                                         VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
9690                                         MEMTRACK_INVALID_MAP, "Memory overflow was detected on mem obj 0x%" PRIx64,
9691                                         HandleToUint64(mem_ranges[i].memory));
9692                     }
9693                 }
9694                 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
9695             }
9696         }
9697     }
9698     return skip;
9699 }
9700
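// After an invalidate, refresh the shadow copy from the driver's mapping so subsequent reads through the mapping observe the
// driver's current data.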
9701 static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
9702     for (uint32_t i = 0; i < mem_range_count; ++i) {
9703         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
9704         if (mem_info && mem_info->shadow_copy) {
9705             VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
9706                                     ? mem_info->mem_range.size
9707                                     : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
9708             char *data = static_cast<char *>(mem_info->shadow_copy);
9709             memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
9710         }
9711     }
9712 }
9713
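// Flush/invalidate ranges must respect VkPhysicalDeviceLimits::nonCoherentAtomSize: both offset and size must be multiples of the
// atom size, unless the size is VK_WHOLE_SIZE or the range extends to the end of the allocation.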
9714 static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
9715                                                   const VkMappedMemoryRange *mem_ranges) {
9716     bool skip = false;
9717     for (uint32_t i = 0; i < mem_range_count; ++i) {
9718         uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
9719         if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
9720             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9721                             HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c20055e,
9722                             "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
9723                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9724                             func_name, i, mem_ranges[i].offset, atom_size);
9725         }
9726         auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info && (mem_ranges[i].size != VK_WHOLE_SIZE) &&
9728             (mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
9729             (SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
9730             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9731                             HandleToUint64(mem_ranges->memory), VALIDATION_ERROR_0c200adc,
9732                             "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
9733                             ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
9734                             func_name, i, mem_ranges[i].size, atom_size);
9735         }
9736     }
9737     return skip;
9738 }
9739
9740 static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9741                                                    const VkMappedMemoryRange *mem_ranges) {
9742     bool skip = false;
9743     lock_guard_t lock(global_lock);
9744     skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
9745     skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
9746     skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
9747     return skip;
9748 }
9749
9750 VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9751                                                        const VkMappedMemoryRange *pMemRanges) {
9752     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9753     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9754
9755     if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9756         result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9757     }
9758     return result;
9759 }
9760
9761 static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9762                                                         const VkMappedMemoryRange *mem_ranges) {
9763     bool skip = false;
9764     lock_guard_t lock(global_lock);
9765     skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
9766     skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
9767     return skip;
9768 }
9769
9770 static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
9771                                                        const VkMappedMemoryRange *mem_ranges) {
9772     lock_guard_t lock(global_lock);
9773     // Update our shadow copy with modified driver data
9774     CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
9775 }
9776
9777 VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
9778                                                             const VkMappedMemoryRange *pMemRanges) {
9779     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9780     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9781
9782     if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
9783         result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9784         if (result == VK_SUCCESS) {
9785             PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
9786         }
9787     }
9788     return result;
9789 }
9790
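// Validate an image-memory binding: the binding state itself, the memory type compatibility, the required alignment and size from
// vkGetImageMemoryRequirements, and (when applicable) dedicated-allocation restrictions.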
9791 static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9792                                            VkDeviceSize memoryOffset, const char *api_name) {
9793     bool skip = false;
9794     if (image_state) {
9795         unique_lock_t lock(global_lock);
9796         // Track objects tied to memory
9797         uint64_t image_handle = HandleToUint64(image);
9798         skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
9799         if (!image_state->memory_requirements_checked) {
9800             // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
            // BindImageMemory, but it's implied because the memory being bound must conform to the VkMemoryRequirements returned
            // by vkGetImageMemoryRequirements()
9803             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9804                             image_handle, DRAWSTATE_INVALID_IMAGE,
9805                             "%s: Binding memory to image 0x%" PRIx64
9806                             " but vkGetImageMemoryRequirements() has not been called on that image.",
                            api_name, image_handle);
9808             // Make the call for them so we can verify the state
9809             lock.unlock();
9810             dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
9811             lock.lock();
9812         }
9813
9814         // Validate bound memory range information
9815         auto mem_info = GetMemObjInfo(dev_data, mem);
9816         if (mem_info) {
9817             skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9818                                                    image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
9819             skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
9820                                         VALIDATION_ERROR_1740082e);
9821         }
9822
9823         // Validate memory requirements alignment
9824         if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
9825             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9826                             image_handle, VALIDATION_ERROR_17400830,
9827                             "%s: memoryOffset is 0x%" PRIxLEAST64
9828                             " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
9829                             ", returned from a call to vkGetImageMemoryRequirements with image.",
9830                             api_name, memoryOffset, image_state->requirements.alignment);
9831         }
9832
9833         if (mem_info) {
9834             // Validate memory requirements size
9835             if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
9836                 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9837                                 image_handle, VALIDATION_ERROR_17400832,
9838                                 "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
9839                                 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
9840                                 ", returned from a call to vkGetImageMemoryRequirements with image.",
9841                                 api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
9842             }
9843
9844             // Validate dedicated allocation
9845             if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
9846                 // TODO: Add vkBindImageMemory2KHR error message when added to spec.
9847                 auto validation_error = VALIDATION_ERROR_UNDEFINED;
9848                 if (strcmp(api_name, "vkBindImageMemory()") == 0) {
9849                     validation_error = VALIDATION_ERROR_17400bca;
9850                 }
9851                 skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9853                             image_handle, validation_error,
9854                             "%s: for dedicated memory allocation 0x%" PRIxLEAST64
9855                             ", VkMemoryDedicatedAllocateInfoKHR::image 0x%" PRIXLEAST64 " must be equal to image 0x%" PRIxLEAST64
9856                             " and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
9857                             api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_image), image_handle, memoryOffset);
9858             }
9859         }
9860     }
9861     return skip;
9862 }
9863
9864 static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
9865                                           VkDeviceSize memoryOffset, const char *api_name) {
9866     if (image_state) {
9867         unique_lock_t lock(global_lock);
9868         // Track bound memory range information
9869         auto mem_info = GetMemObjInfo(dev_data, mem);
9870         if (mem_info) {
9871             InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
9872                                    image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
9873         }
9874
9875         // Track objects tied to memory
9876         uint64_t image_handle = HandleToUint64(image);
9877         SetMemBinding(dev_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage, api_name);
9878     }
9879 }
9880
9881 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9882     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9883     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9884     IMAGE_STATE *image_state;
9885     {
9886         unique_lock_t lock(global_lock);
9887         image_state = GetImageState(dev_data, image);
9888     }
9889     bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
9890     if (!skip) {
9891         result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
9892         if (result == VK_SUCCESS) {
9893             PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
9894         }
9895     }
9896     return result;
9897 }
9898
9899 static bool PreCallValidateBindImageMemory2(layer_data *dev_data, std::vector<IMAGE_STATE *> *image_state, uint32_t bindInfoCount,
9900                                             const VkBindImageMemoryInfoKHR *pBindInfos) {
9901     {
9902         unique_lock_t lock(global_lock);
9903         for (uint32_t i = 0; i < bindInfoCount; i++) {
9904             (*image_state)[i] = GetImageState(dev_data, pBindInfos[i].image);
9905         }
9906     }
9907     bool skip = false;
9908     char api_name[128];
9909     for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindImageMemory2() pBindInfos[%u]", i);
9911         skip |= PreCallValidateBindImageMemory(dev_data, pBindInfos[i].image, (*image_state)[i], pBindInfos[i].memory,
9912                                                pBindInfos[i].memoryOffset, api_name);
9913     }
9914     return skip;
9915 }
9916
9917 static void PostCallRecordBindImageMemory2(layer_data *dev_data, const std::vector<IMAGE_STATE *> &image_state,
9918                                            uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
9919     for (uint32_t i = 0; i < bindInfoCount; i++) {
9920         PostCallRecordBindImageMemory(dev_data, pBindInfos[i].image, image_state[i], pBindInfos[i].memory,
9921                                       pBindInfos[i].memoryOffset, "vkBindImageMemory2()");
9922     }
9923 }
9924
9925 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount,
9926                                                 const VkBindImageMemoryInfoKHR *pBindInfos) {
9927     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9928     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9929     std::vector<IMAGE_STATE *> image_state(bindInfoCount);
9930     if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
9931         result = dev_data->dispatch_table.BindImageMemory2(device, bindInfoCount, pBindInfos);
9932         if (result == VK_SUCCESS) {
9933             PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
9934         }
9935     }
9936     return result;
9937 }
9938
9939 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
9940                                                    const VkBindImageMemoryInfoKHR *pBindInfos) {
9941     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9942     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9943     std::vector<IMAGE_STATE *> image_state(bindInfoCount);
9944     if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
9945         result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
9946         if (result == VK_SUCCESS) {
9947             PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
9948         }
9949     }
9950     return result;
9951 }
9952
9953 VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9954     bool skip = false;
9955     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9956     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
9957     unique_lock_t lock(global_lock);
9958     auto event_state = GetEventNode(dev_data, event);
9959     if (event_state) {
9960         event_state->needsSignaled = false;
9961         event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9962         if (event_state->write_in_use) {
9963             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9964                             HandleToUint64(event), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
9965                             "Cannot call vkSetEvent() on event 0x%" PRIx64 " that is already in use by a command buffer.",
9966                             HandleToUint64(event));
9967         }
9968     }
9969     lock.unlock();
9970     // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9971     // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9972     // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9973     for (auto queue_data : dev_data->queueMap) {
9974         auto event_entry = queue_data.second.eventToStageMap.find(event);
9975         if (event_entry != queue_data.second.eventToStageMap.end()) {
9976             event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9977         }
9978     }
9979     if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
9980     return result;
9981 }
9982
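// Validate vkQueueBindSparse: the fence must be submittable, wait/signal semaphores must make forward progress possible, and
// sparse image bindings should have had their requirements queried and any required metadata aspect bound.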
9983 static bool PreCallValidateQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
9984                                            const VkBindSparseInfo *pBindInfo, VkFence fence) {
9985     auto pFence = GetFenceNode(dev_data, fence);
9986     bool skip = ValidateFenceForSubmit(dev_data, pFence);
9987     if (skip) {
9988         return true;
9989     }
9990
9991     unordered_set<VkSemaphore> signaled_semaphores;
9992     unordered_set<VkSemaphore> unsignaled_semaphores;
9993     unordered_set<VkSemaphore> internal_semaphores;
9994     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9995         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9996
9997         std::vector<SEMAPHORE_WAIT> semaphore_waits;
9998         std::vector<VkSemaphore> semaphore_signals;
9999         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10000             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10001             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10002             if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
10003                 if (unsignaled_semaphores.count(semaphore) ||
10004                     (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
10005                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10006                                     HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10007                                     "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10008                                     HandleToUint64(queue), HandleToUint64(semaphore));
10009                 } else {
10010                     signaled_semaphores.erase(semaphore);
10011                     unsignaled_semaphores.insert(semaphore);
10012                 }
10013             }
10014             if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
10015                 internal_semaphores.insert(semaphore);
10016             }
10017         }
10018         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10019             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10020             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10021             if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
10022                 if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
10023                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10024                                     HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10025                                     "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
10026                                     " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
10027                                     HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
10028                 } else {
10029                     unsignaled_semaphores.erase(semaphore);
10030                     signaled_semaphores.insert(semaphore);
10031                 }
10032             }
10033         }
        // Store the sparse-binding image_state and, after binding is complete, make sure that any images requiring metadata have it bound
10035         std::unordered_set<IMAGE_STATE *> sparse_images;
10036         // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
10037         for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
10038             const auto &image_bind = bindInfo.pImageBinds[i];
10039             auto image_state = GetImageState(dev_data, image_bind.image);
10040             sparse_images.insert(image_state);
10041             if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
10042                 // For now just warning if sparse image binding occurs without calling to get reqs first
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
                                "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
                                " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                                HandleToUint64(image_state->image));
10048             }
10049             for (uint32_t j = 0; j < image_bind.bindCount; ++j) {
10050                 if (image_bind.pBinds[j].flags & VK_IMAGE_ASPECT_METADATA_BIT) {
10051                     image_state->sparse_metadata_bound = true;
10052                 }
10053             }
10054         }
10055         for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
10056             auto image_state = GetImageState(dev_data, bindInfo.pImageOpaqueBinds[i].image);
10057             sparse_images.insert(image_state);
10058             if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
10059                 // For now just warning if sparse image binding occurs without calling to get reqs first
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), MEMTRACK_INVALID_STATE,
                                "vkQueueBindSparse(): Binding opaque sparse memory to image 0x%" PRIx64
                                " without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
                                HandleToUint64(image_state->image));
10065             }
10066         }
10067         for (const auto &sparse_image_state : sparse_images) {
10068             if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
10069                 // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(sparse_image_state->image), MEMTRACK_INVALID_STATE,
                                "vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
                                " which requires a metadata aspect but no binding with VK_IMAGE_ASPECT_METADATA_BIT set was made.",
                                HandleToUint64(sparse_image_state->image));
10075             }
10076         }
10077     }
10078
10079     return skip;
10080 }
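
// Record the effects of a queued sparse-bind operation: fence submission tracking, sparse memory bindings for buffers and images,
// and semaphore wait/signal bookkeeping.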
10081 static void PostCallRecordQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
10082                                           const VkBindSparseInfo *pBindInfo, VkFence fence) {
10083     uint64_t early_retire_seq = 0;
10084     auto pFence = GetFenceNode(dev_data, fence);
10085     auto pQueue = GetQueueState(dev_data, queue);
10086
10087     if (pFence) {
10088         if (pFence->scope == kSyncScopeInternal) {
10089             SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
10090             if (!bindInfoCount) {
10091                 // No work to do, just dropping a fence in the queue by itself.
10092                 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
10093                                                  std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
10094             }
10095         } else {
10096             // Retire work up until this fence early, we will not see the wait that corresponds to this signal
10097             early_retire_seq = pQueue->seq + pQueue->submissions.size();
10098             if (!dev_data->external_sync_warning) {
10099                 dev_data->external_sync_warning = true;
10100                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10101                         HandleToUint64(fence), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10102                         "vkQueueBindSparse(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
10103                         " will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
10104                         "objects.",
10105                         HandleToUint64(fence), HandleToUint64(queue));
10106             }
10107         }
10108     }
10109
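          // Record the state effects of each VkBindSparseInfo: update the sparse memory bindings for buffers and
          // images, then account for the semaphores it waits on and signals, mirroring vkQueueSubmit bookkeeping.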
10110     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10111         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10112         // Track objects tied to memory
10113         for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10114             for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10115                 auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
10116                 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10117                                     HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
10118             }
10119         }
10120         for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10121             for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10122                 auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
10123                 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
10124                                     HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
10125             }
10126         }
10127         for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10128             for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10129                 auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
10130                 // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
10131                 VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
10132                 SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
10133                                     HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
10134             }
10135         }
10136
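              // Track semaphore effects for this bind: waits on internally-scoped semaphores are chained back to their
              // signaling submission, while externally-scoped semaphores cannot be tracked and are only held in-use.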
10137         std::vector<SEMAPHORE_WAIT> semaphore_waits;
10138         std::vector<VkSemaphore> semaphore_signals;
10139         std::vector<VkSemaphore> semaphore_externals;
10140         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10141             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10142             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10143             if (pSemaphore) {
10144                 if (pSemaphore->scope == kSyncScopeInternal) {
10145                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
10146                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
10147                         pSemaphore->in_use.fetch_add(1);
10148                     }
10149                     pSemaphore->signaler.first = VK_NULL_HANDLE;
10150                     pSemaphore->signaled = false;
10151                 } else {
10152                     semaphore_externals.push_back(semaphore);
10153                     pSemaphore->in_use.fetch_add(1);
10154                     if (pSemaphore->scope == kSyncScopeExternalTemporary) {
10155                         pSemaphore->scope = kSyncScopeInternal;
10156                     }
10157                 }
10158             }
10159         }
10160         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10161             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10162             auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
10163             if (pSemaphore) {
10164                 if (pSemaphore->scope == kSyncScopeInternal) {
10165                     pSemaphore->signaler.first = queue;
10166                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
10167                     pSemaphore->signaled = true;
10168                     pSemaphore->in_use.fetch_add(1);
10169                     semaphore_signals.push_back(semaphore);
10170                 } else {
10171                     // Retire work up until this submit early, we will not see the wait that corresponds to this signal
10172                     early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
10173                     if (!dev_data->external_sync_warning) {
10174                         dev_data->external_sync_warning = true;
10175                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10176                                 HandleToUint64(semaphore), DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10177                                 "vkQueueBindSparse(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
10178                                 " will disable validation of preceding command buffer lifecycle states and the in-use status of "
10179                                 "associated objects.",
10180                                 HandleToUint64(semaphore), HandleToUint64(queue));
10181                     }
10182                 }
10183             }
10184         }
10185
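              // Each VkBindSparseInfo becomes one submission entry on the queue; only the final entry carries the
              // fence so that it retires after all binds from this call have completed.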
10186         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
10187                                          bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
10188     }
10189
10190     if (early_retire_seq) {
10191         RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
10192     }
10193 }
10194
10195 VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
10196                                                VkFence fence) {
10197     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10198     unique_lock_t lock(global_lock);
10199     bool skip = PreCallValidateQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
10200     lock.unlock();
10201
10202     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
10203
10204     VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10205
10206     lock.lock();
10207     PostCallRecordQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
10208     lock.unlock();
10209     return result;
10210 }
10211
10212 VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10213                                                const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10214     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10215     VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10216     if (result == VK_SUCCESS) {
10217         lock_guard_t lock(global_lock);
10218         SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
10219         sNode->signaler.first = VK_NULL_HANDLE;
10220         sNode->signaler.second = 0;
10221         sNode->signaled = false;
10222         sNode->scope = kSyncScopeInternal;
10223     }
10224     return result;
10225 }
10226
10227 static bool PreCallValidateImportSemaphore(layer_data *dev_data, VkSemaphore semaphore, const char *caller_name) {
10228     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10229     VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
10230     bool skip = false;
10231     if (sema_node) {
10232         skip |= ValidateObjectNotInUse(dev_data, sema_node, obj_struct, caller_name, VALIDATION_ERROR_UNDEFINED);
10233     }
10234     return skip;
10235 }
10236
10237 static void PostCallRecordImportSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10238                                           VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
10239     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10240     if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
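              // Sync FD handles and VK_SEMAPHORE_IMPORT_TEMPORARY_BIT imports have temporary transference: the imported
              // payload only applies to the next wait, after which the semaphore reverts to its internal state.
              // Any other import permanently replaces the payload.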
10241         if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
10242             sema_node->scope == kSyncScopeInternal) {
10243             sema_node->scope = kSyncScopeExternalTemporary;
10244         } else {
10245             sema_node->scope = kSyncScopeExternalPermanent;
10246         }
10247     }
10248 }
10249
10250 #ifdef VK_USE_PLATFORM_WIN32_KHR
10251 VKAPI_ATTR VkResult VKAPI_CALL
10252 ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
10253     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10254     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10255     bool skip =
10256         PreCallValidateImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
10257
10258     if (!skip) {
10259         result = dev_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
10260     }
10261
10262     if (result == VK_SUCCESS) {
10263         PostCallRecordImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore,
10264                                       pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
10265     }
10266     return result;
10267 }
10268 #endif
10269
10270 VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
10271     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10272     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10273     bool skip = PreCallValidateImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
10274
10275     if (!skip) {
10276         result = dev_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
10277     }
10278
10279     if (result == VK_SUCCESS) {
10280         PostCallRecordImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
10281                                       pImportSemaphoreFdInfo->flags);
10282     }
10283     return result;
10284 }
10285
10286 static void PostCallRecordGetSemaphore(layer_data *dev_data, VkSemaphore semaphore,
10287                                        VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
10288     SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
10289     if (sema_node && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10290         // Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
10291         sema_node->scope = kSyncScopeExternalPermanent;
10292     }
10293 }
10294
10295 #ifdef VK_USE_PLATFORM_WIN32_KHR
10296 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
10297                                                           const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
10298                                                           HANDLE *pHandle) {
10299     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10300     VkResult result = dev_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
10301
10302     if (result == VK_SUCCESS) {
10303         PostCallRecordGetSemaphore(dev_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
10304     }
10305     return result;
10306 }
10307 #endif
10308
10309 VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
10310     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10311     VkResult result = dev_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
10312
10313     if (result == VK_SUCCESS) {
10314         PostCallRecordGetSemaphore(dev_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
10315     }
10316     return result;
10317 }
10318
10319 static bool PreCallValidateImportFence(layer_data *dev_data, VkFence fence, const char *caller_name) {
10320     FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10321     bool skip = false;
10322     if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
10323         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10324                         HandleToUint64(fence), VALIDATION_ERROR_UNDEFINED,
10325                         "Cannot call %s on fence 0x%" PRIx64 " that is currently in use.", caller_name, HandleToUint64(fence));
10326     }
10327     return skip;
10328 }
10329
10330 static void PostCallRecordImportFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
10331                                       VkFenceImportFlagsKHR flags) {
10332     FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10333     if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
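              // Sync FD handles and VK_FENCE_IMPORT_TEMPORARY_BIT imports have temporary transference: the imported
              // payload only applies until the fence is next reset, after which it reverts to its internal state.
              // Any other import permanently replaces the payload.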
10334         if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
10335             fence_node->scope == kSyncScopeInternal) {
10336             fence_node->scope = kSyncScopeExternalTemporary;
10337         } else {
10338             fence_node->scope = kSyncScopeExternalPermanent;
10339         }
10340     }
10341 }
10342
10343 #ifdef VK_USE_PLATFORM_WIN32_KHR
10344 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
10345                                                          const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
10346     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10347     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10348     bool skip = PreCallValidateImportFence(dev_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
10349
10350     if (!skip) {
10351         result = dev_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
10352     }
10353
10354     if (result == VK_SUCCESS) {
10355         PostCallRecordImportFence(dev_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
10356                                   pImportFenceWin32HandleInfo->flags);
10357     }
10358     return result;
10359 }
10360 #endif
10361
10362 VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
10363     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10364     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10365     bool skip = PreCallValidateImportFence(dev_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
10366
10367     if (!skip) {
10368         result = dev_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
10369     }
10370
10371     if (result == VK_SUCCESS) {
10372         PostCallRecordImportFence(dev_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
10373     }
10374     return result;
10375 }
10376
10377 static void PostCallRecordGetFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
10378     FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
10379     if (fence_node) {
10380         if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
10381             // Export with reference transference becomes external
10382             fence_node->scope = kSyncScopeExternalPermanent;
10383         } else if (fence_node->scope == kSyncScopeInternal) {
10384             // Export with copy transference has a side effect of resetting the fence
10385             fence_node->state = FENCE_UNSIGNALED;
10386         }
10387     }
10388 }
10389
10390 #ifdef VK_USE_PLATFORM_WIN32_KHR
10391 VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
10392                                                       HANDLE *pHandle) {
10393     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10394     VkResult result = dev_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
10395
10396     if (result == VK_SUCCESS) {
10397         PostCallRecordGetFence(dev_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
10398     }
10399     return result;
10400 }
10401 #endif
10402
10403 VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
10404     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10405     VkResult result = dev_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
10406
10407     if (result == VK_SUCCESS) {
10408         PostCallRecordGetFence(dev_data, pGetFdInfo->fence, pGetFdInfo->handleType);
10409     }
10410     return result;
10411 }
10412
10413 VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
10414                                            const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10415     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10416     VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10417     if (result == VK_SUCCESS) {
10418         lock_guard_t lock(global_lock);
10419         dev_data->eventMap[*pEvent].needsSignaled = false;
10420         dev_data->eventMap[*pEvent].write_in_use = 0;
10421         dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10422     }
10423     return result;
10424 }
10425
10426 static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
10427                                               VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
10428                                               SWAPCHAIN_NODE *old_swapchain_state) {
10429     auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
10430
10431     // TODO: Revisit this; some of these rules are being relaxed.
10432
10433     // All physical devices and queue families are required to be able
10434     // to present to any native window on Android; require the
10435     // application to have established support on any other platform.
10436     if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
10437         auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
10438             // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
10439             return (qs.first.gpu == dev_data->physical_device) && qs.second;
10440         };
10441         const auto &support = surface_state->gpu_queue_support;
10442         bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
10443
10444         if (!is_supported) {
10445             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10446                         HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ec,
10447                         "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. "
10448                         "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support for "
10449                         "this surface with at least one queue family of this device.",
10450                         func_name))
10451                 return true;
10452         }
10453     }
10454
10455     if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
10456         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10457                     HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS,
10458                     "%s: surface has an existing swapchain other than oldSwapchain", func_name))
10459             return true;
10460     }
10461     if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
10462         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10463                     HandleToUint64(pCreateInfo->oldSwapchain), DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
10464                     "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
10465             return true;
10466     }
10467
10468     if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
10469         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10470                     HandleToUint64(dev_data->device), VALIDATION_ERROR_14600d32,
10471                     "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
10472                     pCreateInfo->imageExtent.height))
10473             return true;
10474     }
10475
10476     auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
10477     if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
10478         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
10479                     HandleToUint64(dev_data->physical_device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
10480                     "%s: surface capabilities not retrieved for this physical device", func_name))
10481             return true;
10482     } else {  // have valid capabilities
10483         auto &capabilities = physical_device_state->surfaceCapabilities;
10484         // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
10485         if (pCreateInfo->minImageCount < capabilities.minImageCount) {
10486             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10487                         HandleToUint64(dev_data->device), VALIDATION_ERROR_146009ee,
10488                         "%s called with minImageCount = %d, which is outside the bounds returned by "
10489                         "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
10490                         func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
10491                 return true;
10492         }
10493
10494         if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
10495             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10496                         HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f0,
10497                         "%s called with minImageCount = %d, which is outside the bounds returned by "
10498                         "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
10499                         func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
10500                 return true;
10501         }
10502
10503         // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
10504         if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
10505             (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
10506             (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
10507             (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
10508             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10509                         HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f4,
10510                         "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
10511                         "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
10512                         "maxImageExtent = (%d,%d).",
10513                         func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
10514                         capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
10515                         capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
10516                 return true;
10517         }
10518         // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
10519         // VkSurfaceCapabilitiesKHR::supportedTransforms.
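              // (preTransform & (preTransform - 1)) is non-zero exactly when more than one bit is set, so this test
              // rejects zero, multi-bit, and unsupported transform values in a single expression.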
10520         if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
10521             !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
10522             // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
10523             // it up a little at a time, and then log it:
10524             std::string errorString = "";
10525             char str[1024];
10526             // Here's the first part of the message:
10527             sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s).  Supported values are:\n", func_name,
10528                     string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
10529             errorString += str;
10530             for (int i = 0; i < 32; i++) {
10531                 // Build up the rest of the message:
10532                 if ((1 << i) & capabilities.supportedTransforms) {
10533                     const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
10534                     sprintf(str, "    %s\n", newStr);
10535                     errorString += str;
10536                 }
10537             }
10538             // Log the message that we've built up:
10539             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10540                         HandleToUint64(dev_data->device), VALIDATION_ERROR_146009fe, "%s.", errorString.c_str()))
10541                 return true;
10542         }
10543
10544         // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
10545         // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
10546         if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
10547             !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
10548             // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message.  Build
10549             // it up a little at a time, and then log it:
10550             std::string errorString = "";
10551             char str[1024];
10552             // Here's the first part of the message:
10553             sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s).  Supported values are:\n",
10554                     func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
10555             errorString += str;
10556             for (int i = 0; i < 32; i++) {
10557                 // Build up the rest of the message:
10558                 if ((1 << i) & capabilities.supportedCompositeAlpha) {
10559                     const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
10560                     sprintf(str, "    %s\n", newStr);
10561                     errorString += str;
10562                 }
10563             }
10564             // Log the message that we've built up:
10565             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10566                         HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a00, "%s.", errorString.c_str()))
10567                 return true;
10568         }
10569         // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
10570         if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
10571             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10572                         HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f6,
10573                         "%s called with a non-supported imageArrayLayers (i.e. %d).  Maximum value is %d.", func_name,
10574                         pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
10575                 return true;
10576         }
10577         // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
10578         if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
10579             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10580                         HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f8,
10581                         "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x).  Supported flag bits are 0x%08x.",
10582                         func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
10583                 return true;
10584         }
10585     }
10586
10587     // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
10588     if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
10589         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10590                     HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
10591                     "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
10592             return true;
10593     } else {
10594         // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
10595         bool foundFormat = false;
10596         bool foundColorSpace = false;
10597         bool foundMatch = false;
10598         for (auto const &format : physical_device_state->surface_formats) {
10599             if (pCreateInfo->imageFormat == format.format) {
10600                 // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
10601                 foundFormat = true;
10602                 if (pCreateInfo->imageColorSpace == format.colorSpace) {
10603                     foundMatch = true;
10604                     break;
10605                 }
10606             } else {
10607                 if (pCreateInfo->imageColorSpace == format.colorSpace) {
10608                     foundColorSpace = true;
10609                 }
10610             }
10611         }
10612         if (!foundMatch) {
10613             if (!foundFormat) {
10614                 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10615                             HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
10616                             "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
10617                             pCreateInfo->imageFormat))
10618                     return true;
10619             }
10620             if (!foundColorSpace) {
10621                 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10622                             HandleToUint64(dev_data->device), VALIDATION_ERROR_146009f2,
10623                             "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
10624                             pCreateInfo->imageColorSpace))
10625                     return true;
10626             }
10627         }
10628     }
10629
10630     // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
10631     if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
10632         // FIFO is required to always be supported
10633         if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
10634             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10635                         HandleToUint64(dev_data->device), DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY,
10636                         "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
10637                 return true;
10638         }
10639     } else {
10640         // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
10641         bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
10642                                     pCreateInfo->presentMode) != physical_device_state->present_modes.end();
10643         if (!foundMatch) {
10644             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10645                         HandleToUint64(dev_data->device), VALIDATION_ERROR_14600a02,
10646                         "%s called with a non-supported presentMode (i.e. %s).", func_name,
10647                         string_VkPresentModeKHR(pCreateInfo->presentMode)))
10648                 return true;
10649         }
10650     }
10651     // Validate state for shared presentable case
10652     if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
10653         VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
10654         if (!dev_data->extensions.vk_khr_shared_presentable_image) {
10655             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10656                         HandleToUint64(dev_data->device), DRAWSTATE_EXTENSION_NOT_ENABLED,
10657                         "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
10658                         "been enabled.",
10659                         func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
10660                 return true;
10661         } else if (pCreateInfo->minImageCount != 1) {
10662             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10663                         HandleToUint64(dev_data->device), VALIDATION_ERROR_14600ace,
10664                         "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
10665                         "must be 1.",
10666                         func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
10667                 return true;
10668         }
10669     }
10670
10671     return false;
10672 }
10673
10674 static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
10675                                              VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
10676                                              SWAPCHAIN_NODE *old_swapchain_state) {
10677     if (VK_SUCCESS == result) {
10678         lock_guard_t lock(global_lock);
10679         auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
10680         if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
10681             VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
10682             swapchain_state->shared_presentable = true;
10683         }
10684         surface_state->swapchain = swapchain_state.get();
10685         dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
10686     } else {
10687         surface_state->swapchain = nullptr;
10688     }
10689     // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
10690     if (old_swapchain_state) {
10691         old_swapchain_state->replaced = true;
10692     }
10693     surface_state->old_swapchain = old_swapchain_state;
10694     return;
10695 }
10696
10697 VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10698                                                   const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
10699     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10700     auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
10701     auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
10702
10703     if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
10704         return VK_ERROR_VALIDATION_FAILED_EXT;
10705     }
10706
10707     VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10708
10709     PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
10710
10711     return result;
10712 }
10713
10714 VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10715     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10716     bool skip = false;
10717
10718     unique_lock_t lock(global_lock);
10719     auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
10720     if (swapchain_data) {
10721         // Pre-record to avoid Destroy/Create race
10722         if (swapchain_data->images.size() > 0) {
10723             for (auto swapchain_image : swapchain_data->images) {
10724                 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10725                 if (image_sub != dev_data->imageSubresourceMap.end()) {
10726                     for (auto imgsubpair : image_sub->second) {
10727                         auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10728                         if (image_item != dev_data->imageLayoutMap.end()) {
10729                             dev_data->imageLayoutMap.erase(image_item);
10730                         }
10731                     }
10732                     dev_data->imageSubresourceMap.erase(image_sub);
10733                 }
10734                 skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
10735                 dev_data->imageMap.erase(swapchain_image);
10736             }
10737         }
10738
10739         auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
10740         if (surface_state) {
10741             if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
10742             if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
10743         }
10744
10745         dev_data->swapchainMap.erase(swapchain);
10746     }
10747     lock.unlock();
10748     if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
10749 }
10750
10751 static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
10752                                                  uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
10753     bool skip = false;
10754     if (swapchain_state && pSwapchainImages) {
10755         lock_guard_t lock(global_lock);
10756         // Compare the preliminary value of *pSwapchainImageCount with the value this time:
10757         if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
10758             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10759                             HandleToUint64(device), SWAPCHAIN_PRIOR_COUNT,
10760                             "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages without first retrieving the image "
10761                             "count (call once with pSwapchainImages == NULL to query pSwapchainImageCount).");
10762         } else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
10763             skip |=
10764                 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
10765                         HandleToUint64(device), SWAPCHAIN_INVALID_COUNT,
10766                         "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
10767                         "value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
10768                         *pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
10769         }
10770     }
10771     return skip;
10772 }
10773
10774 static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
10775                                                 uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
10776     lock_guard_t lock(global_lock);
10777
10778     if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
10779
10780     if (pSwapchainImages) {
10781         if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
10782             swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
10783         }
10784         for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
10785             if (swapchain_state->images[i] != VK_NULL_HANDLE) continue;  // Already retrieved this.
10786
10787             IMAGE_LAYOUT_NODE image_layout_node;
10788             image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10789             image_layout_node.format = swapchain_state->createInfo.imageFormat;
10790             // Add imageMap entries for each swapchain image
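                  // Note: this VkImageCreateInfo is a best-effort reconstruction of the swapchain image's implicit
                  // creation parameters (the images are created by the WSI, not the app); it exists only so later
                  // image and layout validation has state to reference, and is never handed to the driver.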
10791             VkImageCreateInfo image_ci = {};
10792             image_ci.flags = 0;
10793             image_ci.imageType = VK_IMAGE_TYPE_2D;
10794             image_ci.format = swapchain_state->createInfo.imageFormat;
10795             image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
10796             image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
10797             image_ci.extent.depth = 1;
10798             image_ci.mipLevels = 1;
10799             image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
10800             image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
10801             image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
10802             image_ci.usage = swapchain_state->createInfo.imageUsage;
10803             image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
10804             device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
10805             auto &image_state = device_data->imageMap[pSwapchainImages[i]];
10806             image_state->valid = false;
10807             image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10808             swapchain_state->images[i] = pSwapchainImages[i];
10809             ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10810             device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10811             device_data->imageLayoutMap[subpair] = image_layout_node;
10812         }
10813     }
10814
10815     if (*pSwapchainImageCount) {
10816         if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
10817             swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
10818         }
10819         swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
10820     }
10821 }
10822
10823 VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
10824                                                      VkImage *pSwapchainImages) {
10825     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10826     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
10827
10828     auto swapchain_state = GetSwapchainNode(device_data, swapchain);
10829     bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
10830
10831     if (!skip) {
10832         result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
10833     }
10834
10835     if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
10836         PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
10837     }
10838     return result;
10839 }
10840
10841 VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10842     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
10843     bool skip = false;
10844
10845     lock_guard_t lock(global_lock);
10846     auto queue_state = GetQueueState(dev_data, queue);
10847
10848     for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10849         auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10850         if (pSemaphore && !pSemaphore->signaled) {
10851             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10852                             DRAWSTATE_QUEUE_FORWARD_PROGRESS,
10853                             "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10854                             HandleToUint64(queue), HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
10855         }
10856     }
10857
10858     for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10859         auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10860         if (swapchain_data) {
10861             if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
10862                 skip |=
10863                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10864                             HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
10865                             "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
10866                             pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
10867             } else {
10868                 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
10869                 auto image_state = GetImageState(dev_data, image);
10870
                      // GetImageState() can return null if this swapchain image was never retrieved via vkGetSwapchainImagesKHR()
10871                 if (image_state && image_state->shared_presentable) {
10872                     image_state->layout_locked = true;
10873                 }
10874
10875                 if (image_state && !image_state->acquired) {
10876                     skip |= log_msg(
10877                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10878                         HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
10879                         "vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
10880                 }
10881
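                      // At present time the image must be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, or in
                      // VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR when VK_KHR_shared_presentable_image is enabled.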
10882                 vector<VkImageLayout> layouts;
10883                 if (FindLayouts(dev_data, image, layouts)) {
10884                     for (auto layout : layouts) {
10885                         if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!dev_data->extensions.vk_khr_shared_presentable_image ||
10886                                                                             (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
10887                             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10888                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue), VALIDATION_ERROR_11200a20,
10889                                             "Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
10890                                             "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
10891                                             string_VkImageLayout(layout));
10892                         }
10893                     }
10894                 }
10895             }
10896
10897             // All physical devices and queue families are required to be able
10898             // to present to any native window on Android; require the
10899             // application to have established support on any other platform.
10900             if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
10901                 auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
10902                 auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
10903
10904                 if (support_it == surface_state->gpu_queue_support.end()) {
10905                     skip |=
10906                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10907                                 HandleToUint64(pPresentInfo->pSwapchains[i]), DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE,
10908                                 "vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
10909                 } else if (!support_it->second) {
10910                     skip |=
10911                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10912                                 HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_31800a18,
10913                                 "vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
10914                 }
10915             }
10916         }
10917     }
10918     if (pPresentInfo && pPresentInfo->pNext) {
10919         // Verify ext struct
10920         const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
10921         if (present_regions) {
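                  // VK_KHR_incremental_present: each rectangle must fit within the corresponding swapchain image's
                  // extent and layer range, which is what the checks below enforce.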
10922             for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
10923                 auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10924                 assert(swapchain_data);
10925                 VkPresentRegionKHR region = present_regions->pRegions[i];
10926                 for (uint32_t j = 0; j < region.rectangleCount; ++j) {
10927                     VkRectLayerKHR rect = region.pRectangles[j];
10928                     if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
10929                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10930                                         VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
10931                                         VALIDATION_ERROR_11e009da,
10932                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
10933                                         "pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
10934                                         "than the corresponding swapchain's imageExtent.width (%i).",
10935                                         i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
10936                     }
10937                     if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
10938                         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10939                                         VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
10940                                         VALIDATION_ERROR_11e009da,
10941                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
10942                                         "pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
10943                                         "than the corresponding swapchain's imageExtent.height (%i).",
10944                                         i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
10945                     }
10946                     if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
10947                         skip |= log_msg(
10948                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10949                             HandleToUint64(pPresentInfo->pSwapchains[i]), VALIDATION_ERROR_11e009dc,
10950                             "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
10951                             "(%i) is not less than the corresponding swapchain's imageArrayLayers (%i).",
10952                             i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
10953                     }
10954                 }
10955             }
10956         }
10957
10958         const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
10959         if (present_times_info) {
10960             if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
10961                 skip |=
10962                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10963                             HandleToUint64(pPresentInfo->pSwapchains[0]), VALIDATION_ERROR_118009be,
10966                             "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
10967                             "is %i. For VkPresentTimesInfoGOOGLE down the pNext chain of VkPresentInfoKHR, "
10968                             "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
10969                             present_times_info->swapchainCount, pPresentInfo->swapchainCount);
10970             }
10971         }
10972     }
10973
10974     if (skip) {
10975         return VK_ERROR_VALIDATION_FAILED_EXT;
10976     }
10977
10978     VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
10979
10980     if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
10981         // Semaphore waits occur before error generation, if the call reached
10982         // the ICD. (Confirm?)
10983         for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10984             auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10985             if (pSemaphore) {
10986                 pSemaphore->signaler.first = VK_NULL_HANDLE;
10987                 pSemaphore->signaled = false;
10988             }
10989         }
10990
10991         for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10992             // Note: this is imperfect, in that we can get confused about what
10993             // did or didn't succeed-- but if the app does that, it's confused
10994             // itself just as much.
10995             auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
10996
10997             if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue;  // this present didn't actually happen.
10998
10999             // Mark the image as having been released to the WSI
11000             auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11001             auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11002             auto image_state = GetImageState(dev_data, image);
11003             image_state->acquired = false;
11004         }
11005
11006         // Note: even though presentation is directed to a queue, there is no
11007         // direct ordering between QP and subsequent work, so QP (and its
11008         // semaphore waits) /never/ participate in any completion proof.
11009     }
11010
11011     return result;
11012 }
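// Illustrative sketch, not part of the layer: a minimal application-side VkPresentInfoKHR with a
// VkPresentRegionsKHR chained on pNext, sized so the checks in QueuePresentKHR above are satisfied
// (rectangles within imageExtent, rectangle layer within imageArrayLayers, and the chained struct's
// swapchainCount equal to VkPresentInfoKHR::swapchainCount). The handles used below (queue, swapchain,
// renderDoneSemaphore, imageIndex, swapchainExtent) are placeholders assumed to exist in the application.
/*
    VkRectLayerKHR dirtyRect = {};
    dirtyRect.offset = {0, 0};
    dirtyRect.extent = swapchainExtent;   // must not exceed VkSwapchainCreateInfoKHR::imageExtent
    dirtyRect.layer = 0;                  // must stay within imageArrayLayers

    VkPresentRegionKHR region = {};
    region.rectangleCount = 1;
    region.pRectangles = &dirtyRect;

    VkPresentRegionsKHR regions = {};
    regions.sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR;
    regions.swapchainCount = 1;           // must match VkPresentInfoKHR::swapchainCount
    regions.pRegions = &region;

    VkPresentInfoKHR present_info = {};
    present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present_info.pNext = &regions;
    present_info.waitSemaphoreCount = 1;
    present_info.pWaitSemaphores = &renderDoneSemaphore;
    present_info.swapchainCount = 1;
    present_info.pSwapchains = &swapchain;
    present_info.pImageIndices = &imageIndex;
    vkQueuePresentKHR(queue, &present_info);
*/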
11013
11014 static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
11015                                                      const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11016                                                      std::vector<SURFACE_STATE *> &surface_state,
11017                                                      std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11018     if (pCreateInfos) {
11019         lock_guard_t lock(global_lock);
11020         for (uint32_t i = 0; i < swapchainCount; i++) {
11021             surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
11022             old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
11023             std::stringstream func_name;
11024             func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
11025             if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
11026                                                   old_swapchain_state[i])) {
11027                 return true;
11028             }
11029         }
11030     }
11031     return false;
11032 }
11033
11034 static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
11035                                                     const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
11036                                                     std::vector<SURFACE_STATE *> &surface_state,
11037                                                     std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
11038     if (VK_SUCCESS == result) {
11039         for (uint32_t i = 0; i < swapchainCount; i++) {
11040             auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
11041             if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
11042                 VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
11043                 swapchain_state->shared_presentable = true;
11044             }
11045             surface_state[i]->swapchain = swapchain_state.get();
11046             dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
11047         }
11048     } else {
11049         for (uint32_t i = 0; i < swapchainCount; i++) {
11050             surface_state[i]->swapchain = nullptr;
11051         }
11052     }
11053     // Spec requires that even if CreateSharedSwapchainsKHR fails, oldSwapchain behaves as if it were replaced.
11054     for (uint32_t i = 0; i < swapchainCount; i++) {
11055         if (old_swapchain_state[i]) {
11056             old_swapchain_state[i]->replaced = true;
11057         }
11058         surface_state[i]->old_swapchain = old_swapchain_state[i];
11059     }
11060     return;
11061 }
11062
11063 VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11064                                                          const VkSwapchainCreateInfoKHR *pCreateInfos,
11065                                                          const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11066     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11067     std::vector<SURFACE_STATE *> surface_state;
11068     std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
11069
11070     if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11071                                                  old_swapchain_state)) {
11072         return VK_ERROR_VALIDATION_FAILED_EXT;
11073     }
11074
11075     VkResult result =
11076         dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11077
11078     PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
11079                                             old_swapchain_state);
11080
11081     return result;
11082 }
11083
11084 static bool PreCallValidateAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11085                                                VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11086     bool skip = false;
11087     if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11088         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11089                         HandleToUint64(device), DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE,
11090                         "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
11091                         "determine the completion of this operation.");
11092     }
11093
11094     auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
11095     if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
11096         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11097                         HandleToUint64(semaphore), VALIDATION_ERROR_16400a0c,
11098                         "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state.");
11099     }
11100
11101     auto pFence = GetFenceNode(dev_data, fence);
11102     if (pFence) {
11103         skip |= ValidateFenceForSubmit(dev_data, pFence);
11104     }
11105
11106     auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
11107     if (swapchain_data->replaced) {
11108         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11109                         HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_REPLACED,
11110                         "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still present any images it "
11111                         "has acquired, but cannot acquire any more.");
11112     }
11113
11114     auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11115     if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
11116         uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
11117                                                  [=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
11118         if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
11119             skip |=
11120                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11121                         HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES,
11122                         "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
11123                         acquired_images);
11124         }
11125     }
11126
11127     if (swapchain_data->images.size() == 0) {
11128         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11129                         HandleToUint64(swapchain), DRAWSTATE_SWAPCHAIN_IMAGES_NOT_FOUND,
11130                         "vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
11131                         "vkGetSwapchainImagesKHR after swapchain creation.");
11132     }
11133     return skip;
11134 }
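// Illustrative sketch, not part of the layer: the bound enforced by the acquired-image check above.
// The check warns once the number of currently-acquired images exceeds (imageCount - minImageCount),
// because forward progress for the next acquire is no longer guaranteed past that point.
// swapchainImageCount, surfaceCaps, and currently_acquired are placeholder values the application is
// assumed to have obtained via vkGetSwapchainImagesKHR and vkGetPhysicalDeviceSurfaceCapabilitiesKHR.
/*
    uint32_t safely_holdable = swapchainImageCount - surfaceCaps.minImageCount;
    bool next_acquire_guaranteed = (currently_acquired <= safely_holdable);
    // If not guaranteed, present (or otherwise release) an image before acquiring another one.
*/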
11135
11136 static void PostCallRecordAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11137                                               VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11138     auto pFence = GetFenceNode(dev_data, fence);
11139     if (pFence && pFence->scope == kSyncScopeInternal) {
11140         // Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
11141         // import
11142         pFence->state = FENCE_INFLIGHT;
11143         pFence->signaler.first = VK_NULL_HANDLE;  // ANI isn't on a queue, so this can't participate in a completion proof.
11144     }
11145
11146     auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
11147     if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
11148         // Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
11149         // temporary import
11150         pSemaphore->signaled = true;
11151         pSemaphore->signaler.first = VK_NULL_HANDLE;
11152     }
11153
11154     // Mark the image as acquired.
11155     auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
11156     auto image = swapchain_data->images[*pImageIndex];
11157     auto image_state = GetImageState(dev_data, image);
11158     image_state->acquired = true;
11159     image_state->shared_presentable = swapchain_data->shared_presentable;
11160 }
11161
11162 VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11163                                                    VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11164     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11165
11166     unique_lock_t lock(global_lock);
11167     bool skip = PreCallValidateAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
11168     lock.unlock();
11169
11170     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11171
11172     VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11173
11174     lock.lock();
11175     if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11176         PostCallRecordAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
11177     }
11178     lock.unlock();
11179
11180     return result;
11181 }
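// Illustrative sketch, not part of the layer: a typical application-side acquire that avoids the
// DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE warning by providing a semaphore (a fence would also do).
// device, swapchain, and imageAvailableSemaphore are placeholder handles assumed to exist in the app.
/*
    uint32_t image_index = 0;
    VkResult res = vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, imageAvailableSemaphore,
                                         VK_NULL_HANDLE, &image_index);
    if (res == VK_SUCCESS || res == VK_SUBOPTIMAL_KHR) {
        // Wait on imageAvailableSemaphore in the VkSubmitInfo that first uses the acquired image.
    }
*/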
11182
11183 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11184                                                         VkPhysicalDevice *pPhysicalDevices) {
11185     bool skip = false;
11186     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11187     assert(instance_data);
11188
11189     // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11190     if (NULL == pPhysicalDevices) {
11191         instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11192     } else {
11193         if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
11194             // Flag warning here. You can call this without having queried the count, but it may not be
11195             // robust on platforms with multiple physical devices.
11196             skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11197                             0, DEVLIMITS_MISSING_QUERY_COUNT,
11198                             "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call "
11199                             "vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11200         }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11201         else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
11202             // Having actual count match count from app is not a requirement, so this can be a warning
11203             skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11204                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
11205                             "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by "
11206                             "this instance is %u.",
11207                             *pPhysicalDeviceCount, instance_data->physical_devices_count);
11208         }
11209         instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11210     }
11211     if (skip) {
11212         return VK_ERROR_VALIDATION_FAILED_EXT;
11213     }
11214     VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11215     if (NULL == pPhysicalDevices) {
11216         instance_data->physical_devices_count = *pPhysicalDeviceCount;
11217     } else if (result == VK_SUCCESS) {  // Save physical devices
11218         for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11219             auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11220             phys_device_state.phys_device = pPhysicalDevices[i];
11221             // Init actual features for each physical device
11222             instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11223         }
11224     }
11225     return result;
11226 }
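// Illustrative sketch, not part of the layer: the count-then-data call sequence that the warnings in
// EnumeratePhysicalDevices above recommend. The instance handle is a placeholder.
/*
    uint32_t gpu_count = 0;
    vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr);        // first call: query the count
    std::vector<VkPhysicalDevice> gpus(gpu_count);
    vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data());    // second call: fetch the handles
*/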
11227
11228 // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
11229 static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11230                                                                  PHYSICAL_DEVICE_STATE *pd_state,
11231                                                                  uint32_t requested_queue_family_property_count, bool qfp_null,
11232                                                                  const char *caller_name) {
11233     bool skip = false;
11234     if (!qfp_null) {
11235         // Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
11236         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11237             skip |= log_msg(
11238                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11239                 HandleToUint64(pd_state->phys_device), DEVLIMITS_MISSING_QUERY_COUNT,
11240                 "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
11241                 "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
11242                 caller_name, caller_name);
11243             // Then verify that the count passed in on the second call matches what was previously returned
11244         } else if (pd_state->queue_family_count != requested_queue_family_property_count) {
11245             skip |= log_msg(
11246                 instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11247                 HandleToUint64(pd_state->phys_device), DEVLIMITS_COUNT_MISMATCH,
11248                 "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
11249                 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
11250                 ". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
11251                 "previously obtained by calling %s with NULL pQueueFamilyProperties.",
11252                 caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
11253         }
11254         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11255     }
11256
11257     return skip;
11258 }
11259
11260 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
11261                                                                   PHYSICAL_DEVICE_STATE *pd_state,
11262                                                                   uint32_t *pQueueFamilyPropertyCount,
11263                                                                   VkQueueFamilyProperties *pQueueFamilyProperties) {
11264     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11265                                                                 (nullptr == pQueueFamilyProperties),
11266                                                                 "vkGetPhysicalDeviceQueueFamilyProperties()");
11267 }
11268
11269 static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_layer_data *instance_data,
11270                                                                    PHYSICAL_DEVICE_STATE *pd_state,
11271                                                                    uint32_t *pQueueFamilyPropertyCount,
11272                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11273     return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
11274                                                                 (nullptr == pQueueFamilyProperties),
11275                                                                 "vkGetPhysicalDeviceQueueFamilyProperties2[KHR]()");
11276 }
11277
11278 // Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
11279 static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11280                                                                     VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11281     if (!pQueueFamilyProperties) {
11282         if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
11283             pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11284         pd_state->queue_family_count = count;
11285     } else {  // Save queue family properties
11286         pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11287         pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
11288
11289         pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
11290         for (uint32_t i = 0; i < count; ++i) {
11291             pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
11292         }
11293     }
11294 }
11295
11296 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11297                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
11298     VkQueueFamilyProperties2KHR *pqfp = nullptr;
11299     std::vector<VkQueueFamilyProperties2KHR> qfp;
11300     qfp.resize(count);
11301     if (pQueueFamilyProperties) {
11302         for (uint32_t i = 0; i < count; ++i) {
11303             qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
11304             qfp[i].pNext = nullptr;
11305             qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
11306         }
11307         pqfp = qfp.data();
11308     }
11309     StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
11310 }
11311
11312 static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
11313                                                                   VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11314     StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
11315 }
11316
11317 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
11318                                                                   uint32_t *pQueueFamilyPropertyCount,
11319                                                                   VkQueueFamilyProperties *pQueueFamilyProperties) {
11320     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11321     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11322     assert(physical_device_state);
11323     unique_lock_t lock(global_lock);
11324
11325     bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
11326                                                                       pQueueFamilyPropertyCount, pQueueFamilyProperties);
11327
11328     lock.unlock();
11329
11330     if (skip) return;
11331
11332     instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
11333                                                                          pQueueFamilyProperties);
11334
11335     lock.lock();
11336     PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
11337 }
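// Illustrative sketch, not part of the layer: the two-call pattern recommended by
// ValidateCommonGetPhysicalDeviceQueueFamilyProperties above, followed by picking a graphics-capable
// family. The gpu handle is a placeholder.
/*
    uint32_t family_count = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, nullptr);
    std::vector<VkQueueFamilyProperties> families(family_count);
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, families.data());
    uint32_t graphics_family = UINT32_MAX;
    for (uint32_t i = 0; i < family_count; ++i) {
        if (families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_family = i;
            break;
        }
    }
*/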
11338
11339 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
11340                                                                    uint32_t *pQueueFamilyPropertyCount,
11341                                                                    VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11342     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11343     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11344     assert(physical_device_state);
11345     unique_lock_t lock(global_lock);
11346     bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11347                                                                        pQueueFamilyPropertyCount, pQueueFamilyProperties);
11348     lock.unlock();
11349     if (skip) return;
11350
11351     instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
11352                                                                           pQueueFamilyProperties);
11353     lock.lock();
11354     PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11355                                                           pQueueFamilyProperties);
11356 }
11357
11358 VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
11359                                                                       uint32_t *pQueueFamilyPropertyCount,
11360                                                                       VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
11361     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11362     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11363     assert(physical_device_state);
11364     unique_lock_t lock(global_lock);
11365     bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
11366                                                                        pQueueFamilyPropertyCount, pQueueFamilyProperties);
11367     lock.unlock();
11368     if (skip) return;
11369
11370     instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
11371                                                                              pQueueFamilyProperties);
11372     lock.lock();
11373     PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
11374                                                           pQueueFamilyProperties);
11375 }
11376
11377 template <typename TCreateInfo, typename FPtr>
11378 static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
11379                               VkSurfaceKHR *pSurface, FPtr fptr) {
11380     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11381
11382     // Call down the call chain:
11383     VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11384
11385     if (result == VK_SUCCESS) {
11386         unique_lock_t lock(global_lock);
11387         instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11388         lock.unlock();
11389     }
11390
11391     return result;
11392 }
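// Illustrative sketch, not part of the layer: the pointer-to-member dispatch used by CreateSurface above.
// Each platform entry point below passes the address of the matching VkLayerInstanceDispatchTable member,
// and the template invokes it with (table.*fptr)(args). A minimal standalone example with hypothetical
// names:
/*
    struct Table { int add(int a, int b) { return a + b; } };
    int (Table::*fn)(int, int) = &Table::add;
    Table t;
    int three = (t.*fn)(1, 2);  // calls Table::add through the member pointer
*/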
11393
11394 VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11395     bool skip = false;
11396     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11397     unique_lock_t lock(global_lock);
11398     auto surface_state = GetSurfaceState(instance_data, surface);
11399
11400     if ((surface_state) && (surface_state->swapchain)) {
11401         skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11402                         HandleToUint64(instance), VALIDATION_ERROR_26c009e4,
11403                         "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
11404     }
11405
11406     // Pre-record to avoid Destroy/Create race
11407     instance_data->surface_map.erase(surface);
11408
11409     lock.unlock();
11410     if (!skip) {
11411         instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11412     }
11413 }
11414
11415 VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
11416                                                             const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11417     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
11418 }
11419
11420 #ifdef VK_USE_PLATFORM_ANDROID_KHR
11421 VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11422                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11423     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11424 }
11425 #endif  // VK_USE_PLATFORM_ANDROID_KHR
11426
11427 #ifdef VK_USE_PLATFORM_IOS_MVK
11428 VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
11429                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11430     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateIOSSurfaceMVK);
11431 }
11432 #endif  // VK_USE_PLATFORM_IOS_MVK
11433
11434 #ifdef VK_USE_PLATFORM_MACOS_MVK
11435 VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
11436                                                      const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11437     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMacOSSurfaceMVK);
11438 }
11439 #endif  // VK_USE_PLATFORM_MACOS_MVK
11440
11441 #ifdef VK_USE_PLATFORM_MIR_KHR
11442 VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11443                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11444     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11445 }
11446
11447 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11448                                                                           uint32_t queueFamilyIndex, MirConnection *connection) {
11449     bool skip = false;
11450     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11451
11452     unique_lock_t lock(global_lock);
11453     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11454
11455     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2d2009e2,
11456                                               "vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
11457
11458     lock.unlock();
11459
11460     if (skip) return VK_FALSE;
11461
11462     // Call down the call chain:
11463     VkBool32 result =
11464         instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
11465
11466     return result;
11467 }
11468 #endif  // VK_USE_PLATFORM_MIR_KHR
11469
11470 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
11471 VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11472                                                        const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11473     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11474 }
11475
11476 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11477                                                                               uint32_t queueFamilyIndex,
11478                                                                               struct wl_display *display) {
11479     bool skip = false;
11480     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11481
11482     unique_lock_t lock(global_lock);
11483     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11484
11485     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f000a34,
11486                                               "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
11487
11488     lock.unlock();
11489
11490     if (skip) return VK_FALSE;
11491
11492     // Call down the call chain:
11493     VkBool32 result =
11494         instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
11495
11496     return result;
11497 }
11498 #endif  // VK_USE_PLATFORM_WAYLAND_KHR
11499
11500 #ifdef VK_USE_PLATFORM_WIN32_KHR
11501 VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11502                                                      const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11503     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11504 }
11505
11506 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
11507                                                                             uint32_t queueFamilyIndex) {
11508     bool skip = false;
11509     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11510
11511     unique_lock_t lock(global_lock);
11512     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11513
11514     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f200a3a,
11515                                               "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
11516
11517     lock.unlock();
11518
11519     if (skip) return VK_FALSE;
11520
11521     // Call down the call chain:
11522     VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
11523
11524     return result;
11525 }
11526 #endif  // VK_USE_PLATFORM_WIN32_KHR
11527
11528 #ifdef VK_USE_PLATFORM_XCB_KHR
11529 VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11530                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11531     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11532 }
11533
11534 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11535                                                                           uint32_t queueFamilyIndex, xcb_connection_t *connection,
11536                                                                           xcb_visualid_t visual_id) {
11537     bool skip = false;
11538     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11539
11540     unique_lock_t lock(global_lock);
11541     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11542
11543     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f400a40,
11544                                               "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
11545
11546     lock.unlock();
11547
11548     if (skip) return VK_FALSE;
11549
11550     // Call down the call chain:
11551     VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
11552                                                                                                connection, visual_id);
11553
11554     return result;
11555 }
11556 #endif  // VK_USE_PLATFORM_XCB_KHR
11557
11558 #ifdef VK_USE_PLATFORM_XLIB_KHR
11559 VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11560                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11561     return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11562 }
11563
11564 VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
11565                                                                            uint32_t queueFamilyIndex, Display *dpy,
11566                                                                            VisualID visualID) {
11567     bool skip = false;
11568     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11569
11570     unique_lock_t lock(global_lock);
11571     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11572
11573     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2f600a46,
11574                                               "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
11575
11576     lock.unlock();
11577
11578     if (skip) return VK_FALSE;
11579
11580     // Call down the call chain:
11581     VkBool32 result =
11582         instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
11583
11584     return result;
11585 }
11586 #endif  // VK_USE_PLATFORM_XLIB_KHR
11587
11588 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11589                                                                        VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
11590     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11591
11592     unique_lock_t lock(global_lock);
11593     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11594     lock.unlock();
11595
11596     auto result =
11597         instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
11598
11599     if (result == VK_SUCCESS) {
11600         physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11601         physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
11602     }
11603
11604     return result;
11605 }
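// Illustrative sketch, not part of the layer: how an application typically uses the capabilities recorded
// above when choosing a swapchain image count (maxImageCount == 0 means there is no upper limit). caps is
// a placeholder VkSurfaceCapabilitiesKHR filled by vkGetPhysicalDeviceSurfaceCapabilitiesKHR.
/*
    uint32_t desired = caps.minImageCount + 1;
    if (caps.maxImageCount > 0 && desired > caps.maxImageCount) desired = caps.maxImageCount;
    // desired is then used as VkSwapchainCreateInfoKHR::minImageCount
*/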
11606
11607 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
11608                                                                    VkPhysicalDevice physicalDevice,
11609                                                                    VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11610     unique_lock_t lock(global_lock);
11611     auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11612     physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11613     physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
11614 }
11615
11616 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
11617                                                                         const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11618                                                                         VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
11619     auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11620
11621     auto result =
11622         instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
11623
11624     if (result == VK_SUCCESS) {
11625         PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
11626     }
11627
11628     return result;
11629 }
11630
11631 static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
11632                                                                    VkPhysicalDevice physicalDevice,
11633                                                                    VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11634     unique_lock_t lock(global_lock);
11635     auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11636     physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
11637     physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
11638     physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
11639     physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
11640     physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
11641     physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
11642     physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
11643     physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
11644     physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
11645     physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
11646     physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
11647 }
11648
11649 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11650                                                                         VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
11651     auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11652
11653     auto result =
11654         instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
11655
11656     if (result == VK_SUCCESS) {
11657         PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
11658     }
11659
11660     return result;
11661 }
11662
11663 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
11664                                                                   VkSurfaceKHR surface, VkBool32 *pSupported) {
11665     bool skip = false;
11666     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11667
11668     unique_lock_t lock(global_lock);
11669     const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11670     auto surface_state = GetSurfaceState(instance_data, surface);
11671
11672     skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex, VALIDATION_ERROR_2ee009ea,
11673                                               "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
11674
11675     lock.unlock();
11676
11677     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11678
11679     auto result =
11680         instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
11681
11682     if (result == VK_SUCCESS) {
11683         surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
11684     }
11685
11686     return result;
11687 }
11688
11689 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11690                                                                        uint32_t *pPresentModeCount,
11691                                                                        VkPresentModeKHR *pPresentModes) {
11692     bool skip = false;
11693     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11694     unique_lock_t lock(global_lock);
11695     // TODO: this isn't quite right. available modes may differ by surface AND physical device.
11696     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11697     auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
11698
11699     if (pPresentModes) {
11700         // Compare the preliminary value of *pPresentModeCount with the value this time:
11701         auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
11702         switch (call_state) {
11703             case UNCALLED:
11704                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11705                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11706                                 DEVLIMITS_MUST_QUERY_COUNT,
11707                                 "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes, but the present "
11708                                 "mode count has not been previously queried by calling this function with NULL pPresentModes.");
11709                 break;
11710             default:
11711                 // both query count and query details
11712                 if (*pPresentModeCount != prev_mode_count) {
11713                     skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11714                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11715                                     DEVLIMITS_COUNT_MISMATCH,
11716                                     "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs "
11717                                     "from the value (%u) that was returned when pPresentModes was NULL.",
11718                                     *pPresentModeCount, prev_mode_count);
11719                 }
11720                 break;
11721         }
11722     }
11723     lock.unlock();
11724
11725     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11726
11727     auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
11728                                                                                         pPresentModes);
11729
11730     if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11731         lock.lock();
11732
11733         if (*pPresentModeCount) {
11734             if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11735             if (*pPresentModeCount > physical_device_state->present_modes.size())
11736                 physical_device_state->present_modes.resize(*pPresentModeCount);
11737         }
11738         if (pPresentModes) {
11739             if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11740             for (uint32_t i = 0; i < *pPresentModeCount; i++) {
11741                 physical_device_state->present_modes[i] = pPresentModes[i];
11742             }
11743         }
11744     }
11745
11746     return result;
11747 }
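// Illustrative sketch, not part of the layer: the count-then-data sequence the warnings above expect,
// followed by a common present-mode choice (VK_PRESENT_MODE_FIFO_KHR is always supported). gpu and
// surface are placeholder handles.
/*
    uint32_t mode_count = 0;
    vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
    std::vector<VkPresentModeKHR> modes(mode_count);
    vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());
    VkPresentModeKHR chosen = VK_PRESENT_MODE_FIFO_KHR;
    for (auto m : modes) {
        if (m == VK_PRESENT_MODE_MAILBOX_KHR) chosen = m;
    }
*/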
11748
11749 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
11750                                                                   uint32_t *pSurfaceFormatCount,
11751                                                                   VkSurfaceFormatKHR *pSurfaceFormats) {
11752     bool skip = false;
11753     auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11754     unique_lock_t lock(global_lock);
11755     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
11756     auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
11757
11758     if (pSurfaceFormats) {
11759         auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
11760
11761         switch (call_state) {
11762             case UNCALLED:
11763                 // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that
11764                 // the application didn't previously call this function with a NULL value of
11765                 // pSurfaceFormats:
11766                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11767                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11768                                 DEVLIMITS_MUST_QUERY_COUNT,
11769                                 "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, but the surface format "
11770                                 "count has not been previously queried by calling this function with NULL pSurfaceFormats.");
11771                 break;
11772             default:
11773                 if (prev_format_count != *pSurfaceFormatCount) {
11774                     skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11775                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
11776                                     DEVLIMITS_COUNT_MISMATCH,
11777                                     "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, and with "
11778                                     "*pSurfaceFormatCount set to a value (%u) that is different from the value (%u) that was "
11779                                     "returned when pSurfaceFormats was NULL.",
11780                                     *pSurfaceFormatCount, prev_format_count);
11781                 }
11782                 break;
11783         }
11784     }
11785     lock.unlock();
11786
11787     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
11788
11789     // Call down the call chain:
11790     auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
11791                                                                                    pSurfaceFormats);
11792
11793     if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11794         lock.lock();
11795
11796         if (*pSurfaceFormatCount) {
11797             if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
11798             if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
11799                 physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
11800         }
11801         if (pSurfaceFormats) {
11802             if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
11803             for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11804                 physical_device_state->surface_formats[i] = pSurfaceFormats[i];
11805             }
11806         }
11807     }
11808     return result;
11809 }
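// Illustrative sketch, not part of the layer: the same count-then-data sequence applied to surface
// formats, with a common preference for a B8G8R8A8 format. gpu and surface are placeholder handles, and
// the loop assumes at least one format was returned.
/*
    uint32_t format_count = 0;
    vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, nullptr);
    std::vector<VkSurfaceFormatKHR> formats(format_count);
    vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, formats.data());
    VkSurfaceFormatKHR chosen = formats[0];  // fall back to the first reported format
    for (const auto &f : formats) {
        if (f.format == VK_FORMAT_B8G8R8A8_UNORM && f.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) chosen = f;
    }
*/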
11810
11811 static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
11812                                                               uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
11813     unique_lock_t lock(global_lock);
11814     auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
11815     if (*pSurfaceFormatCount) {
11816         if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
11817             physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
11818         }
11819         if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
11820             physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
11821     }
11822     if (pSurfaceFormats) {
11823         if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
11824             physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
11825         }
11826         for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
11827             physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
11828         }
11829     }
11830 }
11831
11832 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
11833                                                                    const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
11834                                                                    uint32_t *pSurfaceFormatCount,
11835                                                                    VkSurfaceFormat2KHR *pSurfaceFormats) {
11836     auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11837     auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
11838                                                                                    pSurfaceFormatCount, pSurfaceFormats);
11839     if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
11840         PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
11841     }
11842     return result;
11843 }
11844
11845 // VK_EXT_debug_utils commands
11846 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
11847     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11848     VkResult result = VK_SUCCESS;
11849     if (pNameInfo->pObjectName) {
11850         dev_data->report_data->debugUtilsObjectNameMap->insert(
11851             std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
11852     } else {
11853         dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
11854     }
11855     if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectNameEXT) {
11856         result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
11857     }
11858     return result;
11859 }
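// Illustrative sketch, not part of the layer: how an application attaches a debug name that this entry
// point records into debugUtilsObjectNameMap. device and buffer are placeholder handles; the entry point
// would normally be fetched with vkGetDeviceProcAddr when VK_EXT_debug_utils is enabled.
/*
    VkDebugUtilsObjectNameInfoEXT name_info = {};
    name_info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
    name_info.objectType = VK_OBJECT_TYPE_BUFFER;
    name_info.objectHandle = (uint64_t)buffer;
    name_info.pObjectName = "vertex buffer";
    vkSetDebugUtilsObjectNameEXT(device, &name_info);
*/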
11860
11861 VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
11862     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
11863     VkResult result = VK_SUCCESS;
11864     if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectTagEXT) {
11865         result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
11866     }
11867     return result;
11868 }
11869
11870 VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11871     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11872     BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
11873     if (nullptr != dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
11874         dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
11875     }
11876 }
11877
11878 VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
11879     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11880     if (nullptr != dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
11881         dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
11882     }
11883     EndQueueDebugUtilsLabel(dev_data->report_data, queue);
11884 }
11885
11886 VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
11887     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
11888     InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
11889     if (nullptr != dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
11890         dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
11891     }
11892 }
11893
11894 VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
11895     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11896     BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
11897     if (nullptr != dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
11898         dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
11899     }
11900 }
11901
11902 VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
11903     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11904     if (nullptr != dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
11905         dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
11906     }
11907     EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
11908 }
11909
11910 VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
11911     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
11912     InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
11913     if (nullptr != dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
11914         dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
11915     }
11916 }
11917
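// Messenger creation registers the application's callback with the layer's report data (after the down-chain create
// succeeds) so validation output is routed through it; destruction unregisters it after the down-chain call.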
11918 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
11919                                                             const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
11920                                                             const VkAllocationCallbacks *pAllocator,
11921                                                             VkDebugUtilsMessengerEXT *pMessenger) {
11922     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11923     VkResult result = instance_data->dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
11924
11925     if (VK_SUCCESS == result) {
11926         result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
11927     }
11928     return result;
11929 }
11930
11931 VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
11932                                                          const VkAllocationCallbacks *pAllocator) {
11933     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11934     instance_data->dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
11935     layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
11936 }
11937
11938 VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
11939                                                       VkDebugUtilsMessageTypeFlagsEXT messageTypes,
11940                                                       const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
11941     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11942     instance_data->dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
11943 }
11944
11945 // VK_EXT_debug_report commands
11946 VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
11947                                                             const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11948                                                             const VkAllocationCallbacks *pAllocator,
11949                                                             VkDebugReportCallbackEXT *pMsgCallback) {
11950     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11951     VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11952     if (VK_SUCCESS == res) {
11953         lock_guard_t lock(global_lock);
11954         res = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11955     }
11956     return res;
11957 }
11958
11959 VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
11960                                                          const VkAllocationCallbacks *pAllocator) {
11961     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11962     instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11963     lock_guard_t lock(global_lock);
11964     layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
11965 }
11966
11967 VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
11968                                                  VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
11969                                                  int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11970     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
11971     instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11972 }
11973
11974 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11975     return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11976 }
11977
11978 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11979                                                               VkLayerProperties *pProperties) {
11980     return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11981 }
11982
11983 VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
11984                                                                     VkExtensionProperties *pProperties) {
11985     if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11986         return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11987
11988     return VK_ERROR_LAYER_NOT_PRESENT;
11989 }
11990
11991 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
11992                                                                   uint32_t *pCount, VkExtensionProperties *pProperties) {
11993     if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11994         return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
11995
11996     assert(physicalDevice);
11997
11998     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11999     return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
12000 }
12001
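// Validate the two-call idiom for vkEnumeratePhysicalDeviceGroups. A well-behaved application first queries the group
// count and then retrieves the details, roughly like this (illustrative sketch only, not part of the layer):
//     uint32_t count = 0;
//     vkEnumeratePhysicalDeviceGroups(instance, &count, nullptr);        // first call: query the group count
//     std::vector<VkPhysicalDeviceGroupProperties> groups(count);        // sType of each element must be initialized
//     vkEnumeratePhysicalDeviceGroups(instance, &count, groups.data());  // second call: fill in the details
// Skipping the first call, or passing a count that does not match, triggers the warnings below.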
12002 static bool PreCallValidateEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
12003                                                          VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12004     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12005     bool skip = false;
12006
12007     if (instance_data) {
12008         // For this instance, verify that the app follows the expected call sequence: query the group count, then the details.
12009         if (NULL != pPhysicalDeviceGroupProperties) {
12010             if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
12011                 // Flag a warning here. An app can call this without having queried the count, but the call may not be
12012                 // robust on platforms with multiple physical device groups.
12013                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12014                                 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, DEVLIMITS_MISSING_QUERY_COUNT,
12015                                 "Call sequence has vkEnumeratePhysicalDeviceGroups() w/ non-NULL "
12016                                 "pPhysicalDeviceGroupProperties. You should first call vkEnumeratePhysicalDeviceGroups() w/ "
12017                                 "NULL pPhysicalDeviceGroupProperties to query pPhysicalDeviceGroupCount.");
12018             }  // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
12019             else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
12020                 // Having actual count match count from app is not a requirement, so this can be a warning
12021                 skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
12022                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, DEVLIMITS_COUNT_MISMATCH,
12023                                 "Call to vkEnumeratePhysicalDeviceGroups() w/ pPhysicalDeviceGroupCount value %u, but actual count "
12024                                 "supported by this instance is %u.",
12025                                 *pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
12026             }
12027         }
12028     } else {
12029         // instance_data was not found, so report_data is unavailable; log to the console instead of dereferencing null.
12030         LOGCONSOLE("Invalid instance (0x%" PRIx64 ") passed into vkEnumeratePhysicalDeviceGroups().", HandleToUint64(instance));
12031         skip = true;
12032     }
12033
12034     return skip;
12035 }
12036
12037 static void PreCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data,
12038                                                        VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12039     if (instance_data) {
12040         // For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
12041         if (NULL == pPhysicalDeviceGroupProperties) {
12042             instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
12043         } else {
12044             instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
12045         }
12046     }
12047 }
12048
12049 static void PostCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceGroupCount,
12050                                                         VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12051     if (NULL == pPhysicalDeviceGroupProperties) {
12052         instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
12053     } else {  // Save physical devices
12054         for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
12055             for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
12056                 VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
12057                 auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
12058                 phys_device_state.phys_device = cur_phys_dev;
12059                 // Init actual features for each physical device
12060                 instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features);
12061             }
12062         }
12063     }
12064 }
12065
12066 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
12067                                                              VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12068     bool skip = false;
12069     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12070
12071     skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12072     if (skip) {
12073         return VK_ERROR_VALIDATION_FAILED_EXT;
12074     }
12075     PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
12076     VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount,
12077                                                                                   pPhysicalDeviceGroupProperties);
12078     if (result == VK_SUCCESS) {
12079         PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12080     }
12081     return result;
12082 }
12083
12084 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(
12085     VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
12086     bool skip = false;
12087     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12088
12089     skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12090     if (skip) {
12091         return VK_ERROR_VALIDATION_FAILED_EXT;
12092     }
12093     PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
12094     VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount,
12095                                                                                      pPhysicalDeviceGroupProperties);
12096     if (result == VK_SUCCESS) {
12097         PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
12098     }
12099     return result;
12100 }
12101
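// For DESCRIPTOR_SET templates the referenced descriptor set layout must be a known layout; for PUSH_DESCRIPTORS
// templates the pipeline bind point must be graphics or compute, the pipeline layout must be known, and
// pCreateInfo->set must select a push descriptor set layout within that pipeline layout.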
12102 static bool PreCallValidateCreateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
12103                                                           const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12104                                                           const VkAllocationCallbacks *pAllocator,
12105                                                           VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12106     bool skip = false;
12107     const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
12108     if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
12109         auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout);
12110         skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12111                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, ds_uint, VALIDATION_ERROR_052002bc,
12112                         "%s: Invalid pCreateInfo->descriptorSetLayout (%" PRIx64 ").", func_name, ds_uint);
12113     } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
12114         auto bind_point = pCreateInfo->pipelineBindPoint;
12115         bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
12116         if (!valid_bp) {
12117             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
12118                             VALIDATION_ERROR_052002be, "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name,
12119                             static_cast<uint32_t>(bind_point));
12120         }
12121         const auto pipeline_layout = getPipelineLayout(device_data, pCreateInfo->pipelineLayout);
12122         if (!pipeline_layout) {
12123             uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
12124             skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12125                             VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c0,
12126                             "%s: Invalid pCreateInfo->pipelineLayout (%" PRIx64 ")", func_name, pl_uint);
12127         } else {
12128             const uint32_t pd_set = pCreateInfo->set;
12129             if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
12130                 !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
12131                 uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
12132                 skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12133                                 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint, VALIDATION_ERROR_052002c2,
12134                                 "%s: pCreateInfo->set (%" PRIu32
12135                                 ") does not refer to the push descriptor set layout for "
12136                                 "pCreateInfo->pipelineLayout (%" PRIx64 ").",
12137                                 func_name, pd_set, pl_uint);
12138             }
12139         }
12140     }
12141     return skip;
12142 }
12143
12144 static void PostCallRecordCreateDescriptorUpdateTemplate(layer_data *device_data,
12145                                                          const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12146                                                          VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12147     // Shadow template createInfo for later updates
12148     safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
12149     std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
12150     device_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
12151 }
12152
12153 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
12154                                                               const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12155                                                               const VkAllocationCallbacks *pAllocator,
12156                                                               VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12157     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12158     unique_lock_t lock(global_lock);
12159     bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", device_data, pCreateInfo,
12160                                                               pAllocator, pDescriptorUpdateTemplate);
12161
12162     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12163     if (!skip) {
12164         lock.unlock();
12165         result =
12166             device_data->dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
12167         if (VK_SUCCESS == result) {
12168             lock.lock();
12169             PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
12170         }
12171     }
12172     return result;
12173 }
12174
12175 VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
12176                                                                  const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
12177                                                                  const VkAllocationCallbacks *pAllocator,
12178                                                                  VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
12179     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12180     unique_lock_t lock(global_lock);
12181     bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", device_data, pCreateInfo,
12182                                                               pAllocator, pDescriptorUpdateTemplate);
12183
12184     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12185     if (!skip) {
12186         lock.unlock();
12187         result = device_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator,
12188                                                                                pDescriptorUpdateTemplate);
12189         if (VK_SUCCESS == result) {
12190             lock.lock();
12191             PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
12192         }
12193     }
12194     return result;
12195 }
12196
12197 static void PreCallRecordDestroyDescriptorUpdateTemplate(layer_data *device_data,
12198                                                          VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate) {
12199     device_data->desc_template_map.erase(descriptorUpdateTemplate);
12200 }
12201
12202 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12203                                                            const VkAllocationCallbacks *pAllocator) {
12204     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12205     unique_lock_t lock(global_lock);
12206     // Pre-record to avoid Destroy/Create race
12207     PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12208     lock.unlock();
12209     device_data->dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
12210 }
12211
12212 VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
12213                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12214                                                               const VkAllocationCallbacks *pAllocator) {
12215     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12216     unique_lock_t lock(global_lock);
12217     // Pre-record to avoid Destroy/Create race
12218     PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
12219     lock.unlock();
12220     device_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
12221 }
12222
12223 // PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
12224 static void PostCallRecordUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
12225                                                           VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12226                                                           const void *pData) {
12227     auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
12228     if (template_map_entry == device_data->desc_template_map.end()) {
12229         assert(0);  // Unknown descriptor update template
12230         return;     // Avoid dereferencing an end() iterator in release builds
12231     }
12232     cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
12233 }
12234
12235 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
12236                                                            VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12237                                                            const void *pData) {
12238     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12239     device_data->dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
12240
12241     PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
12242 }
12243
12244 VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
12245                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12246                                                               const void *pData) {
12247     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12248     device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
12249
12250     PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
12251 }
12252
12253 VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
12254                                                                VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
12255                                                                VkPipelineLayout layout, uint32_t set, const void *pData) {
12256     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12257     unique_lock_t lock(global_lock);
12258     bool skip = false;
12259     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12260     // Minimal validation for command buffer state
12261     if (cb_state) {
12262         skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "vkCmdPushDescriptorSetWithTemplateKHR()");
12263     }
12264     lock.unlock();
12265
12266     if (!skip) {
12267         dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
12268     }
12269 }
12270
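// Track how far the application has progressed through the vkGetPhysicalDeviceDisplayPlanePropertiesKHR query
// (count-only vs. full details) and remember the plane count so later planeIndex arguments can be range-checked.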
12271 static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
12272                                                                      VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12273                                                                      VkDisplayPlanePropertiesKHR *pProperties) {
12274     unique_lock_t lock(global_lock);
12275     auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
12276
12277     if (*pPropertyCount) {
12278         if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
12279             physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
12280         }
12281         physical_device_state->display_plane_property_count = *pPropertyCount;
12282     }
12283     if (pProperties) {
12284         if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
12285             physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
12286         }
12287     }
12288 }
12289
12290 VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
12291                                                                           VkDisplayPlanePropertiesKHR *pProperties) {
12292     VkResult result = VK_SUCCESS;
12293     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12294
12295     result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
12296
12297     if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
12298         PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
12299     }
12300
12301     return result;
12302 }
12303
12304 static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
12305                                                                     VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12306                                                                     const char *api_name) {
12307     bool skip = false;
12308     auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
12309     if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
12310         skip |= log_msg(
12311             instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12312             HandleToUint64(physicalDevice), SWAPCHAIN_GET_SUPPORTED_DISPLAYS_WITHOUT_QUERY,
12313             "Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR.", api_name);
12314     } else {
12315         if (planeIndex >= physical_device_state->display_plane_property_count) {
12316             skip |= log_msg(
12317                 instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
12318                 HandleToUint64(physicalDevice), VALIDATION_ERROR_29c009c2,
12319                 "%s(): planeIndex (%u) must be in the range [0, %d] of planes returned by "
12320                 "vkGetPhysicalDeviceDisplayPlanePropertiesKHR. Do you have the plane index hardcoded?",
12321                 api_name, planeIndex, physical_device_state->display_plane_property_count - 1);
12322         }
12323     }
12324     return skip;
12325 }
12326
12327 static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12328                                                                uint32_t planeIndex) {
12329     bool skip = false;
12330     lock_guard_t lock(global_lock);
12331     skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12332                                                                     "vkGetDisplayPlaneSupportedDisplaysKHR");
12333     return skip;
12334 }
12335
12336 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
12337                                                                    uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
12338     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12339     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12340     bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
12341     if (!skip) {
12342         result =
12343             instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
12344     }
12345     return result;
12346 }
12347
12348 static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
12349                                                           uint32_t planeIndex) {
12350     bool skip = false;
12351     lock_guard_t lock(global_lock);
12352     skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
12353                                                                     "vkGetDisplayPlaneCapabilitiesKHR");
12354     return skip;
12355 }
12356
12357 VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
12358                                                               uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
12359     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
12360     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
12361     bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
12362
12363     if (!skip) {
12364         result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
12365     }
12366
12367     return result;
12368 }
12369
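// VK_EXT_debug_marker commands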
12370 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
12371     unique_lock_t lock(global_lock);
12372     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12373     if (pNameInfo->pObjectName) {
12374         device_data->report_data->debugObjectNameMap->insert(
12375             std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
12376     } else {
12377         device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
12378     }
12379     lock.unlock();
12380     VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
12381     return result;
12382 }
12383
12384 VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
12385     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12386     VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
12387     return result;
12388 }
12389
12390 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12391     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12392     unique_lock_t lock(global_lock);
12393     bool skip = false;
12394     GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12395     // Minimal validation for command buffer state
12396     if (cb_state) {
12397         skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
12398     }
12399     lock.unlock();
12400     if (!skip) {
12401         device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
12402     }
12403 }
12404
12405 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
12406     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12407     unique_lock_t lock(global_lock);
12408     bool skip = false;
12409     GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
12410     // Minimal validation for command buffer state
12411     if (cb_state) {
12412         skip |= ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
12413     }
12414     lock.unlock();
12415     if (!skip) {
12416         device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
12417     }
12418 }
12419
12420 VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
12421     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12422     device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
12423 }
12424
12425 VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
12426                                                      uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
12427     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12428     unique_lock_t lock(global_lock);
12429     bool skip = false;
12430     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12431     // Minimal validation for command buffer state
12432     if (cb_state) {
12433         skip |= ValidateCmd(dev_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
12434     }
12435     lock.unlock();
12436
12437     if (!skip) {
12438         dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
12439                                                            pDiscardRectangles);
12440     }
12441 }
12442
12443 VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
12444                                                     const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
12445     layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
12446     unique_lock_t lock(global_lock);
12447     bool skip = false;
12448     GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
12449     // Minimal validation for command buffer state
12450     if (cb_state) {
12451         skip |= ValidateCmd(dev_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
12452     }
12453     lock.unlock();
12454
12455     if (!skip) {
12456         dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
12457     }
12458 }
12459
12460 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
12461 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
12462 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);
12463
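// GetDeviceProcAddr, GetInstanceProcAddr, and GetPhysicalDeviceProcAddr are forward-declared above because the
// interception table below must reference them; their definitions follow the table.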
12464 // Map of all APIs to be intercepted by this layer
12465 static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
12466     {"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
12467     {"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
12468     {"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
12469     {"vkCreateInstance", (void *)CreateInstance},
12470     {"vkCreateDevice", (void *)CreateDevice},
12471     {"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
12472     {"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
12473     {"vkDestroyInstance", (void *)DestroyInstance},
12474     {"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
12475     {"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
12476     {"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
12477     {"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
12478     {"vkCreateDescriptorUpdateTemplate", (void *)CreateDescriptorUpdateTemplate},
12479     {"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
12480     {"vkDestroyDescriptorUpdateTemplate", (void *)DestroyDescriptorUpdateTemplate},
12481     {"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
12482     {"vkUpdateDescriptorSetWithTemplate", (void *)UpdateDescriptorSetWithTemplate},
12483     {"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
12484     {"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
12485     {"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
12486     {"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
12487     {"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
12488     {"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
12489     {"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
12490     {"vkQueuePresentKHR", (void *)QueuePresentKHR},
12491     {"vkQueueSubmit", (void *)QueueSubmit},
12492     {"vkWaitForFences", (void *)WaitForFences},
12493     {"vkGetFenceStatus", (void *)GetFenceStatus},
12494     {"vkQueueWaitIdle", (void *)QueueWaitIdle},
12495     {"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
12496     {"vkGetDeviceQueue", (void *)GetDeviceQueue},
12497     {"vkGetDeviceQueue2", (void *)GetDeviceQueue2},
12498     {"vkDestroyDevice", (void *)DestroyDevice},
12499     {"vkDestroyFence", (void *)DestroyFence},
12500     {"vkResetFences", (void *)ResetFences},
12501     {"vkDestroySemaphore", (void *)DestroySemaphore},
12502     {"vkDestroyEvent", (void *)DestroyEvent},
12503     {"vkDestroyQueryPool", (void *)DestroyQueryPool},
12504     {"vkDestroyBuffer", (void *)DestroyBuffer},
12505     {"vkDestroyBufferView", (void *)DestroyBufferView},
12506     {"vkDestroyImage", (void *)DestroyImage},
12507     {"vkDestroyImageView", (void *)DestroyImageView},
12508     {"vkDestroyShaderModule", (void *)DestroyShaderModule},
12509     {"vkDestroyPipeline", (void *)DestroyPipeline},
12510     {"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
12511     {"vkDestroySampler", (void *)DestroySampler},
12512     {"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
12513     {"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
12514     {"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
12515     {"vkDestroyRenderPass", (void *)DestroyRenderPass},
12516     {"vkCreateBuffer", (void *)CreateBuffer},
12517     {"vkCreateBufferView", (void *)CreateBufferView},
12518     {"vkCreateImage", (void *)CreateImage},
12519     {"vkCreateImageView", (void *)CreateImageView},
12520     {"vkCreateFence", (void *)CreateFence},
12521     {"vkCreatePipelineCache", (void *)CreatePipelineCache},
12522     {"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
12523     {"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
12524     {"vkMergePipelineCaches", (void *)MergePipelineCaches},
12525     {"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
12526     {"vkCreateComputePipelines", (void *)CreateComputePipelines},
12527     {"vkCreateSampler", (void *)CreateSampler},
12528     {"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
12529     {"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
12530     {"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
12531     {"vkResetDescriptorPool", (void *)ResetDescriptorPool},
12532     {"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
12533     {"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
12534     {"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
12535     {"vkCreateCommandPool", (void *)CreateCommandPool},
12536     {"vkDestroyCommandPool", (void *)DestroyCommandPool},
12537     {"vkResetCommandPool", (void *)ResetCommandPool},
12538     {"vkCreateQueryPool", (void *)CreateQueryPool},
12539     {"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
12540     {"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
12541     {"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
12542     {"vkEndCommandBuffer", (void *)EndCommandBuffer},
12543     {"vkResetCommandBuffer", (void *)ResetCommandBuffer},
12544     {"vkCmdBindPipeline", (void *)CmdBindPipeline},
12545     {"vkCmdSetViewport", (void *)CmdSetViewport},
12546     {"vkCmdSetScissor", (void *)CmdSetScissor},
12547     {"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
12548     {"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
12549     {"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
12550     {"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
12551     {"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
12552     {"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
12553     {"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
12554     {"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
12555     {"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
12556     {"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
12557     {"vkCmdDraw", (void *)CmdDraw},
12558     {"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
12559     {"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
12560     {"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
12561     {"vkCmdDispatch", (void *)CmdDispatch},
12562     {"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
12563     {"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
12564     {"vkCmdCopyImage", (void *)CmdCopyImage},
12565     {"vkCmdBlitImage", (void *)CmdBlitImage},
12566     {"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
12567     {"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
12568     {"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
12569     {"vkCmdFillBuffer", (void *)CmdFillBuffer},
12570     {"vkCmdClearColorImage", (void *)CmdClearColorImage},
12571     {"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
12572     {"vkCmdClearAttachments", (void *)CmdClearAttachments},
12573     {"vkCmdResolveImage", (void *)CmdResolveImage},
12574     {"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
12575     {"vkCmdSetEvent", (void *)CmdSetEvent},
12576     {"vkCmdResetEvent", (void *)CmdResetEvent},
12577     {"vkCmdWaitEvents", (void *)CmdWaitEvents},
12578     {"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
12579     {"vkCmdBeginQuery", (void *)CmdBeginQuery},
12580     {"vkCmdEndQuery", (void *)CmdEndQuery},
12581     {"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
12582     {"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
12583     {"vkCmdPushConstants", (void *)CmdPushConstants},
12584     {"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
12585     {"vkCreateFramebuffer", (void *)CreateFramebuffer},
12586     {"vkCreateShaderModule", (void *)CreateShaderModule},
12587     {"vkCreateRenderPass", (void *)CreateRenderPass},
12588     {"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
12589     {"vkCmdNextSubpass", (void *)CmdNextSubpass},
12590     {"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
12591     {"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
12592     {"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
12593     {"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
12594     {"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
12595     {"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
12596     {"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
12597     {"vkSetEvent", (void *)SetEvent},
12598     {"vkMapMemory", (void *)MapMemory},
12599     {"vkUnmapMemory", (void *)UnmapMemory},
12600     {"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
12601     {"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
12602     {"vkAllocateMemory", (void *)AllocateMemory},
12603     {"vkFreeMemory", (void *)FreeMemory},
12604     {"vkBindBufferMemory", (void *)BindBufferMemory},
12605     {"vkBindBufferMemory2", (void *)BindBufferMemory2},
12606     {"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
12607     {"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
12608     {"vkGetBufferMemoryRequirements2", (void *)GetBufferMemoryRequirements2},
12609     {"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
12610     {"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
12611     {"vkGetImageMemoryRequirements2", (void *)GetImageMemoryRequirements2},
12612     {"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
12613     {"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
12614     {"vkGetImageSparseMemoryRequirements2", (void *)GetImageSparseMemoryRequirements2},
12615     {"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
12616     {"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
12617     {"vkGetPhysicalDeviceSparseImageFormatProperties2", (void *)GetPhysicalDeviceSparseImageFormatProperties2},
12618     {"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
12619     {"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
12620     {"vkBindImageMemory", (void *)BindImageMemory},
12621     {"vkBindImageMemory2", (void *)BindImageMemory2},
12622     {"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
12623     {"vkQueueBindSparse", (void *)QueueBindSparse},
12624     {"vkCreateSemaphore", (void *)CreateSemaphore},
12625     {"vkCreateEvent", (void *)CreateEvent},
12626 #ifdef VK_USE_PLATFORM_ANDROID_KHR
12627     {"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
12628 #endif
12629 #ifdef VK_USE_PLATFORM_MIR_KHR
12630     {"vkCreateMirSurfaceKHR", (void *)CreateMirSurfaceKHR},
12631     {"vkGetPhysicalDeviceMirPresentationSupportKHR", (void *)GetPhysicalDeviceMirPresentationSupportKHR},
12632 #endif
12633 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
12634     {"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
12635     {"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
12636 #endif
12637 #ifdef VK_USE_PLATFORM_WIN32_KHR
12638     {"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
12639     {"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
12640     {"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
12641     {"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
12642     {"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
12643     {"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
12644 #endif
12645 #ifdef VK_USE_PLATFORM_XCB_KHR
12646     {"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
12647     {"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
12648 #endif
12649 #ifdef VK_USE_PLATFORM_XLIB_KHR
12650     {"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
12651     {"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
12652 #endif
12653 #ifdef VK_USE_PLATFORM_IOS_MVK
12654     {"vkCreateIOSSurfaceMVK", (void *)CreateIOSSurfaceMVK},
12655 #endif
12656 #ifdef VK_USE_PLATFORM_MACOS_MVK
12657     {"vkCreateMacOSSurfaceMVK", (void *)CreateMacOSSurfaceMVK},
12658 #endif
12659     {"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
12660     {"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
12661     {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
12662     {"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
12663     {"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
12664     {"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
12665     {"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
12666     {"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
12667     {"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
12668     {"vkGetPhysicalDeviceQueueFamilyProperties2", (void *)GetPhysicalDeviceQueueFamilyProperties2},
12669     {"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
12670     {"vkEnumeratePhysicalDeviceGroups", (void *)EnumeratePhysicalDeviceGroups},
12671     {"vkEnumeratePhysicalDeviceGroupsKHR", (void *)EnumeratePhysicalDeviceGroupsKHR},
12672     {"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
12673     {"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
12674     {"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
12675     {"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
12676     {"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
12677     {"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
12678     {"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
12679     {"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
12680     {"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
12681     {"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
12682     {"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
12683     {"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
12684     {"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
12685     {"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
12686     {"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
12687     {"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
12688     {"vkSetDebugUtilsObjectNameEXT", (void *)SetDebugUtilsObjectNameEXT},
12689     {"vkSetDebugUtilsObjectTagEXT", (void *)SetDebugUtilsObjectTagEXT},
12690     {"vkQueueBeginDebugUtilsLabelEXT", (void *)QueueBeginDebugUtilsLabelEXT},
12691     {"vkQueueEndDebugUtilsLabelEXT", (void *)QueueEndDebugUtilsLabelEXT},
12692     {"vkQueueInsertDebugUtilsLabelEXT", (void *)QueueInsertDebugUtilsLabelEXT},
12693     {"vkCmdBeginDebugUtilsLabelEXT", (void *)CmdBeginDebugUtilsLabelEXT},
12694     {"vkCmdEndDebugUtilsLabelEXT", (void *)CmdEndDebugUtilsLabelEXT},
12695     {"vkCmdInsertDebugUtilsLabelEXT", (void *)CmdInsertDebugUtilsLabelEXT},
12696     {"vkCreateDebugUtilsMessengerEXT", (void *)CreateDebugUtilsMessengerEXT},
12697     {"vkDestroyDebugUtilsMessengerEXT", (void *)DestroyDebugUtilsMessengerEXT},
12698     {"vkSubmitDebugUtilsMessageEXT", (void *)SubmitDebugUtilsMessageEXT},
12699 };
12700
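// Resolve funcName to this layer's intercept when one exists; otherwise forward the query down the dispatch chain.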
12701 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
12702     assert(device);
12703     layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
12704
12705     // Is API to be intercepted by this layer?
12706     const auto &item = name_to_funcptr_map.find(funcName);
12707     if (item != name_to_funcptr_map.end()) {
12708         return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12709     }
12710
12711     auto &table = device_data->dispatch_table;
12712     if (!table.GetDeviceProcAddr) return nullptr;
12713     return table.GetDeviceProcAddr(device, funcName);
12714 }
12715
12716 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
12717     instance_layer_data *instance_data;
12718     // Is API to be intercepted by this layer?
12719     const auto &item = name_to_funcptr_map.find(funcName);
12720     if (item != name_to_funcptr_map.end()) {
12721         return reinterpret_cast<PFN_vkVoidFunction>(item->second);
12722     }
12723
12724     instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12725     auto &table = instance_data->dispatch_table;
12726     if (!table.GetInstanceProcAddr) return nullptr;
12727     return table.GetInstanceProcAddr(instance, funcName);
12728 }
12729
12730 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
12731     assert(instance);
12732     instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
12733
12734     auto &table = instance_data->dispatch_table;
12735     if (!table.GetPhysicalDeviceProcAddr) return nullptr;
12736     return table.GetPhysicalDeviceProcAddr(instance, funcName);
12737 }
12738
12739 }  // namespace core_validation
12740
12741 // Loader-layer interface v0: exported entry points are just wrappers, since this library contains only a single layer
12742
12743 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
12744                                                                                       VkExtensionProperties *pProperties) {
12745     return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
12746 }
12747
12748 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
12749                                                                                   VkLayerProperties *pProperties) {
12750     return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
12751 }
12752
12753 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
12754                                                                                 VkLayerProperties *pProperties) {
12755     // the layer command handles VK_NULL_HANDLE just fine internally
12756     assert(physicalDevice == VK_NULL_HANDLE);
12757     return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
12758 }
12759
12760 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
12761                                                                                     const char *pLayerName, uint32_t *pCount,
12762                                                                                     VkExtensionProperties *pProperties) {
12763     // the layer command handles VK_NULL_HANDLE just fine internally
12764     assert(physicalDevice == VK_NULL_HANDLE);
12765     return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
12766 }
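// A minimal sketch of how an application would query the extensions this layer advertises through
// the wrappers above; illustrative only and not compiled. The layer name string is an assumption
// based on this repository's registered layer name.
#if 0
uint32_t count = 0;
vkEnumerateInstanceExtensionProperties("VK_LAYER_LUNARG_core_validation", &count, nullptr);
std::vector<VkExtensionProperties> extensions(count);
vkEnumerateInstanceExtensionProperties("VK_LAYER_LUNARG_core_validation", &count, extensions.data());
#endif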
12767
12768 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
12769     return core_validation::GetDeviceProcAddr(dev, funcName);
12770 }
12771
12772 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
12773     return core_validation::GetInstanceProcAddr(instance, funcName);
12774 }
12775
12776 VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
12777                                                                                            const char *funcName) {
12778     return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
12779 }
12780
12781 VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
12782     assert(pVersionStruct != NULL);
12783     assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
12784
12785     // Fill in the function pointers if the loader's interface version is new enough for the structure to contain them.
12786     if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
12787         pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
12788         pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
12789         pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
12790     }
12791
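    // Settle on the lower of the two interface versions: remember an older loader's version, or
    // report this layer's maximum supported version back to a newer loader.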
12792     if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12793         core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
12794     } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
12795         pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
12796     }
12797
12798     return VK_SUCCESS;
12799 }
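// A minimal sketch of the loader-side handshake that the entry point above services; illustrative
// only and not compiled. The loader (not an application) normally performs this call after loading
// the layer library and then routes instance/device queries through the returned pointers.
#if 0
VkNegotiateLayerInterface negotiate = {};
negotiate.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
negotiate.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
if (vkNegotiateLoaderLayerInterfaceVersion(&negotiate) == VK_SUCCESS) {
    // After a successful negotiation the loader dispatches through these trampolines.
    PFN_vkGetInstanceProcAddr gipa = negotiate.pfnGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr gdpa = negotiate.pfnGetDeviceProcAddr;
}
#endif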