layers: Only validate access masks if layout changes
layers/core_validation.cpp (platform/upstream/Vulkan-LoaderAndValidationLayers.git)
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
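// Usage sketch (illustrative): downstream helpers branch on these sentinels rather
// than on a real allocation, e.g.
//     if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) { /* WSI-owned image */ }
// while MEMORY_UNBOUND marks an object whose previously bound memory was later freed
// (see VerifyBoundMemoryIsValid() below).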

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    unique_ptr<INSTANCE_STATE> instance_state = nullptr;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;
    unique_ptr<INSTANCE_STATE> instance_state = nullptr;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_NODE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
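
// Usage sketch (illustrative): each SPIR-V instruction packs its word count into the
// high 16 bits of its first word and its opcode into the low 16 bits, so operator++
// advances by len() words to reach the next instruction. Combined with the
// begin()/end() pair exposed by shader_module below, this enables range-based
// iteration over a module:
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpEntryPoint) { /* inspect insn.word(n) ... */ }
//     }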

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
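
// Lookup sketch (illustrative): callers resolve an <id> to its defining instruction
// via get_def() and must compare against end() before use, as get_constant_value()
// does further below:
//     auto def = module->get_def(id);
//     if (def != module->end() && def.opcode() == spv::OpConstant) { /* word(3) holds the literal */ }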

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_NODE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to bound memory for given handle of specified type and set sparse param to indicate if binding is sparse
static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type, bool *sparse) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        // Only dereference the node after the null-check
        if (img_node) {
            *sparse = img_node->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
            return &img_node->mem;
        }
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node) {
            *sparse = buff_node->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
            return &buff_node->mem;
        }
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// Overloaded version of above function that doesn't care about sparse bool
static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool sparse;
    return GetObjectMemBinding(my_data, handle, type, &sparse);
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                            MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                                " used by %s. In this case, %s should have %s set during creation.",
                            ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skip_call;
}

// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
                                    char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_node
//  If mem is special swapchain key, then verify that image_node valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_NODE *image_node, const char *functionName) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_node->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_node to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_NODE *image_node, bool valid) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_node->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
    SetMemoryValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_NODE *sampler_node) {
    sampler_node->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(sampler_node->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node) {
    // Skip validation if this image was created through WSI
    if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(img_node->mem);
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        img_node->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_node = getImageNode(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_node) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_node);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
    if (pMemInfo) {
        pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->mem);
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    }
    // Now update cb binding for buffer
    buff_node->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_node = getBufferNode(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_node) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->command_buffer_bindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings. Clear any object bindings.
static bool ReportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skip_call = false;
    size_t cmdBufRefCount = pMemObjInfo->command_buffer_bindings.size();
    size_t objRefCount = pMemObjInfo->obj_bindings.size();

    if ((pMemObjInfo->command_buffer_bindings.size()) != 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                            "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                            " references",
                            (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->command_buffer_bindings.size() > 0) {
        for (auto cb : pMemObjInfo->command_buffer_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->command_buffer_bindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->obj_bindings.size() > 0) {
        for (auto obj : pMemObjInfo->obj_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
            // Clear mem binding for bound objects
            switch (obj.type) {
            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
                auto image_node = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
                assert(image_node); // Any destroyed images should already be removed from bindings
                image_node->mem = MEMORY_UNBOUND;
                break;
            }
            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
                auto buff_node = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
                assert(buff_node); // Any destroyed buffers should already be removed from bindings
                buff_node->mem = MEMORY_UNBOUND;
                break;
            }
            default:
                // Should only have buffer or image objects bound to memory
                assert(0);
            }
        }
        // Clear the list of hanging references
        pMemObjInfo->obj_bindings.clear();
    }
    return skip_call;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skip_call = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        // TODO: Verify against Valid Use section
        // Clear any CB bindings for completed CBs
        //   TODO : Is there a better place to do this?

        assert(pInfo->object != VK_NULL_HANDLE);
        // clear_cmd_buf_and_mem_references removes elements from
        // pInfo->command_buffer_bindings -- this copy not needed in c++14,
        // and probably not needed in practice in c++11
        auto bindings = pInfo->command_buffer_bindings;
        for (auto cb : bindings) {
            if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                clear_cmd_buf_and_mem_references(dev_data, cb);
            }
        }
        // Now check for any remaining references to this mem obj and remove bindings
        if (pInfo->command_buffer_bindings.size() || pInfo->obj_bindings.size()) {
            skip_call |= ReportMemReferencesAndCleanUp(dev_data, pInfo);
        }
        // Delete mem obj info
        dev_data->memObjMap.erase(dev_data->memObjMap.find(mem));
    } else if (VK_NULL_HANDLE != mem) {
        // The request is to free an invalid, non-zero handle
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                            "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map",
                            reinterpret_cast<uint64_t &>(mem));
    }
    return skip_call;
}

// Remove object binding performs 2 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skip_call = false;
    VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->obj_bindings.erase({handle, type})) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skip_call;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory().",
                         api_name, type_name, handle, type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation.",
                         api_name, type_name, handle);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_NODE *image_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<const uint64_t &>(image_node->image),
                                          api_name, "Image");
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<const uint64_t &>(buffer_node->buffer),
                                          api_name, "Buffer");
    }
    return result;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        bool sparse = false;
        VkDeviceMemory *mem_binding = GetObjectMemBinding(dev_data, handle, type, &sparse);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, *mem_binding);
            if (prev_binding) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if ((*mem_binding == MEMORY_UNBOUND) && (!sparse)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *mem_binding = mem;
            }
        }
    }
    return skip_call;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if an error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skip_call;
}

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    // Guard against unknown/stale handles; a null binding is reported below
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        if (auto img_node = getImageNode(dev_data, VkImage(handle)))
            *mem = img_node->mem;
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        if (auto buff_node = getBufferNode(dev_data, VkBuffer(handle)))
            *mem = buff_node->mem;
        break;
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                                   " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->command_buffer_bindings.size() + mem_info->obj_bindings.size());
        if (0 != mem_info->alloc_info.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->obj_bindings.size());
        if (mem_info->obj_bindings.size() > 0) {
            for (auto obj : mem_info->obj_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->command_buffer_bindings.size());
        if (mem_info->command_buffer_bindings.size() > 0) {
            for (auto cb : mem_info->command_buffer_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
1078     case CMD_CLEARDEPTHSTENCILIMAGE:
1079         return "CMD_CLEARDEPTHSTENCILIMAGE";
1080     case CMD_RESOLVEIMAGE:
1081         return "CMD_RESOLVEIMAGE";
1082     case CMD_SETEVENT:
1083         return "CMD_SETEVENT";
1084     case CMD_RESETEVENT:
1085         return "CMD_RESETEVENT";
1086     case CMD_WAITEVENTS:
1087         return "CMD_WAITEVENTS";
1088     case CMD_PIPELINEBARRIER:
1089         return "CMD_PIPELINEBARRIER";
1090     case CMD_BEGINQUERY:
1091         return "CMD_BEGINQUERY";
1092     case CMD_ENDQUERY:
1093         return "CMD_ENDQUERY";
1094     case CMD_RESETQUERYPOOL:
1095         return "CMD_RESETQUERYPOOL";
1096     case CMD_COPYQUERYPOOLRESULTS:
1097         return "CMD_COPYQUERYPOOLRESULTS";
1098     case CMD_WRITETIMESTAMP:
1099         return "CMD_WRITETIMESTAMP";
1100     case CMD_INITATOMICCOUNTERS:
1101         return "CMD_INITATOMICCOUNTERS";
1102     case CMD_LOADATOMICCOUNTERS:
1103         return "CMD_LOADATOMICCOUNTERS";
1104     case CMD_SAVEATOMICCOUNTERS:
1105         return "CMD_SAVEATOMICCOUNTERS";
1106     case CMD_BEGINRENDERPASS:
1107         return "CMD_BEGINRENDERPASS";
1108     case CMD_ENDRENDERPASS:
1109         return "CMD_ENDRENDERPASS";
1110     default:
1111         return "UNKNOWN";
1112     }
1113 }
1114
// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
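
// Example: after build_def_index, a module containing
//     %7 = OpTypeFloat 32
//     %8 = OpConstant %7 1.0
// has def_index entries for ids 7 and 8 pointing at those instructions. The
// result <id> is word(1) for OpType* instructions, but word(2) for constants,
// variables and functions, which carry a result-type word first.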

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
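
// For reference, OpEntryPoint is laid out as: word(1) = execution model,
// word(2) = entrypoint function <id>, word(3)... = literal name. The shift in
// find_entrypoint relies on VkShaderStageFlagBits being defined as
// (1 << execution model) for the models we handle, e.g. Vertex = 0 -> 0x1 and
// Fragment = 4 -> 0x10.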

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}
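
// Example: for "%5 = OpConstant %uint 8", get_constant_value(src, 5) returns
// word(3), i.e. 8. Only the first value word is read, so a 64-bit constant
// yields just its low-order 32 bits here.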


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
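
// Example: the GLSL declaration "out vec4 color;" in a vertex shader is
// described as "ptr to output vec4 of float32". These strings appear only in
// interface-mismatch log messages.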


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
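
// Example of the relaxed matching used between stages: a producer writing
// "vec4 of float32" satisfies a consumer reading "vec2 of float32" (same
// element type, at least as many components). Within composite types the
// recursion passes relaxed = false, so component counts must match exactly.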

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
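
// Worked example: a dvec3 is a 3-component vector of 64-bit floats, so it
// consumes (64 * 3 + 127) / 128 = 2 locations, while a vec4 consumes
// (32 * 4 + 127) / 128 = 1.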

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}
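
// This mirrors get_locations_consumed_by_type on the attribute side: e.g. a
// VK_FORMAT_R64G64B64A64_SFLOAT attribute occupies its base location plus the
// next one, matching a dvec4 input on the shader side.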

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}
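
// Example: a geometry shader input block "in Block { vec4 v; } blk[];" has the
// type chain ptr -> array -> struct; with is_array_of_verts set, the
// per-vertex array level is peeled off and the struct itself is returned.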

static void collect_interface_block_members(shader_module const *src,
                                            std::map<location_t, interface_var> *out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    (*out)[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}

static std::map<location_t, interface_var> collect_interface_by_location(
        shader_module const *src, spirv_inst_iter entrypoint,
        spv::StorageClass sinterface, bool is_array_of_verts) {

    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;

    for (auto insn : *src) {
        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    std::map<location_t, interface_var> out;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupied multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = false;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }

    return out;
}
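
// Example: a vertex shader declaring "layout(location = 2) out dvec3 pos;"
// produces entries at keys (2, 0) and (3, 0) -- one per consumed location --
// both referring to the same variable id.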

static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
        debug_report_data *report_data, shader_module const *src,
        std::unordered_set<uint32_t> const &accessible_ids) {

    std::vector<std::pair<uint32_t, interface_var>> out;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
                auto attachment_index = insn.word(3);
                auto id = insn.word(1);

                if (accessible_ids.count(id)) {
                    auto def = src->get_def(id);
                    assert(def != src->end());

                    /* word(3) of an OpVariable is its storage class; the decoration's word(3)
                     * is the attachment index already captured above */
                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
                        for (unsigned int offset = 0; offset < num_locations; offset++) {
                            interface_var v;
                            v.id = id;
                            v.type_id = def.word(1);
                            v.offset = offset;
                            v.is_patch = false;
                            v.is_block_member = false;
                            out.emplace_back(attachment_index + offset, v);
                        }
                    }
                }
            }
        }
    }

    return out;
}

static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
        debug_report_data *report_data, shader_module const *src,
        std::unordered_set<uint32_t> const &accessible_ids) {

    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    std::vector<std::pair<descriptor_slot_t, interface_var>> out;

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            interface_var v;
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            v.offset = 0;
            v.is_patch = false;
            v.is_block_member = false;
            out.emplace_back(std::make_pair(set, binding), v);
        }
    }

    return out;
}
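
// Example: "layout(set = 0, binding = 2) uniform sampler2D tex;" yields one
// entry keyed by the descriptor_slot_t (0, 2), provided the variable is
// reachable from the entrypoint (see mark_accessible_ids below).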

static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              shader_stage_attributes const *consumer_stage) {
    bool pass = true;

    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
                        a_first.second, consumer_stage->name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
                        producer_stage->name)) {
                pass = false;
            }
            b_it++;
        } else {
            // subtleties of arrayed interfaces:
            // - if is_patch, then the member is not arrayed, even though the interface may be.
            // - if is_block_member, then the extra array level of an arrayed interface is not
            //   expressed in the member type -- it's expressed in the block type.
            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
                             true)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            if (a_it->second.is_patch != b_it->second.is_patch) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
                            "per-%s in %s stage", a_first.first, a_first.second,
                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64G64_SINT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_R64_UINT:
    case VK_FORMAT_R64G64_UINT:
    case VK_FORMAT_R64G64B64_UINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}

/* characterizes a SPIR-V type appearing in an interface to a FF stage,
 * for comparison to a VkFormat's characterization above. */
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeMatrix:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeArray:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    case spv::OpTypeImage:
        return get_fundamental_type(src, insn.word(2));

    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}

static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}
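
// Example: u_ffs returns the 1-based position of the lowest set bit, so
// VK_SHADER_STAGE_VERTEX_BIT (0x1) maps to stage id 0 and
// VK_SHADER_STAGE_FRAGMENT_BIT (0x10) maps to stage id 4.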

static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
     * each binding should be specified only once.
     */
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}

static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    bool pass = true;

    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
            for (auto j = 0u; j < num_locations; j++) {
                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
            }
        }
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();
    bool used = false;

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "Vertex attribute at location %d not consumed by VS", a_first)) {
                pass = false;
            }
            used = false;
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
                        b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            /* type checking */
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            used = true;
            it_b++;
        }
    }

    return pass;
}

static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
                                                    uint32_t subpass_index) {
    std::map<uint32_t, VkFormat> color_attachments;
    auto subpass = rpci->pSubpasses[subpass_index];
    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
        uint32_t attachment = subpass.pColorAttachments[i].attachment;
        if (attachment == VK_ATTACHMENT_UNUSED)
            continue;
        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
            color_attachments[i] = rpci->pAttachments[attachment].format;
        }
    }

    bool pass = true;

    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */

    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);

    auto it_a = outputs.begin();
    auto it_b = color_attachments.begin();

    /* Walk attachment list and outputs together */

    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();

        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
            unsigned att_type = get_format_type(it_b->second);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
                            string_VkFormat(it_b->second),
                            describe_type(fs, it_a->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it_a++;
            it_b++;
        }
    }

    return pass;
}

/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
 * for example.
 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
 *  - NOT the shader input/output interfaces.
 *
 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
 * converting parts of this to be generated from the machine-readable spec instead.
 */
static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
    std::unordered_set<uint32_t> ids;
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
             * across all kinds of things here that we may not care about. */
            continue;
        }

        /* try to add to the output set */
        if (!ids.insert(id).second) {
            continue; /* if we already saw this id, we don't want to walk it again. */
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (uint32_t i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (uint32_t i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }

    return ids;
}
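
// Example: a declared but never-referenced "uniform sampler2D unused;" never
// enters the worklist, so it is absent from the returned set and
// collect_interface_by_descriptor_slot will not treat its binding as used.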

static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {

            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *push_constant_ranges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}

static bool validate_push_constant_usage(debug_report_data *report_data,
                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
                                                                  src->get_def(def_insn.word(1)), stage);
        }
    }

    return pass;
}

// For the given pipelineLayout, verify that the set_layout_node at slot.first
//  has the requested binding at slot.second and return a ptr to that binding
static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {

    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->set_layouts.size())
        return nullptr;

    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
}

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};

// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
//   to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread

static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return true;
    }
    return false;
}

// Check object status for selected flag state
static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
    if (!(pNode->status & status_mask)) {
        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
    }
    return false;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
    auto it = my_data->pipelineMap.find(pipeline);
    if (it == my_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
    auto it = my_data->renderPassMap.find(renderpass);
    if (it == my_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
    auto it = my_data->frameBufferMap.find(framebuffer);
    if (it == my_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == my_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
    if (it == my_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return true;
        }
    }
    return false;
}

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
    }
    if (indexedDraw) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    }
    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For the given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

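        // The depth/stencil attachment is a single reference (or null), so compare it as a one-element array.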
        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                            string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return pSet->IsCompatible(layout_node, &errorMsg);
}

// Validate that data for each specialization entry is fully contained within the buffer.
static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
    bool pass = true;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided)",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {

                    pass = false;
                }
            }
        }
    }

    return pass;
}

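// Check that the SPIR-V type backing a shader variable is consistent with the VkDescriptorType
// declared in the set layout, and report how many descriptors the variable consumes
// (arrays consume one descriptor per element, per array dimension).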
static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or ptrs. Where we remove array levels, adjust the
     * descriptor count for each dimension. */
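    /* SPIR-V operand layout: OpTypeArray holds the element type in word 2 and the length
     * constant in word 3; OpTypePointer holds the pointee type in word 3. */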
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        } else {
            type = module->get_def(type.word(3));
        }
    }

    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeSampledImage:
        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            /* Slight relaxation for some GLSL historical madness: samplerBuffer
             * doesn't really have a sampler, and a texel buffer descriptor
             * doesn't really provide one. Allow this slight mismatch.
             */
            auto image_type = module->get_def(type.word(2));
            auto dim = image_type.word(3);
            auto sampled = image_type.word(7);
            return dim == spv::DimBuffer && sampled == 1;
        }
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Image types can be backed by many descriptor types; the right one depends on the
         * dimension and on whether the image will be used with a sampler. SPIR-V for
         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
         * runtime is unacceptable.
         */
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}

static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
    bool pass = true;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, which is not supported in Vulkan.",
                            insn.word(1)))
                    pass = false;
                break;
            }
        }
    }

    return pass;
}

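// Map the SPIR-V type of a binding to the DESCRIPTOR_REQ_* view-type and sample-count flags
// that a bound descriptor must satisfy. For OpTypeImage, the SPIR-V operand layout puts
// Dim in word 3, Arrayed in word 5, and MS in word 6.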
static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
    auto type = module->get_def(type_id);

    while (true) {
        switch (type.opcode()) {
        case spv::OpTypeArray:
        case spv::OpTypeSampledImage:
            type = module->get_def(type.word(2));
            break;
        case spv::OpTypePointer:
            type = module->get_def(type.word(3));
            break;
        case spv::OpTypeImage: {
            auto dim = type.word(3);
            auto arrayed = type.word(5);
            auto msaa = type.word(6);

            switch (dim) {
            case spv::Dim1D:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
            case spv::Dim2D:
                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
            case spv::Dim3D:
                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
            case spv::DimCube:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
            case spv::DimSubpassData:
                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
            default:  // buffer, etc.
                return 0;
            }
        }
        default:
            return 0;
        }
    }
}

static bool validate_pipeline_shader_stage(debug_report_data *report_data,
                                           VkPipelineShaderStageCreateInfo const *pStage,
                                           PIPELINE_NODE *pipeline,
                                           shader_module **out_module,
                                           spirv_inst_iter *out_entrypoint,
                                           VkPhysicalDeviceFeatures const *enabledFeatures,
                                           std::unordered_map<VkShaderModule,
                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
    bool pass = true;
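    // Note: this lookup assumes pStage->module is a valid handle already tracked in
    // shaderModuleMap; invalid handles are expected to be caught by earlier validation.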
    auto module_it = shaderModuleMap.find(pStage->module);
    auto module = *out_module = module_it->second.get();

    /* find the entrypoint */
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                    "No entrypoint found named `%s` for stage %s", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage))) {
            return false;   // no point continuing beyond here, any analysis is just going to be garbage.
        }
    }

    /* validate shader capabilities against enabled device features */
    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);

    /* mark accessible ids */
    auto accessible_ids = mark_accessible_ids(module, entrypoint);

    /* validate descriptor set layout against what the entrypoint actually uses */
    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);

    auto pipelineLayout = pipeline->pipeline_layout;

    pass &= validate_specialization_offsets(report_data, pStage);
    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);

    /* validate descriptor use */
    for (auto use : descriptor_uses) {
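        // use.first is a (set, binding) pair identifying the descriptor slot; use.second carries
        // the SPIR-V type information describing how the shader uses that slot.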
        // While validating shaders capture which slots are used by the pipeline
        auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));

        /* verify given pipelineLayout has requested setLayout with requested binding */
        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but descriptor not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
                                          /*out*/ required_descriptor_count)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Type mismatch on descriptor slot %u.%u (used as type `%s`) but bound descriptor is of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    /* validate use of input attachments against subpass structure */
    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);

        auto rpci = pipeline->render_pass_ci.ptr();
        auto subpass = pipeline->graphicsPipelineCI.subpass;

        for (auto use : input_attachment_uses) {
            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;

            if (index == VK_ATTACHMENT_UNUSED) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
                            "Shader consumes input attachment index %u but it is not provided in the subpass",
                            use.first)) {
                    pass = false;
                }
            }
            else if (get_format_type(rpci->pAttachments[index].format) !=
                    get_fundamental_type(module, use.second.type_id)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
                            use.first, string_VkFormat(rpci->pAttachments[index].format),
                            describe_type(module, use.second.type_id).c_str())) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}

// Validate the shaders used by the given pipeline, and store the descriptor slots
//  actually used by the pipeline into pPipeline->active_slots
static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

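    /* One slot per graphics stage (vertex, tess control, tess eval, geometry, fragment),
     * indexed via get_shader_stage_id(). */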
    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
                                               &shaders[stage_id], &entrypoints[stage_id],
                                               enabledFeatures, shaderModuleMap);
    }

    // if the shader stages are no good individually, cross-stage validation is pointless.
    if (!pass)
        return false;

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(report_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

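    // Walk the enabled stages in pipeline order, validating each consumer's inputs against the
    // outputs of the nearest earlier active stage (stages that are not present are skipped).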
    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass &= validate_interface_between_stages(report_data,
                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    if (shaders[fragment_stage]) {
        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
    }

    return pass;
}

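// A compute pipeline contains exactly one shader stage, so validating that single stage covers
// the whole pipeline.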
static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->computePipelineCI.ptr();

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
}
// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}
// For the given command buffer, verify and update the state for activeSetBindingsPairs
//  This includes:
//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
//     To be valid, the dynamic offset combined with the offset and range from its
//     descriptor update must not overflow the size of its buffer being updated
//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
static bool validate_and_update_drawtime_descriptor_state(
    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
        &activeSetBindingsPairs,
    const char *function) {
    bool result = false;
    for (auto set_bindings_pair : activeSetBindingsPairs) {
        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
        std::string err_str;
        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
                                         &err_str)) {
            // Report error here
            auto set = set_node->GetSet();
            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
        }
        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
    }
    return result;
}

// For given pipeline, return number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

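// Write the indices of the set bits in 'bits' to the stream as a comma-separated list,
// e.g. 0b1011 produces "0,1,3".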
static void list_bits(std::ostream& s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1u << i)) {
            s << i;
            bits &= ~(1u << i);
            if (bits) {
                s << ",";
            }
        }
    }
}

// Validate draw-time state related to the PSO
static bool validatePipelineDrawtimeState(layer_data const *my_data,
                                          LAST_BOUND_STATE const &state,
                                          const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_NODE const *pPipeline) {
    bool skip_call = false;

    // Verify Vtx binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip_call |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                    (uint64_t)state.pipeline_node->pipeline, vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                 "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                                 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline_node->pipeline);
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
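            // The PSO declares viewportCount viewports; build a mask with one bit per required
            // viewport and flag any index for which vkCmdSetViewport() was never recorded.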
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by PSO, but were not provided via calls to vkCmdSetViewport().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by PSO, but were not provided via calls to vkCmdSetScissor().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;

            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
                skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "In Pipeline (0x%" PRIxLEAST64 "), the blend state attachment count %u does not match the "
                                "color attachment count %u of render pass subpass %u. These must be the same at draw-time.",
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), color_blend_state->attachmentCount,
                                subpass_desc->colorAttachmentCount, pCB->activeSubpass);
            }

            unsigned subpass_num_samples = 0;

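            // Accumulate the sample-count bits of every attachment the subpass uses; each
            // VkSampleCountFlagBits value is a single bit, so a consistent subpass contributes one bit.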
            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
                                ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                        pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip_call;
}

// Validate overall state at the time of a draw call
static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
                                           const VkPipelineBindPoint bindPoint, const char *function) {
    bool result = false;
    auto const &state = cb_node->lastBound[bindPoint];
    PIPELINE_NODE *pPipe = state.pipeline_node;
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return unconditionally: every check below dereferences pPipe, so continuing
        // without a pipeline would crash even when the message above is filtered out.
        return result;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
        vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
            activeSetBindingsPairs;
        for (auto &setBindingPair : pPipe->active_slots) {
            uint32_t setIndex = setBindingPair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
                                  setIndex);
            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
                            errorString.c_str());
            } else { // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
                // Gather active bindings
                std::unordered_set<uint32_t> bindings;
                for (auto binding : setBindingPair.second) {
                    bindings.insert(binding.first);
                }
                // Bind this set and its active descriptor resources to the command buffer
                pSet->BindCommandBuffer(cb_node, bindings);
                // Save vector of all active sets to verify dynamicOffsets below
                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
                // Make sure set has been updated if it has no immutable samplers
                //  If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!pSet->IsUpdated()) {
                    for (auto binding : bindings) {
                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                "this will result in undefined behavior.",
                                (uint64_t)pSet->GetSet());
                        }
                    }
                }
            }
        }
        // For given active slots, verify any dynamic descriptors and record updated images & buffers
        result |= validate_and_update_drawtime_descriptor_state(my_data, cb_node, activeSetBindingsPairs, function);
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result |= validatePipelineDrawtimeState(my_data, state, cb_node, pPipe);

    return result;
}

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                                            "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                                          "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}

// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> const &pPipelines,
                                      int pipelineIndex) {
3318     bool skip_call = false;
3319
3320     PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3321
3322     // If create derivative bit is set, check that we've specified a base
3323     // pipeline correctly, and that the base pipeline was created to allow
3324     // derivatives.
3325     if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3326         PIPELINE_NODE *pBasePipeline = nullptr;
3327         if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3328               (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3329             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3330                                  DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3331                                  "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3332         } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3333             if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3334                 skip_call |=
3335                     log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3336                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3337                             "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3338             } else {
3339                 pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3340             }
3341         } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3342             pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3343         }
3344
3345         if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3346             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3347                                  DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3348                                  "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3349         }
3350     }
3351
3352     if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3353         if (!my_data->enabled_features.independentBlend) {
3354             if (pPipeline->attachments.size() > 1) {
3355                 VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3356                 for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3357                     // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3358                     // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3359                     // only attachment state, so memcmp is best suited for the comparison
3360                     if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3361                                sizeof(pAttachments[0]))) {
3362                         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3363                                              __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3364                                              "Invalid Pipeline CreateInfo: If independent blend feature not "
3365                                              "enabled, all elements of pAttachments must be identical");
3366                         break;
3367                     }
3368                 }
3369             }
3370         }
3371         if (!my_data->enabled_features.logicOp &&
3372             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3373             skip_call |=
3374                 log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3375                         DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3376                         "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3377         }
3378     }
3379
3380     // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3381     // produces nonsense errors that confuse users. Other layers should already
3382     // emit errors for renderpass being invalid.
3383     auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3384     if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3385         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3386                              DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3387                                                                             "is out of range for this renderpass (0..%u)",
3388                              pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1);
3389     }
3390
3391     if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
3392                                                     my_data->shaderModuleMap)) {
3393         skip_call = true;
3394     }
3395     // Each shader's stage must be unique
3396     if (pPipeline->duplicate_shaders) {
3397         for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3398             if (pPipeline->duplicate_shaders & stage) {
3399                 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3400                                      __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3401                                      "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3402                                      string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3403             }
3404         }
3405     }
3406     // VS is required
3407     if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3408         skip_call |=
3409             log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3410                     DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3411     }
3412     // Either both or neither TC/TE shaders should be defined
3413     if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3414         ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3415         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3416                              DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3417                              "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3418     }
    // Compute shaders should be specified independently of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                            "topology for tessellation pipelines");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                       "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo State: "
                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                 "topology used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                                "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                                "topology used with patchControlPoints value %u."
                                                                                " patchControlPoints should be >0 and <=32.",
                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
        }
    }
    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                            "and scissors are dynamic PSO must include "
                                                                            "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                 "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                 pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                "vkCmdSetViewport().",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                         "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                         "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                         "vkCmdSetScissor().",
                                         pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }

        // If rasterization is not disabled, and subpass uses a depth/stencil
        // attachment, pDepthStencilState must be a pointer to a valid structure
        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment");
            }
        }
    }
    return skip_call;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}

// Return false if update struct is of valid type, otherwise flag error and return code from callback
static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        return false;
    default:
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
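// Illustrative example (derived from the arithmetic above): a binding whose descriptors
// begin at overall index 4, updated with arrayIndex 1 and descriptorCount 3, spans
// overall indices 5 (start = 4 + 1) through 7 (end = 4 + 1 + 3 - 1).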
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        /* no need to validate */
        return false;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
// TODO: Consolidate functions
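// Find the cmd-buffer-level layout node for imgpair narrowed to the given aspect.
// Returns false if the subresource lacks that aspect or has no map entry; flags an
// error if aspects queried so far disagree on layout or initialLayout.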
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

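// Global-level variant: look up the device-wide layout for imgpair narrowed to the
// given aspect, flagging an error if previously-queried aspects disagree on layout.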
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}

// find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

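// Collect every per-subresource layout currently recorded for the given image.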
bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto img_node = getImageNode(my_data, image);
    if (!img_node)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now, potential errors in
    // this case are simply ignored.
    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

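// Cmd-buffer-level, layout-only overload: if the subresource is already tracked, just
// update its current layout; otherwise record the last-known layout (if any) as the
// node's initialLayout before storing the new layout.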
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

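// Narrow imgpair to a single aspect and set its layout, skipping aspects the
// subresource does not contain.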
template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, image, imgpair, layout);
}

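// Apply a layout to every mip level and array layer covered by the given image view.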
void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = getImageViewState(dev_data, imageView);
    assert(view_state);
    auto image = view_state->create_info.image;
    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
    bool skip_call = false;
    auto set_node = my_data->setMap.find(set);
    if (set_node == my_data->setMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        if (set_node->second->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
                                 func_str.c_str(), (uint64_t)(set));
        }
    }
    return skip_call;
}

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

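// Flag an error for a command recorded into a command buffer that is not in the recording state.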
static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

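// Verify that the given command type is compatible with the contents of the active
// subpass (inline vs. secondary command buffers).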
bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}

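// Queue-capability helpers: flag an error if the pool's queue family lacks the
// capability (graphics, compute, or either) required by the named command.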
static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // Init cmd node and append to end of cmd list
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skip_call;
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
        base_ptr = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        base_ptr = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        base_ptr = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        base_ptr = getImageNode(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
        base_ptr = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
        base_ptr = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
        base_ptr = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        break;
    }
    default:
        // TODO : Any other objects to be handled here?
        assert(0);
        break;
    }
    return base_ptr;
}

// Tie the VK_OBJECT to the cmd buffer which includes:
//  Add object_binding to cmd buffer
//  Add cb_binding to object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj)
        base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_node = getFramebuffer(dev_data, framebuffer);
            if (fb_node)
                fb_node->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
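// Illustrative example: a PSO that lists VK_DYNAMIC_STATE_LINE_WIDTH as dynamic leaves
// CBSTATUS_LINE_WIDTH_SET clear here; the bit is expected to be set later when
// vkCmdSetLineWidth() is recorded into the command buffer.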

// Print the last bound Gfx Pipeline
static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
        if (pPipeTrav) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                 vk_print_vkgraphicspipelinecreateinfo(
                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                     .c_str());
        }
    }
    return skip_call;
}

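// Dump the recorded command list of the given command buffer at INFORMATION level.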
static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
        vector<CMD_NODE> cmds = pCB->cmds;
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cb as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    }
}

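// If INFORMATION-level reporting is enabled, print the last-bound graphics pipeline state.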
4253 static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4254     bool skip_call = false;
4255     if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4256         return skip_call;
4257     }
4258     skip_call |= printPipeline(my_data, cb);
4259     return skip_call;
4260 }
4261
4262 // Flags validation error if the associated call is made inside a render pass. The apiName
4263 // routine should ONLY be called outside a render pass.
4264 static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4265     bool inside = false;
4266     if (pCB->activeRenderPass) {
4267         inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4268                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4269                          "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
4270                          (uint64_t)pCB->activeRenderPass->renderPass);
4271     }
4272     return inside;
4273 }
4274
4275 // Flags validation error if the associated call is made outside a render pass. The apiName
4276 // routine should ONLY be called inside a render pass.
4277 static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4278     bool outside = false;
4279     if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4280         ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4281          !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4282         outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4283                           (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4284                           "%s: This call must be issued inside an active render pass.", apiName);
4285     }
4286     return outside;
4287 }
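// Illustrative usage of the two helpers above (hypothetical call sites, for clarity): an API
// that the spec requires to be recorded outside a render pass is checked with insideRenderPass(),
// and one that must be recorded inside a render pass with outsideRenderPass(), e.g.:
//     skip_call |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");  // must be outside an RP
//     skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");       // must be inside an RP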
4288
4289 static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
4290
4291     layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
4292
4293 }
4294
4295 VKAPI_ATTR VkResult VKAPI_CALL
4296 CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4297     VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4298
4299     assert(chain_info->u.pLayerInfo);
4300     PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4301     PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4302     if (fpCreateInstance == NULL)
4303         return VK_ERROR_INITIALIZATION_FAILED;
4304
4305     // Advance the link info for the next element on the chain
4306     chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4307
4308     VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4309     if (result != VK_SUCCESS)
4310         return result;
4311
4312     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
4313     instance_data->instance = *pInstance;
4314     layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
4315
4316     instance_data->report_data = debug_report_create_instance(
4317         &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4318     init_core_validation(instance_data, pAllocator);
4319
4320     instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
4321     ValidateLayerOrdering(*pCreateInfo);
4322
4323     return result;
4324 }
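// Note on the pattern above (the standard layer bootstrap, summarized here for readers of this
// file): the layer pulls the next vkGetInstanceProcAddr off the layer chain, advances the chain
// so the next layer sees its own link info, and only builds its dispatch table and per-instance
// state once the downstream vkCreateInstance has succeeded. CreateDevice below follows the same
// shape for the device chain.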
4325
4326 /* hook DestroyInstance to remove tableInstanceMap entry */
4327 VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4328     // TODOSC : Shouldn't need any customization here
4329     dispatch_key key = get_dispatch_key(instance);
4330     // TBD: Need any locking this early, in case this function is called at the
4331     // same time by more than one thread?
4332     instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
4333     instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
4334
4335     std::lock_guard<std::mutex> lock(global_lock);
4336     // Clean up logging callback, if any
4337     while (instance_data->logging_callback.size() > 0) {
4338         VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
4339         layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
4340         instance_data->logging_callback.pop_back();
4341     }
4342
4343     layer_debug_report_destroy_instance(instance_data->report_data);
4344     instance_layer_data_map.erase(key); // instance state lives in the instance map, not the device map
4345 }
4346
4347 static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4348     uint32_t i;
4349     // TBD: Need any locking, in case this function is called at the same time
4350     // by more than one thread?
4351     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4352     dev_data->device_extensions.wsi_enabled = false;
4353     dev_data->device_extensions.wsi_display_swapchain_enabled = false;
4354
4355     for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4356         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4357             dev_data->device_extensions.wsi_enabled = true;
4358         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
4359             dev_data->device_extensions.wsi_display_swapchain_enabled = true;
4360     }
4361 }
4362
4363 // Verify that the requested queue families have been properly queried and are valid
4364 bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu, const VkDeviceCreateInfo *create_info) {
4365     bool skip_call = false;
4366     auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
4367     // First check is that the app has actually requested queueFamilyProperties
4368     if (!physical_device_state) {
4369         skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4370                              0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
4371                              "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
4372     } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
4373         // TODO: This is not called out as an invalid use in the spec, so make a more informative recommendation.
4374         skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
4375                              VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4376                              "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4377     } else {
4378         // Check that the requested queue properties are valid
4379         for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4380             uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4381             if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
4382                 skip_call |= log_msg(
4383                     instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4384                     __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4385                     "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4386             } else if (create_info->pQueueCreateInfos[i].queueCount >
4387                        physical_device_state->queue_family_properties[requestedIndex].queueCount) {
4388                 skip_call |=
4389                     log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4390                             0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4391                             "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4392                             "requested queueCount is %u.",
4393                             requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
4394                             create_info->pQueueCreateInfos[i].queueCount);
4395             }
4396         }
4397     }
4398     return skip_call;
4399 }
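// Hypothetical request that trips the checks above, on a device whose family 0 reports
// queueCount == 1:
//     VkDeviceQueueCreateInfo qci = {};
//     qci.queueFamilyIndex = 0;
//     qci.queueCount = 2;  // only 1 queue available -> DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST
//     // A queueFamilyIndex >= the reported family count fails the first per-queue check instead.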
4400
4401 // Verify that features have been queried and that they are available
4402 static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
4403     bool skip_call = false;
4404
4405     auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
4406     const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
4407     const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
4408     // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
4409     //  Need to provide the struct member name with the issue. To do that seems like we'll
4410     //  have to loop through each struct member which should be done w/ codegen to keep in synch.
4411     uint32_t errors = 0;
4412     uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
4413     for (uint32_t i = 0; i < total_bools; i++) {
4414         if (requested[i] > actual[i]) {
4415             // TODO: Add index to struct member name helper to be able to include a feature name
4416             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4417                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4418                 "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
4419                 "which is not available on this device.",
4420                 i);
4421             errors++;
4422         }
4423     }
4424     if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
4425         // If user didn't request features, notify them that they should
4426         // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
4427         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4428                              VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4429                              "DL", "You requested features that are unavailable on this device. You should first query feature "
4430                                    "availability by calling vkGetPhysicalDeviceFeatures().");
4431     }
4432     return skip_call;
4433 }
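// A minimal sketch of the VkBool32-array walk used above (hypothetical values, for illustration):
//     VkPhysicalDeviceFeatures requested = {};
//     requested.geometryShader = VK_TRUE;  // request a single feature
//     const VkBool32 *flags = reinterpret_cast<const VkBool32 *>(&requested);
//     // flags[i] visits the struct members in declaration order, so a member that is VK_TRUE in
//     // the request but VK_FALSE in the device's reported features trips the check above.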
4434
4435 VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4436                                             const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4437     instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
4438     bool skip_call = false;
4439
4440     // Check that any requested features are available
4441     if (pCreateInfo->pEnabledFeatures) {
4442         skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
4443     }
4444     skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);
4445
4446     if (skip_call) {
4447         return VK_ERROR_VALIDATION_FAILED_EXT;
4448     }
4449
4450     VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4451
4452     assert(chain_info->u.pLayerInfo);
4453     PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4454     PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4455     PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4456     if (fpCreateDevice == NULL) {
4457         return VK_ERROR_INITIALIZATION_FAILED;
4458     }
4459
4460     // Advance the link info for the next element on the chain
4461     chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4462
4463     VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4464     if (result != VK_SUCCESS) {
4465         return result;
4466     }
4467
4468     std::unique_lock<std::mutex> lock(global_lock);
4469     layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4470
4471     // Copy instance state into this device's layer_data struct
4472     my_device_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE(*(my_instance_data->instance_state)));
4473     my_device_data->instance_data = my_instance_data;
4474     // Setup device dispatch table
4475     layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
4476     my_device_data->device = *pDevice;
4477
4478     my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4479     checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4480     // Get physical device limits for this device
4481     my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4482     uint32_t count;
4483     my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4484     my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4485     my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
4486         gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4487     // TODO: device limits should make sure these are compatible
4488     if (pCreateInfo->pEnabledFeatures) {
4489         my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
4490     } else {
4491         memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
4492     }
4493     // Store physical device mem limits into device layer_data struct
4494     my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4495     lock.unlock();
4496
4497     ValidateLayerOrdering(*pCreateInfo);
4498
4499     return result;
4500 }
4501
4503 VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4504     // TODOSC : Shouldn't need any customization here
4505     dispatch_key key = get_dispatch_key(device);
4506     layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4507     // Free all the memory
4508     std::unique_lock<std::mutex> lock(global_lock);
4509     deletePipelines(dev_data);
4510     dev_data->renderPassMap.clear();
4511     deleteCommandBuffers(dev_data);
4512     // This will also delete all sets in the pool & remove them from setMap
4513     deletePools(dev_data);
4514     // All sets should be removed
4515     assert(dev_data->setMap.empty());
4516     for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4517         delete del_layout.second;
4518     }
4519     dev_data->descriptorSetLayoutMap.clear();
4520     dev_data->imageViewMap.clear();
4521     dev_data->imageMap.clear();
4522     dev_data->imageSubresourceMap.clear();
4523     dev_data->imageLayoutMap.clear();
4524     dev_data->bufferViewMap.clear();
4525     dev_data->bufferMap.clear();
4526     // Queues persist until device is destroyed
4527     dev_data->queueMap.clear();
4528     lock.unlock();
4529 #if MTMERGESOURCE
4530     bool skip_call = false;
4531     lock.lock();
4532     log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4533             (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4534     log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4535             (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4536     print_mem_list(dev_data);
4537     printCBList(dev_data);
4538     // Report any memory leaks
4539     DEVICE_MEM_INFO *pInfo = NULL;
4540     if (!dev_data->memObjMap.empty()) {
4541         for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4542             pInfo = (*ii).second.get();
4543             if (pInfo->alloc_info.allocationSize != 0) {
4544                 // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4545                 skip_call |=
4546                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4547                             (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
4548                             "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4549                             "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4550                             (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4551             }
4552         }
4553     }
4554     layer_debug_report_destroy_device(device);
4555     lock.unlock();
4556
4557 #if DISPATCH_MAP_DEBUG
4558     fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4559 #endif
4560     if (!skip_call) {
4561         dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4562     }
4563 #else
4564     dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4565 #endif
4566     layer_data_map.erase(key);
4567 }
4568
4569 static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4570
4571 // Validate that the initial layout recorded for each image referenced by this command buffer
4572 // matches the image's current globally tracked layout at the time the command buffer is
4573 // submitted.
4574 static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4575     bool skip_call = false;
4576     for (auto cb_image_data : pCB->imageLayoutMap) {
4577         VkImageLayout imageLayout;
4578         if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4579             skip_call |=
4580                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4581                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4582                         reinterpret_cast<const uint64_t &>(cb_image_data.first));
4583         } else {
4584             if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4585                 // TODO: Set memory invalid which is in mem_tracker currently
4586             } else if (imageLayout != cb_image_data.second.initialLayout) {
4587                 if (cb_image_data.first.hasSubresource) {
4588                     skip_call |= log_msg(
4589                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4590                         reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4591                         "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X, array layer %u, "
4592                         "mip level %u] with layout %s when first use is %s.",
4593                         reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4594                         cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
4595                         string_VkImageLayout(imageLayout),
4596                         string_VkImageLayout(cb_image_data.second.initialLayout));
4597                 } else {
4598                     skip_call |= log_msg(
4599                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4600                         reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4601                         "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4602                         "first use is %s.",
4603                         reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4604                         string_VkImageLayout(cb_image_data.second.initialLayout));
4605                 }
4606             }
4607             SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4608         }
4609     }
4610     return skip_call;
4611 }
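// Example scenario (hypothetical) caught by the check above: command buffer A transitions an
// image to VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL via a barrier, while command buffer B was
// recorded expecting VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL as the image's layout on entry.
// Submitting B after A makes B's recorded initial layout disagree with the globally tracked
// layout, producing DRAWSTATE_INVALID_IMAGE_LAYOUT at submit time.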
4612
4613 // Loop through bound objects and increment their in_use counts
4614 //  For any bound object whose state can no longer be found (i.e., it was destroyed), flag an error
4615 static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4616     bool skip = false;
4617     DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
4618     BASE_NODE *base_obj = nullptr;
4619     for (auto obj : cb_node->object_bindings) {
4620         switch (obj.type) {
4621         case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4622             base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4623             error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4624             break;
4625         }
4626         case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4627             base_obj = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4628             error_code = DRAWSTATE_INVALID_SAMPLER;
4629             break;
4630         }
4631         case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4632             base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4633             error_code = DRAWSTATE_INVALID_QUERY_POOL;
4634             break;
4635         }
4636         case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4637             base_obj = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4638             error_code = DRAWSTATE_INVALID_PIPELINE;
4639             break;
4640         }
4641         case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4642             base_obj = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4643             error_code = DRAWSTATE_INVALID_BUFFER;
4644             break;
4645         }
4646         case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4647             base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4648             error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4649             break;
4650         }
4651         case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4652             base_obj = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4653             error_code = DRAWSTATE_INVALID_IMAGE;
4654             break;
4655         }
4656         case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4657             base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4658             error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4659             break;
4660         }
4661         case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4662             base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4663             error_code = DRAWSTATE_INVALID_EVENT;
4664             break;
4665         }
4666         case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4667             base_obj = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4668             error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4669             break;
4670         }
4671         case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4672             base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4673             error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4674             break;
4675         }
4676         case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4677             base_obj = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4678             error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4679             break;
4680         }
4681         case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4682             base_obj = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4683             error_code = DRAWSTATE_INVALID_RENDERPASS;
4684             break;
4685         }
4686         case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4687             base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4688             error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4689             break;
4690         }
4691         default:
4692             // TODO : Merge handling of other object types into this code
4693             continue; // Skip unhandled types so the stale base_obj from a prior iteration is not reused below
4694         }
4695         if (!base_obj) {
4696             skip |=
4697                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4698                         "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4699         } else {
4700             base_obj->in_use.fetch_add(1);
4701         }
4702     }
4703     return skip;
4704 }
4705
4706 // Track which resources are in-flight by atomically incrementing their "in_use" count
4707 static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4708     bool skip_call = false;
4709
4710     cb_node->in_use.fetch_add(1);
4711     dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4712
4713     // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
4714     skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4715     // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4716     //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4717     //  should then be flagged prior to calling this function
4718     for (auto drawDataElement : cb_node->drawData) {
4719         for (auto buffer : drawDataElement.buffers) {
4720             auto buffer_node = getBufferNode(dev_data, buffer);
4721             if (!buffer_node) {
4722                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4723                                      (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4724                                      "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4725             } else {
4726                 buffer_node->in_use.fetch_add(1);
4727             }
4728         }
4729     }
4730     for (auto event : cb_node->writeEventsBeforeWait) {
4731         auto event_node = getEventNode(dev_data, event);
4732         if (event_node)
4733             event_node->write_in_use++;
4734     }
4735     return skip_call;
4736 }
4737
4738 // Note: This function assumes that the global lock is held by the calling
4739 // thread.
4740 // TODO: untangle this.
4741 static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4742     bool skip_call = false;
4743     GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4744     if (pCB) {
4745         for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4746             for (auto event : queryEventsPair.second) {
4747                 if (my_data->eventMap[event].needsSignaled) {
4748                     skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4749                                          VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4750                                          "Cannot get query results on queryPool 0x%" PRIx64
4751                                          " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4752                                          (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4753                 }
4754             }
4755         }
4756     }
4757     return skip_call;
4758 }
4759
4760 // TODO: nuke this completely.
4761 // Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4762 static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4763     // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4764     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4765     pCB->in_use.fetch_sub(1);
4766     if (!pCB->in_use.load()) {
4767         dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4768     }
4769 }
4770
4771 // Decrement in-use count for objects bound to command buffer
4772 static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4773     BASE_NODE *base_obj = nullptr;
4774     for (auto obj : cb_node->object_bindings) {
4775         base_obj = GetStateStructPtrFromObject(dev_data, obj);
4776         if (base_obj) {
4777             base_obj->in_use.fetch_sub(1);
4778         }
4779     }
4780 }
4781
4782 static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq)
4783 {
4784     bool skip_call = false; // TODO: extract everything that might fail to precheck
4785     std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4786
4787     // Roll this queue forward, one submission at a time.
4788     while (pQueue->seq < seq) {
4789         auto & submission = pQueue->submissions.front();
4790
4791         for (auto & wait : submission.waitSemaphores) {
4792             auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4793             if (pSemaphore) pSemaphore->in_use.fetch_sub(1); // guard: semaphore may have been destroyed while in flight
4794             auto & lastSeq = otherQueueSeqs[wait.queue];
4795             lastSeq = std::max(lastSeq, wait.seq);
4796         }
4797
4798         for (auto & semaphore : submission.signalSemaphores) {
4799             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4800             if (pSemaphore) pSemaphore->in_use.fetch_sub(1); // guard: semaphore may have been destroyed while in flight
4801         }
4802
4803         for (auto cb : submission.cbs) {
4804             auto cb_node = getCBNode(dev_data, cb);
4805             // First perform decrement on general case bound objects
4806             DecrementBoundResources(dev_data, cb_node);
4807             for (auto drawDataElement : cb_node->drawData) {
4808                 for (auto buffer : drawDataElement.buffers) {
4809                     auto buffer_node = getBufferNode(dev_data, buffer);
4810                     if (buffer_node) {
4811                         buffer_node->in_use.fetch_sub(1);
4812                     }
4813                 }
4814             }
4815             for (auto event : cb_node->writeEventsBeforeWait) {
4816                 auto eventNode = dev_data->eventMap.find(event);
4817                 if (eventNode != dev_data->eventMap.end()) {
4818                     eventNode->second.write_in_use--;
4819                 }
4820             }
4821             for (auto queryStatePair : cb_node->queryToStateMap) {
4822                 dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4823             }
4824             for (auto eventStagePair : cb_node->eventToStageMap) {
4825                 dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4826             }
4827
4828             skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
4829             removeInFlightCmdBuffer(dev_data, cb);
4830         }
4831
4832         auto pFence = getFenceNode(dev_data, submission.fence);
4833         if (pFence) {
4834             pFence->state = FENCE_RETIRED;
4835         }
4836
4837         pQueue->submissions.pop_front();
4838         pQueue->seq++;
4839     }
4840
4841     // Roll other queues forward to the highest seq we saw a wait for
4842     for (auto qs : otherQueueSeqs) {
4843         skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
4844     }
4845
4846     return skip_call;
4847 }
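// Worked example (hypothetical) of the retire logic above: suppose queue Q has seq == 3 with two
// pending submissions (retiring them advances seq to 4 and then 5), and the second submission
// waited on a semaphore signaled by queue P at P's seq 7. RetireWorkOnQueue(Q, 5) pops both
// submissions, decrements the in_use counts of their resources, marks any attached fence
// FENCE_RETIRED, records otherQueueSeqs[P] = 7, and finally retires P's work through seq 7,
// since the completed wait proves P progressed at least that far.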
4848
4849
4850 // Submit a fence to a queue, delimiting previously submitted fences and any prior
4851 // untracked work on that queue.
4852 static void
4853 SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount)
4854 {
4855     pFence->state = FENCE_INFLIGHT;
4856     pFence->signaler.first = pQueue->queue;
4857     pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4858 }
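// Worked example (hypothetical) of the signaler arithmetic above: if pQueue->seq == 10, three
// submissions are still pending, and the current vkQueueSubmit adds two more, then
// signaler.second == 10 + 3 + 2 == 15, i.e., the fence is considered signaled once the queue
// has retired every submission up to and including the new batch.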
4859
4860 static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4861     bool skip_call = false;
4862     if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4863         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4864         skip_call |=
4865             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4866                     __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4867                     "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4868                     reinterpret_cast<uint64_t>(pCB->commandBuffer));
4869     }
4870     return skip_call;
4871 }
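// Hypothetical app pattern that trips the check above: resubmitting a command buffer while its
// first submission is still executing, without SIMULTANEOUS_USE set at vkBeginCommandBuffer time:
//     vkQueueSubmit(queue, 1, &submit_info, fence_a);
//     vkQueueSubmit(queue, 1, &submit_info, fence_b);  // same CB still in flight -> error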
4872
4873 static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4874     bool skip = false;
4875     if (dev_data->instance_state->disabled.command_buffer_state)
4876         return skip;
4877     // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4878     if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4879         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4880                         __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4881                         "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4882                         "set, but has been submitted %" PRIu64 " times.",
4883                         (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4884     }
4885     // Validate that cmd buffers have been updated
4886     if (CB_RECORDED != pCB->state) {
4887         if (CB_INVALID == pCB->state) {
4888             // Inform app of reason CB invalid
4889             for (auto obj : pCB->broken_bindings) {
4890                 const char *type_str = object_type_to_string(obj.type);
4891                 // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4892                 const char *cause_str =
4893                     (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4894
4895                 skip |=
4896                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4897                             reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4898                             "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
4899                             " was %s.",
4900                             reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
4901             }
4902         } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4903             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4904                             (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4905                             "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to %s!",
4906                             reinterpret_cast<uint64_t &>(pCB->commandBuffer), call_source);
4907         }
4908     }
4909     return skip;
4910 }
4911
4912 // Validate that queueFamilyIndices of primary command buffers match this queue
4913 // Secondary command buffers were previously validated in vkCmdExecuteCommands().
4914 static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4915     bool skip_call = false;
4916     auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4917     auto queue_node = getQueueNode(dev_data, queue);
4918
4919     if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
4920         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4921             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
4922             "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
4923             " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
4924             reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
4925             reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
4926     }
4927
4928     return skip_call;
4929 }
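// Example (hypothetical) of what the check above catches: a command buffer allocated from a pool
// created with queueFamilyIndex 0 (say, graphics) but submitted on a queue obtained from family 1
// (say, transfer-only). The spec requires command buffers to be submitted on a queue belonging to
// the same family as the pool they were allocated from.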
4930
4931 static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4932     // Track in-use for resources off of primary and any secondary CBs
4933     bool skip_call = false;
4934
4935     // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4936     // on device
4937     skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4938
4939     skip_call |= validateAndIncrementResources(dev_data, pCB);
4940
4941     if (!pCB->secondaryCommandBuffers.empty()) {
4942         for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4943             GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4944             skip_call |= validateAndIncrementResources(dev_data, pSubCB);
4945             if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4946                 !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4947                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4948                         __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4949                         "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
4950                         " but that buffer has subsequently been bound to "
4951                         "primary cmd buffer 0x%" PRIxLEAST64
4952                         " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
4953                         reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
4954                         reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4955             }
4956         }
4957     }
4958
4959     skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
4960
4961     return skip_call;
4962 }
4963
4964 static bool
4965 ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
4966 {
4967     bool skip_call = false;
4968
4969     if (pFence) {
4970         if (pFence->state == FENCE_INFLIGHT) {
4971             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4972                                  (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4973                                  "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
4974         }
4975
4976         else if (pFence->state == FENCE_RETIRED) {
4977             skip_call |=
4978                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4979                         reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4980                         "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted.",
4981                         reinterpret_cast<uint64_t &>(pFence->fence));
4982         }
4983     }
4984
4985     return skip_call;
4986 }
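// Hypothetical sequence producing each error above:
//     vkQueueSubmit(q, 1, &si, fence);  // ok: fence becomes FENCE_INFLIGHT
//     vkQueueSubmit(q, 1, &si, fence);  // error: fence is already in use by another submission
//     // ...fence signals, but the app omits vkResetFences(device, 1, &fence)...
//     vkQueueSubmit(q, 1, &si, fence);  // error: fence submitted in SIGNALED (retired) state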
4987
4988
4989 VKAPI_ATTR VkResult VKAPI_CALL
4990 QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4991     bool skip_call = false;
4992     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4993     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4994     std::unique_lock<std::mutex> lock(global_lock);
4995
4996     auto pQueue = getQueueNode(dev_data, queue);
4997     auto pFence = getFenceNode(dev_data, fence);
4998     skip_call |= ValidateFenceForSubmit(dev_data, pFence);
4999
5000     if (skip_call) {
5001         return VK_ERROR_VALIDATION_FAILED_EXT;
5002     }
5003
5004     // TODO : Review these old print functions and clean up as appropriate
5005     print_mem_list(dev_data);
5006     printCBList(dev_data);
5007
5008     // Mark the fence in-use.
5009     if (pFence) {
5010         SubmitFence(pQueue, pFence, std::max(1u, submitCount));
5011     }
5012
5013     // Now verify each individual submit
5014     for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5015         const VkSubmitInfo *submit = &pSubmits[submit_idx];
5016         vector<SEMAPHORE_WAIT> semaphore_waits;
5017         vector<VkSemaphore> semaphore_signals;
5018         for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5019             VkSemaphore semaphore = submit->pWaitSemaphores[i];
5020             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5021             if (pSemaphore) {
5022                 if (pSemaphore->signaled) {
5023                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
5024                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
5025                         pSemaphore->in_use.fetch_add(1);
5026                     }
5027                     pSemaphore->signaler.first = VK_NULL_HANDLE;
5028                     pSemaphore->signaled = false;
5029                 } else {
5030                     skip_call |=
5031                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5032                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5033                                 "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
5034                                 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5035                 }
5036             }
5037         }
5038         for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5039             VkSemaphore semaphore = submit->pSignalSemaphores[i];
5040             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5041             if (pSemaphore) {
5042                 if (pSemaphore->signaled) {
5043                     skip_call |=
5044                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5045                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5046                                 "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
5047                                 " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
5048                                 reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5049                                 reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
5050                 } else {
5051                     pSemaphore->signaler.first = queue;
5052                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
5053                     pSemaphore->signaled = true;
5054                     pSemaphore->in_use.fetch_add(1);
5055                     semaphore_signals.push_back(semaphore);
5056                 }
5057             }
5058         }
5059
5060         std::vector<VkCommandBuffer> cbs;
5061
5062         for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5063             auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5064             if (pCBNode) { // guard: only validate layouts for known command buffers rather than dereferencing null
5065                 skip_call |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
5066                 cbs.push_back(submit->pCommandBuffers[i]);
5067                 for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
5068                     cbs.push_back(secondaryCmdBuffer);
5069                 }
5070
5071                 pCBNode->submitCount++; // increment submit count
5072                 skip_call |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5073                 skip_call |= validateQueueFamilyIndices(dev_data, pCBNode, queue);
5074                 // Potential early exit here as bad object state may crash in delayed function calls
5075                 if (skip_call)
5076                     return result;
5077                 // Call submit-time functions to validate/update state
5078                 for (auto &function : pCBNode->validate_functions) {
5079                     skip_call |= function();
5080                 }
5081                 for (auto &function : pCBNode->eventUpdates) {
5082                     skip_call |= function(queue);
5083                 }
5084                 for (auto &function : pCBNode->queryUpdates) {
5085                     skip_call |= function(queue);
5086                 }
5087             }
5088         }
5089
5090         pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
5091                                          submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
5092     }
5093
5094     if (pFence && !submitCount) {
5095         // If no submissions, but just dropping a fence on the end of the queue,
5096         // record an empty submission with just the fence, so we can determine
5097         // its completion.
5098         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
5099                                          std::vector<SEMAPHORE_WAIT>(),
5100                                          std::vector<VkSemaphore>(),
5101                                          fence);
5102     }
5103
5104     lock.unlock();
5105     if (!skip_call)
5106         result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
5107
5108     return result;
5109 }
5110
5111 VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5112                                               const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5113     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5114     VkResult result = my_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5115     // TODO : Track allocations and overall size here
5116     std::lock_guard<std::mutex> lock(global_lock);
5117     if (VK_SUCCESS == result) add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo); // only track successful allocations
5118     print_mem_list(my_data);
5119     return result;
5120 }
5121
5122 VKAPI_ATTR void VKAPI_CALL
5123 FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5124     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5125
5126     // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5127     // Before freeing a memory object, an application must ensure the memory object is no longer
5128     // in use by the device—for example by command buffers queued for execution. The memory need
5129     // not yet be unbound from all images and buffers, but any further use of those images or
5130     // buffers (on host or device) for anything other than destroying those objects will result in
5131     // undefined behavior.
5132
5133     std::unique_lock<std::mutex> lock(global_lock);
5134     bool skip_call = freeMemObjInfo(my_data, device, mem, false);
5135     print_mem_list(my_data);
5136     printCBList(my_data);
5137     lock.unlock();
5138     if (!skip_call) {
5139         my_data->dispatch_table.FreeMemory(device, mem, pAllocator);
5140     }
5141 }
5142
5143 // Validate that given Map memory range is valid. This means that the memory should not already be mapped,
5144 //  and that the size of the map range should be:
5145 //  1. Not zero
5146 //  2. Within the size of the memory allocation
5147 static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5148     bool skip_call = false;
5149
5150     if (size == 0) {
5151         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5152                             (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5153                             "VkMapMemory: Attempting to map memory range of size zero");
5154     }
5155
5156     auto mem_element = my_data->memObjMap.find(mem);
5157     if (mem_element != my_data->memObjMap.end()) {
5158         auto mem_info = mem_element->second.get();
5159         // It is an application error to call VkMapMemory on an object that is already mapped
5160         if (mem_info->mem_range.size != 0) {
5161             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5162                                 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5163                                 "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
5164         }
5165
5166         // Validate that offset + size is within object's allocationSize
5167         if (size == VK_WHOLE_SIZE) {
5168             if (offset >= mem_info->alloc_info.allocationSize) {
5169                 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5170                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5171                                     "MEM", "Mapping memory at offset 0x%" PRIx64
5172                                            " with size VK_WHOLE_SIZE oversteps total allocation size 0x%" PRIx64,
5173                                     offset, mem_info->alloc_info.allocationSize);
5174             }
5175         } else {
5176             if ((offset + size) > mem_info->alloc_info.allocationSize) {
5177                 skip_call |=
5178                     log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5179                             (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5180                             "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total allocation size 0x%" PRIx64, offset,
5181                             size + offset, mem_info->alloc_info.allocationSize);
5182             }
5183         }
5184     }
5185     return skip_call;
5186 }
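// Hypothetical calls that would trigger the checks above (alloc_size stands for the
// VkMemoryAllocateInfo::allocationSize used when 'mem' was allocated):
//     vkMapMemory(device, mem, 0, 0, 0, &pData);                       // size of zero
//     vkMapMemory(device, mem, 0, alloc_size, 0, &pData);              // ok
//     vkMapMemory(device, mem, 0, alloc_size, 0, &pData);              // already mapped
//     vkMapMemory(device, mem, alloc_size, VK_WHOLE_SIZE, 0, &pData);  // offset past the end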
5187
5188 static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5189     auto mem_info = getMemObjInfo(my_data, mem);
5190     if (mem_info) {
5191         mem_info->mem_range.offset = offset;
5192         mem_info->mem_range.size = size;
5193     }
5194 }
5195
5196 static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5197     bool skip_call = false;
5198     auto mem_info = getMemObjInfo(my_data, mem);
5199     if (mem_info) {
5200         if (!mem_info->mem_range.size) {
5201             // Valid Usage: memory must currently be mapped
5202             skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5203                                 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5204                                 "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
5205         }
5206         mem_info->mem_range.size = 0;
5207         if (mem_info->shadow_copy) {
5208             free(mem_info->shadow_copy_base);
5209             mem_info->shadow_copy_base = 0;
5210             mem_info->shadow_copy = 0;
5211         }
5212     }
5213     return skip_call;
5214 }
5215
5216 // Guard value for pad data
5217 static const char NoncoherentMemoryFillValue = 0xb;
5218
5219 static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
5220                                      void **ppData) {
5221     auto mem_info = getMemObjInfo(dev_data, mem);
5222     if (mem_info) {
5223         mem_info->p_driver_data = *ppData;
5224         uint32_t index = mem_info->alloc_info.memoryTypeIndex;
5225         if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5226             mem_info->shadow_copy = 0;
5227         } else {
5228             if (size == VK_WHOLE_SIZE) {
5229                 size = mem_info->alloc_info.allocationSize - offset;
5230             }
5231             mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5232             assert(vk_safe_modulo(mem_info->shadow_pad_size,
5233                                   dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5234             // Ensure start of mapped region reflects hardware alignment constraints
5235             uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5236
5237             // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5238             uint64_t start_offset = offset % map_alignment;
5239             // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
5240             mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
5241
5242             mem_info->shadow_copy =
5243                 reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5244                                          ~(map_alignment - 1)) + start_offset;
5245             assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5246                                   map_alignment) == 0);
5247
5248             memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
5249             *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
5250         }
5251     }
5252 }
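// Sketch of the shadow-copy layout built above for noncoherent memory (pad == shadow_pad_size,
// which is set to minMemoryMapAlignment):
//
//   shadow_copy_base          shadow_copy                *ppData
//   |-- alignment slack --|-- pad (guard) --|-- user data (size) --|-- pad (guard) --|
//
// The application writes only into the user-data region; the fill-value guard bands on either
// side let the layer detect out-of-bounds writes when the shadow copy is flushed back.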
5253
5254 // Verify that the state of a fence being waited on is appropriate. In particular, the fence
5255 //  should have been submitted on a queue or during acquire-next-image; a fence that was never
5256 //  submitted has no way to become signaled, so waiting on it cannot make forward progress.
5257 static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
5258     bool skip_call = false;
5259
5260     auto pFence = getFenceNode(dev_data, fence);
5261     if (pFence) {
5262         if (pFence->state == FENCE_UNSIGNALED) {
5263             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5264                                  reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5265                                  "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5266                                  "acquire next image.",
5267                                  apiCall, reinterpret_cast<uint64_t &>(fence));
5268         }
5269     }
5270     return skip_call;
5271 }
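// Hypothetical case flagged above: waiting on a fence that was created unsignaled and never
// passed to vkQueueSubmit() or vkAcquireNextImageKHR(); such a fence has no signaler, so
// vkWaitForFences() on it can only time out.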
5272
5273 static bool RetireFence(layer_data *dev_data, VkFence fence) {
5274     auto pFence = getFenceNode(dev_data, fence);
5275     if (pFence->signaler.first != VK_NULL_HANDLE) {
5276         /* Fence signaller is a queue -- use this as proof that prior operations
5277          * on that queue have completed.
5278          */
5279         return RetireWorkOnQueue(dev_data,
5280                                  getQueueNode(dev_data, pFence->signaler.first),
5281                                  pFence->signaler.second);
5282     }
5283     else {
5284         /* Fence signaller is the WSI. We're not tracking what the WSI op
5285          * actually /was/ in CV yet, but we need to mark the fence as retired.
5286          */
5287         pFence->state = FENCE_RETIRED;
5288         return false;
5289     }
5290 }
5291
5292 VKAPI_ATTR VkResult VKAPI_CALL
5293 WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5294     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5295     bool skip_call = false;
5296     // Verify fence status of submitted fences
5297     std::unique_lock<std::mutex> lock(global_lock);
5298     for (uint32_t i = 0; i < fenceCount; i++) {
5299         skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
5300     }
5301     lock.unlock();
5302     if (skip_call)
5303         return VK_ERROR_VALIDATION_FAILED_EXT;
5304
5305     VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5306
5307     if (result == VK_SUCCESS) {
5308         lock.lock();
5309         // When we know that all fences are complete we can clean/remove their CBs
5310         if (waitAll || fenceCount == 1) {
5311             for (uint32_t i = 0; i < fenceCount; i++) {
5312                 skip_call |= RetireFence(dev_data, pFences[i]);
5313             }
5314         }
5315         // NOTE : The alternate case, where only some of the fences have completed, is not
5316         //  handled here. To determine which fences completed, the app must call
5317         //  vkGetFenceStatus(), at which point we'll clean/remove the CBs of any that are complete.
5318         lock.unlock();
5319     }
5320     if (skip_call)
5321         return VK_ERROR_VALIDATION_FAILED_EXT;
5322     return result;
5323 }
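// Typical app-side sequence that keeps this path happy (illustrative, not part of the layer);
// the fence must be submitted first, otherwise verifyWaitFenceState() warns above:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);            // fence becomes in-flight
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // waitAll with a single fence
//     vkResetFences(device, 1, &fence);                        // back to unsignaled for reuse
//
// Because waitAll is VK_TRUE (or fenceCount is 1), a VK_SUCCESS return lets the layer retire the
// fence and clean up the command buffers associated with it.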
5324
5325 VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
5326     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5327     bool skip_call = false;
5328     std::unique_lock<std::mutex> lock(global_lock);
5329     skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
5330     lock.unlock();
5331
5332     if (skip_call)
5333         return VK_ERROR_VALIDATION_FAILED_EXT;
5334
5335     VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
5336     lock.lock();
5337     if (result == VK_SUCCESS) {
5338         skip_call |= RetireFence(dev_data, fence);
5339     }
5340     lock.unlock();
5341     if (skip_call)
5342         return VK_ERROR_VALIDATION_FAILED_EXT;
5343     return result;
5344 }
5345
5346 VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5347                                           VkQueue *pQueue) {
5348     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5349     dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5350     std::lock_guard<std::mutex> lock(global_lock);
5351
5352     // Add queue to tracking set only if it is new
5353     auto result = dev_data->queues.emplace(*pQueue);
5354     if (result.second) {
5355         QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5356         pQNode->queue = *pQueue;
5357         pQNode->queueFamilyIndex = queueFamilyIndex;
5358         pQNode->seq = 0;
5359     }
5360 }
5361
5362 VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
5363     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5364     bool skip_call = false;
5365     std::unique_lock<std::mutex> lock(global_lock);
5366     auto pQueue = getQueueNode(dev_data, queue);
5367     skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
5368     lock.unlock();
5369     if (skip_call)
5370         return VK_ERROR_VALIDATION_FAILED_EXT;
5371     VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
5372     return result;
5373 }
5374
5375 VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
5376     bool skip_call = false;
5377     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5378     std::unique_lock<std::mutex> lock(global_lock);
5379     for (auto & queue : dev_data->queueMap) {
5380         skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
5381     }
5382     lock.unlock();
5383     if (skip_call)
5384         return VK_ERROR_VALIDATION_FAILED_EXT;
5385     VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
5386     return result;
5387 }
5388
5389 VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5390     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5391     bool skip_call = false;
5392     std::unique_lock<std::mutex> lock(global_lock);
5393     auto fence_pair = dev_data->fenceMap.find(fence);
5394     if (fence_pair != dev_data->fenceMap.end()) {
5395         if (fence_pair->second.state == FENCE_INFLIGHT) {
5396             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5397                                  (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
5398                                  (uint64_t)(fence));
5399         }
5400         dev_data->fenceMap.erase(fence_pair);
5401     }
5402     lock.unlock();
5403
5404     if (!skip_call)
5405         dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
5406 }
5407
5408 // For the given object node, if it is in use, flag a validation error and return the callback result, else return false
5409 bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct) {
5410     if (dev_data->instance_state->disabled.object_in_use)
5411         return false;
5412     bool skip = false;
5413     if (obj_node->in_use.load()) {
5414         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
5415                         DRAWSTATE_OBJECT_INUSE, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer.",
5416                         object_type_to_string(obj_struct.type), obj_struct.handle);
5417     }
5418     return skip;
5419 }
5420
5421 VKAPI_ATTR void VKAPI_CALL
5422 DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5423     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5424     bool skip = false;
5425     std::unique_lock<std::mutex> lock(global_lock);
5426     auto sema_node = getSemaphoreNode(dev_data, semaphore);
5427     if (sema_node) {
5428         skip |= ValidateObjectNotInUse(dev_data, sema_node,
5429                                        {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT});
5430     }
5431     if (!skip) {
5432         dev_data->semaphoreMap.erase(semaphore);
5433         lock.unlock();
5434         dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
5435     }
5436 }
5437
5438 VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5439     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5440     bool skip = false;
5441     std::unique_lock<std::mutex> lock(global_lock);
5442     auto event_node = getEventNode(dev_data, event);
5443     if (event_node) {
5444         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5445         skip |= ValidateObjectNotInUse(dev_data, event_node, obj_struct);
5446         // Any bound cmd buffers are now invalid
5447         invalidateCommandBuffers(event_node->cb_bindings, obj_struct);
5448     }
5449     if (!skip) {
5450         dev_data->eventMap.erase(event);
5451         lock.unlock();
5452         dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
5453     }
5454 }
5455
5456 VKAPI_ATTR void VKAPI_CALL
5457 DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5458     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5459     bool skip = false;
5460     std::unique_lock<std::mutex> lock(global_lock);
5461     auto qp_node = getQueryPoolNode(dev_data, queryPool);
5462     if (qp_node) {
5463         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5464         skip |= ValidateObjectNotInUse(dev_data, qp_node, obj_struct);
5465         // Any bound cmd buffers are now invalid
5466         invalidateCommandBuffers(qp_node->cb_bindings, obj_struct);
5467     }
5468     if (!skip) {
5469         dev_data->queryPoolMap.erase(queryPool);
5470         lock.unlock();
5471         dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
5472     }
5473 }
5474
5475 VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5476                                                    uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5477                                                    VkQueryResultFlags flags) {
5478     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5479     unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5480     std::unique_lock<std::mutex> lock(global_lock);
5481     for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5482         auto pCB = getCBNode(dev_data, cmdBuffer);
5483         for (auto queryStatePair : pCB->queryToStateMap) {
5484             queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5485         }
5486     }
5487     bool skip_call = false;
5488     for (uint32_t i = 0; i < queryCount; ++i) {
5489         QueryObject query = {queryPool, firstQuery + i};
5490         auto queryElement = queriesInFlight.find(query);
5491         auto queryToStateElement = dev_data->queryToStateMap.find(query);
5492         if (queryToStateElement != dev_data->queryToStateMap.end()) {
5493             // Available and in flight
5494             // The enclosing check guarantees queryToStateElement is valid here, so only availability matters
5495             if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
5496                 for (auto cmdBuffer : queryElement->second) {
5497                     auto pCB = getCBNode(dev_data, cmdBuffer);
5498                     auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5499                     if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5500                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5501                                              VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5502                                              "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5503                                              (uint64_t)(queryPool), firstQuery + i);
5504                     } else {
5505                         for (auto event : queryEventElement->second) {
5506                             dev_data->eventMap[event].needsSignaled = true;
5507                         }
5508                     }
5509                 }
5510                 // Unavailable and in flight
5511             } else if (queryElement != queriesInFlight.end() &&
5512                        !queryToStateElement->second) {
5513                 // TODO : Can there be the same query in use by multiple command buffers in flight?
5514                 bool make_available = false;
5515                 for (auto cmdBuffer : queryElement->second) {
5516                     auto pCB = getCBNode(dev_data, cmdBuffer);
5517                     make_available |= pCB->queryToStateMap[query];
5518                 }
5519                 if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5520                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5521                                          VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5522                                          "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5523                                          (uint64_t)(queryPool), firstQuery + i);
5524                 }
5525                 // Unavailable
5526             } else if (!queryToStateElement->second) {
5527                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5528                                      VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5529                                      "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5530                                      (uint64_t)(queryPool), firstQuery + i);
5531             }
5532         // Uninitialized: the query was never recorded, so no data has ever been collected for this index
5533         } else {
5534             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5535                                  VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5536                                  "Cannot get query results on queryPool 0x%" PRIx64
5537                                  " with index %d as data has not been collected for this index.",
5538                                  (uint64_t)(queryPool), firstQuery + i);
5539         }
5540     }
5541     lock.unlock();
5542     if (skip_call)
5543         return VK_ERROR_VALIDATION_FAILED_EXT;
5544     return dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
5545 }
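// Illustrative app-side sketch of the flag interaction above: in the unavailable-and-in-flight
// case, passing VK_QUERY_RESULT_WAIT_BIT or VK_QUERY_RESULT_PARTIAL_BIT (when an in-flight
// command buffer will make the query available) suppresses the error:
//
//     uint64_t result;
//     vkGetQueryPoolResults(device, queryPool, 0, 1, sizeof(result), &result, sizeof(result),
//                           VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
//
// Without one of those flags, requesting results for an unavailable query triggers the
// DRAWSTATE_INVALID_QUERY errors logged above.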
5546
5547 static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5548     bool skip_call = false;
5549     auto buffer_node = getBufferNode(my_data, buffer);
5550     if (!buffer_node) {
5551         skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5552                              (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5553                              "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5554     } else {
5555         if (buffer_node->in_use.load()) {
5556             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5557                                  (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5558                                  "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5559         }
5560     }
5561     return skip_call;
5562 }
5563
5564 // Return true if given ranges intersect, else false
5565 // Prereq : For both ranges, range->end - range->start > 0. A violation of that precondition should
5566 //  already have been flagged as an error, so it is not re-checked here
5567 // Padding is applied when one range is linear and the other is non-linear, as required by the
5568 //  bufferImageGranularity limit. In that padded case, if an alias is encountered a validation error is
5569 //  reported and skip_call may be set by the callback, so callers should merge in the skip_call value whenever the padded case is possible.
5570 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5571     *skip_call = false;
5572     auto r1_start = range1->start;
5573     auto r1_end = range1->end;
5574     auto r2_start = range2->start;
5575     auto r2_end = range2->end;
5576     VkDeviceSize pad_align = 1;
5577     if (range1->linear != range2->linear) {
5578         pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5579     }
5580     if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
5581         return false;
5582     if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
5583         return false;
5584
5585     if (range1->linear != range2->linear) {
5586         // In linear vs. non-linear case, it's an error to alias
5587         const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5588         const char *r1_type_str = range1->image ? "image" : "buffer";
5589         const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5590         const char *r2_type_str = range2->image ? "image" : "buffer";
5591         auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5592         *skip_call |=
5593             log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
5594                     "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5595                            " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
5596                     r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5597     }
5598     // Ranges intersect
5599     return true;
5600 }
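// Worked example of the padded comparison above (illustrative; hypothetical values). With
// bufferImageGranularity = 1024, a linear buffer range [0, 4500] and a non-linear (tiled) image
// range [4096, 8191] alias, because both ranges round down into the same 1024-byte granule:
//
//     r1_end   & ~(1024 - 1)   ->   4500 & ~1023   ->   4096
//     r2_start & ~(1024 - 1)   ->   4096 & ~1023   ->   4096   // neither early-out fires
//
// Ending the buffer at 4095 instead rounds down to 3072 < 4096, so the ranges no longer share a
// granule and no error is logged. Ranges of matching linearity compare with pad_align = 1.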
5601 // Simplified rangesIntersect that calls above function to check range1 for intersection with offset & end addresses
5602 static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
5603     // Create a local MEMORY_RANGE struct to wrap offset/size
5604     MEMORY_RANGE range_wrap;
5605     // Sync the linear flag with range1 to avoid padding and the potential validation-error case
5606     range_wrap.linear = range1->linear;
5607     range_wrap.start = offset;
5608     range_wrap.end = end;
5609     bool tmp_bool;
5610     return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5611 }
5612 // For given mem_info, set all ranges valid that intersect [offset-end] range
5613 // TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5614 static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5615     bool tmp_bool = false;
5616     MEMORY_RANGE map_range;
5617     map_range.linear = true;
5618     map_range.start = offset;
5619     map_range.end = end;
5620     for (auto &handle_range_pair : mem_info->bound_ranges) {
5621         if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5622             // TODO : WARN here if tmp_bool true?
5623             handle_range_pair.second.valid = true;
5624         }
5625     }
5626 }
5627 // Object with given handle is being bound to memory w/ given mem_info struct.
5628 //  Track the newly bound memory range with given memoryOffset
5629 //  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5630 //  and non-linear range incorrectly overlap.
5631 // Return true if an error is flagged and the user callback returns "true", otherwise false
5632 // is_image indicates an image object, otherwise handle is for a buffer
5633 // is_linear indicates a buffer or linear image
5634 static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5635                               VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5636     bool skip_call = false;
5637     MEMORY_RANGE range;
5638
5639     range.image = is_image;
5640     range.handle = handle;
5641     range.linear = is_linear;
5642     range.valid = mem_info->global_valid;
5643     range.memory = mem_info->mem;
5644     range.start = memoryOffset;
5645     range.size = memRequirements.size;
5646     range.end = memoryOffset + memRequirements.size - 1;
5647     range.aliases.clear();
5648     // Update Memory aliasing
5649     // Save aliased ranges so we can copy them into the final map entry below. This can't be done inside the loop because we
5650     // don't yet have the final pointer; inserting into the map before the loop would make the new range check against itself
5651     std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5652     for (auto &obj_range_pair : mem_info->bound_ranges) {
5653         auto check_range = &obj_range_pair.second;
5654         bool intersection_error = false;
5655         if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5656             skip_call |= intersection_error;
5657             range.aliases.insert(check_range);
5658             tmp_alias_ranges.insert(check_range);
5659         }
5660     }
5661     mem_info->bound_ranges[handle] = std::move(range);
5662     for (auto tmp_range : tmp_alias_ranges) {
5663         tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5664     }
5665     if (is_image)
5666         mem_info->bound_images.insert(handle);
5667     else
5668         mem_info->bound_buffers.insert(handle);
5669
5670     return skip_call;
5671 }
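// Illustrative sketch of the alias bookkeeping above: binding two buffers that overlap within the
// same VkDeviceMemory leaves each MEMORY_RANGE holding a pointer to the other in its aliases set
// (assuming bufA's size extends past offset 64):
//
//     vkBindBufferMemory(device, bufA, mem, 0);    // range A: [0, sizeA - 1]
//     vkBindBufferMemory(device, bufB, mem, 64);   // range B overlaps A, so afterwards:
//                                                  //   A.aliases == { &B }, B.aliases == { &A }
//
// Both ranges are linear, so the overlap is tracked silently; an error is only flagged when a
// linear and a non-linear range alias within bufferImageGranularity.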
5672
5673 static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5674                                    VkMemoryRequirements mem_reqs, bool is_linear) {
5675     return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5676 }
5677
5678 static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5679                                     VkMemoryRequirements mem_reqs) {
5680     return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5681 }
5682
5683 // Remove the MEMORY_RANGE struct for the given handle from the bound_ranges of mem_info
5684 //  is_image indicates if the handle is for an image or a buffer
5685 //  This function will also remove the handle from the appropriate bound_images/bound_buffers
5686 //  set and clean up any aliases for the range being removed.
5687 static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5688     auto erase_range = &mem_info->bound_ranges[handle];
5689     for (auto alias_range : erase_range->aliases) {
5690         alias_range->aliases.erase(erase_range);
5691     }
5692     erase_range->aliases.clear();
5693     mem_info->bound_ranges.erase(handle);
5694     if (is_image) {
5695         mem_info->bound_images.erase(handle);
5696     } else {
5697         mem_info->bound_buffers.erase(handle);
5698     }
5699 }
5700
5701 static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5702
5703 static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5704
5705 VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5706                                          const VkAllocationCallbacks *pAllocator) {
5707     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5708     std::unique_lock<std::mutex> lock(global_lock);
5709     if (!validateIdleBuffer(dev_data, buffer)) {
5710         // Clean up memory binding and range information for buffer
5711         auto buff_node = getBufferNode(dev_data, buffer);
5712         if (buff_node) {
5713             // Any bound cmd buffers are now invalid
5714             invalidateCommandBuffers(buff_node->cb_bindings,
5715                                      {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
5716             auto mem_info = getMemObjInfo(dev_data, buff_node->mem);
5717             if (mem_info) {
5718                 RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5719             }
5720             clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5721             dev_data->bufferMap.erase(buff_node->buffer);
5722         }
5723         lock.unlock();
5724         dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
5725     }
5726 }
5727
5728 static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
5729                                              VK_OBJECT *obj_struct) {
5730     if (dev_data->instance_state->disabled.destroy_buffer_view)
5731         return false;
5732     bool skip = false;
5733     *buffer_view_state = getBufferViewState(dev_data, buffer_view);
5734     if (*buffer_view_state) {
5735         *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
5736         skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct);
5737     }
5738     return skip;
5739 }
5740
5741 static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
5742                                             VK_OBJECT obj_struct) {
5743     dev_data->bufferViewMap.erase(buffer_view);
5744     // Any bound cmd buffers are now invalid (buffer_view_state may be null for an untracked view)
5745     if (buffer_view_state) invalidateCommandBuffers(buffer_view_state->cb_bindings, obj_struct);
5746 }
5747
5748 VKAPI_ATTR void VKAPI_CALL
5749 DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5750     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5751     std::unique_lock<std::mutex> lock(global_lock);
5752     // Common data objects used by both the pre-call validate and post-call record steps
5753     BUFFER_VIEW_STATE *buffer_view_state = nullptr;
5754     VK_OBJECT obj_struct;
5755     // Validate state before calling down chain, update common data if we'll be calling down chain
5756     bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
5757     if (!skip) {
5758         lock.unlock();
5759         dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
5760         lock.lock();
5761         // We made call so update state
5762         PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
5763     }
5764 }
5765
5766 VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5767     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5768     bool skip = false;
5769     std::unique_lock<std::mutex> lock(global_lock);
5770     auto img_node = getImageNode(dev_data, image);
5771     if (img_node) {
5772         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
5773         // Any bound cmd buffers are now invalid
5774         invalidateCommandBuffers(img_node->cb_bindings, obj_struct);
5775         skip |= ValidateObjectNotInUse(dev_data, img_node, obj_struct);
5776     }
5777     if (!skip) {
5778         // Clean up memory mapping, bindings and range references for image
5779         auto mem_info = img_node ? getMemObjInfo(dev_data, img_node->mem) : nullptr;
5780         if (mem_info) {
5781             RemoveImageMemoryRange(reinterpret_cast<uint64_t &>(image), mem_info);
5782             clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5783         }
5784         // Remove image from imageMap (guard against an untracked image to avoid a null dereference)
5785         if (img_node) dev_data->imageMap.erase(img_node->image);
5786
5787         const auto &subEntry = dev_data->imageSubresourceMap.find(image);
5788         if (subEntry != dev_data->imageSubresourceMap.end()) {
5789             for (const auto &pair : subEntry->second) {
5790                 dev_data->imageLayoutMap.erase(pair);
5791             }
5792             dev_data->imageSubresourceMap.erase(subEntry);
5793         }
5794         lock.unlock();
5795         dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
5796     }
5797 }
5798
5799 static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5800                                   const char *funcName) {
5801     bool skip_call = false;
5802     if (((1u << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5803         skip_call = log_msg(
5804             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5805             reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5806             "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5807             "type (0x%X) of this memory object 0x%" PRIx64 ".",
5808             funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
5809     }
5810     return skip_call;
5811 }
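// Worked example of the memory-type check above (illustrative; hypothetical values):
//
//     mem_info->alloc_info.memoryTypeIndex == 2      // memory was allocated from type 2
//     memory_type_bits                     == 0x0A   // object accepts types 1 and 3 (0b1010)
//     (1u << 2) & 0x0A                     == 0      // type 2 not accepted -> error logged
//
// Allocating the memory from type index 1 or 3 instead would satisfy this object's
// VkMemoryRequirements::memoryTypeBits.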
5812
5813 VKAPI_ATTR VkResult VKAPI_CALL
5814 BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5815     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5816     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5817     std::unique_lock<std::mutex> lock(global_lock);
5818     // Track objects tied to memory
5819     uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5820     bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5821     auto buffer_node = getBufferNode(dev_data, buffer);
5822     if (buffer_node) {
5823         VkMemoryRequirements memRequirements;
5824         dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &memRequirements);
5825         buffer_node->mem = mem;
5826         buffer_node->memOffset = memoryOffset;
5827         buffer_node->memSize = memRequirements.size;
5828
5829         // Track and validate bound memory range information
5830         auto mem_info = getMemObjInfo(dev_data, mem);
5831         if (mem_info) {
5832             skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
5833             skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5834         }
5835
5836         // Validate memory requirements alignment
5837         if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5838             skip_call |=
5839                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5840                         __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5841                         "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5842                         "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5843                         ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5844                         memoryOffset, memRequirements.alignment);
5845         }
5846
5847         // Validate device limits alignments
5848         static const VkBufferUsageFlagBits usage_list[3] = {
5849             static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5850             VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
5851             VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5852         static const char *memory_type[3] = {"texel",
5853                                              "uniform",
5854                                              "storage"};
5855         static const char *offset_name[3] = {
5856             "minTexelBufferOffsetAlignment",
5857             "minUniformBufferOffsetAlignment",
5858             "minStorageBufferOffsetAlignment"
5859         };
5860
5861         // These limits must stay in sync with the usage_list and offset_name tables above
5862         const VkDeviceSize offset_requirement[3] = {
5863             dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5864             dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5865             dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
5866         };
5867         VkBufferUsageFlags usage = buffer_node->createInfo.usage;
5868
5869         for (int i = 0; i < 3; i++) {
5870             if (usage & usage_list[i]) {
5871                 if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5872                     skip_call |=
5873                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5874                                 0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5875                                 "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5876                                 "device limit %s 0x%" PRIxLEAST64,
5877                                 memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
5878                 }
5879             }
5880         }
5881     }
5882     print_mem_list(dev_data);
5883     lock.unlock();
5884     if (!skip_call) {
5885         result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
5886     }
5887     return result;
5888 }
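// App-side sketch (illustrative) of choosing a memoryOffset that passes the checks above. Since
// VkMemoryRequirements::alignment is guaranteed by the spec to be a power of two, rounding a
// desired offset up is a mask operation (desired_offset is a hypothetical value):
//
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);
//     VkDeviceSize offset = (desired_offset + reqs.alignment - 1) & ~(reqs.alignment - 1);
//     vkBindBufferMemory(device, buffer, mem, offset);
//
// Texel/uniform/storage buffers must additionally respect the min*BufferOffsetAlignment device
// limits enforced in the loop above.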
5889
5890 VKAPI_ATTR void VKAPI_CALL
5891 GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5892     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5893     // TODO : What to track here?
5894     //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5895     my_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5896 }
5897
5898 VKAPI_ATTR void VKAPI_CALL
5899 GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5900     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5901     // TODO : What to track here?
5902     //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5903     my_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
5904 }
5905
5906 VKAPI_ATTR void VKAPI_CALL
5907 DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5908     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5909     bool skip = false;
5910     std::unique_lock<std::mutex> lock(global_lock);
5911     auto view_state = getImageViewState(dev_data, imageView);
5912     if (view_state) {
5913         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(imageView), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
5914         skip |= ValidateObjectNotInUse(dev_data, view_state, obj_struct);
5915         // Any bound cmd buffers are now invalid
5916         invalidateCommandBuffers(view_state->cb_bindings, obj_struct);
5917     }
5918     if (!skip) {
5919         dev_data->imageViewMap.erase(imageView);
5920         lock.unlock();
5921         dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
5922     }
5923 }
5924
5925 VKAPI_ATTR void VKAPI_CALL
5926 DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5927     layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5928
5929     std::unique_lock<std::mutex> lock(global_lock);
5930     my_data->shaderModuleMap.erase(shaderModule);
5931     lock.unlock();
5932
5933     my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
5934 }
5935
5936 VKAPI_ATTR void VKAPI_CALL
5937 DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5938     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5939     bool skip = false;
5940     std::unique_lock<std::mutex> lock(global_lock);
5941     auto pipe_node = getPipeline(dev_data, pipeline);
5942     if (pipe_node) {
5943         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
5944         skip |= ValidateObjectNotInUse(dev_data, pipe_node, obj_struct);
5945         // Any bound cmd buffers are now invalid
5946         invalidateCommandBuffers(pipe_node->cb_bindings, obj_struct);
5947     }
5948     if (!skip) {
5949         dev_data->pipelineMap.erase(pipeline);
5950         lock.unlock();
5951         dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
5952     }
5953 }
5954
5955 VKAPI_ATTR void VKAPI_CALL
5956 DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5957     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5958     std::unique_lock<std::mutex> lock(global_lock);
5959     dev_data->pipelineLayoutMap.erase(pipelineLayout);
5960     lock.unlock();
5961
5962     dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5963 }
5964
5965 VKAPI_ATTR void VKAPI_CALL
5966 DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5967     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5968     bool skip = false;
5969     std::unique_lock<std::mutex> lock(global_lock);
5970     auto sampler_node = getSamplerNode(dev_data, sampler);
5971     if (sampler_node) {
5972         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
5973         skip |= ValidateObjectNotInUse(dev_data, sampler_node, obj_struct);
5974         // Any bound cmd buffers are now invalid
5975         invalidateCommandBuffers(sampler_node->cb_bindings, obj_struct);
5976     }
5977     if (!skip) {
5978         dev_data->samplerMap.erase(sampler);
5979         lock.unlock();
5980         dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
5981     }
5982 }
5983
5984 VKAPI_ATTR void VKAPI_CALL
5985 DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5986     // TODO : Clean up any internal data structures using this obj.
5987     get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5988         ->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5989 }
5990
5991 VKAPI_ATTR void VKAPI_CALL
5992 DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5993     // TODO : Clean up any internal data structures using this obj.
5994     get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5995         ->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
5996 }
5997 // Verify that the cmdBuffer in the given cb_node is not in the global in-flight set, and return the skip_call result
5998 //  An in-flight command buffer is an error if it is a primary, or if it is a secondary whose
5999 //  primary command buffer is also still in-flight
6000 // This function is only valid at a point when cmdBuffer is being reset or freed
6001 static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
6002     bool skip_call = false;
6003     if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
6004         // Primary CB or secondary where primary is also in-flight is an error
6005         if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
6006             (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
6007             skip_call |= log_msg(
6008                 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6009                 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6010                 "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
6011                 reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
6012         }
6013     }
6014     return skip_call;
6015 }
6016
6017 // Iterate over all cmdBuffers in given commandPool and verify that each is not in use
6018 static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
6019     bool skip_call = false;
6020     for (auto cmd_buffer : pPool->commandBuffers) {
6021         if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
6022             skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
6023         }
6024     }
6025     return skip_call;
6026 }
6027
6028 static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
6029     for (auto cmd_buffer : pPool->commandBuffers) {
6030         dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
6031     }
6032 }
6033
6034 VKAPI_ATTR void VKAPI_CALL
6035 FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6036     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6037     bool skip_call = false;
6038     std::unique_lock<std::mutex> lock(global_lock);
6039
6040     for (uint32_t i = 0; i < commandBufferCount; i++) {
6041         auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6042         // Verify that the command buffer is not in-flight before it may be freed
6043         if (cb_node) {
6044             skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free");
6045         }
6046     }
6047
6048     if (skip_call)
6049         return;
6050
6051     auto pPool = getCommandPoolNode(dev_data, commandPool);
6052     for (uint32_t i = 0; i < commandBufferCount; i++) {
6053         auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6054         // Delete CB information structure, and remove from commandBufferMap
6055         if (cb_node) {
6056             dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
6057             // reset prior to delete for data clean-up
6058             resetCB(dev_data, cb_node->commandBuffer);
6059             dev_data->commandBufferMap.erase(cb_node->commandBuffer);
6060             delete cb_node;
6061         }
6062
6063         // Remove commandBuffer reference from commandPoolMap
6064         pPool->commandBuffers.remove(pCommandBuffers[i]);
6065     }
6066     printCBList(dev_data);
6067     lock.unlock();
6068
6069     dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6070 }
6071
6072 VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6073                                                  const VkAllocationCallbacks *pAllocator,
6074                                                  VkCommandPool *pCommandPool) {
6075     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6076
6077     VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6078
6079     if (VK_SUCCESS == result) {
6080         std::lock_guard<std::mutex> lock(global_lock);
6081         dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6082         dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6083     }
6084     return result;
6085 }
6086
6087 VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6088                                                const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6089
6090     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6091     VkResult result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6092     if (result == VK_SUCCESS) {
6093         std::lock_guard<std::mutex> lock(global_lock);
6094         QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
6095         qp_node->createInfo = *pCreateInfo;
6096     }
6097     return result;
6098 }
6099
6100 // Destroy commandPool along with all of the commandBuffers allocated from that pool
6101 VKAPI_ATTR void VKAPI_CALL
6102 DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6103     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6104     bool skip_call = false;
6105     std::unique_lock<std::mutex> lock(global_lock);
6106     // Verify that command buffers in pool are complete (not in-flight)
6107     auto pPool = getCommandPoolNode(dev_data, commandPool);
6108     skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
6109
6110     if (skip_call)
6111         return;
6112     // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
6113     clearCommandBuffersInFlight(dev_data, pPool);
6114     for (auto cb : pPool->commandBuffers) {
6115         clear_cmd_buf_and_mem_references(dev_data, cb);
6116         auto cb_node = getCBNode(dev_data, cb);
6117         // Remove references to this cb_node prior to delete
6118         // TODO : Need better solution here, resetCB?
6119         for (auto obj : cb_node->object_bindings) {
6120             removeCommandBufferBinding(dev_data, &obj, cb_node);
6121         }
6122         for (auto framebuffer : cb_node->framebuffers) {
6123             auto fb_node = getFramebuffer(dev_data, framebuffer);
6124             if (fb_node)
6125                 fb_node->cb_bindings.erase(cb_node);
6126         }
6127         dev_data->commandBufferMap.erase(cb); // Remove this command buffer
6128         delete cb_node;                       // delete CB info structure
6129     }
6130     dev_data->commandPoolMap.erase(commandPool);
6131     lock.unlock();
6132
6133     dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
6134 }
6135
6136 VKAPI_ATTR VkResult VKAPI_CALL
6137 ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6138     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6139     bool skip_call = false;
6140
6141     std::unique_lock<std::mutex> lock(global_lock);
6142     auto pPool = getCommandPoolNode(dev_data, commandPool);
6143     skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
6144     lock.unlock();
6145
6146     if (skip_call)
6147         return VK_ERROR_VALIDATION_FAILED_EXT;
6148
6149     VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
6150
6151     // Reset all of the CBs allocated from this pool
6152     if (VK_SUCCESS == result) {
6153         lock.lock();
6154         clearCommandBuffersInFlight(dev_data, pPool);
6155         for (auto cmdBuffer : pPool->commandBuffers) {
6156             resetCB(dev_data, cmdBuffer);
6157         }
6158         lock.unlock();
6159     }
6160     return result;
6161 }
6162
6163 VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6164     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6165     bool skip_call = false;
6166     std::unique_lock<std::mutex> lock(global_lock);
6167     for (uint32_t i = 0; i < fenceCount; ++i) {
6168         auto pFence = getFenceNode(dev_data, pFences[i]);
6169         if (pFence && pFence->state == FENCE_INFLIGHT) {
6170             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6171                                  reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6172                                  "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
6173         }
6174     }
6175     lock.unlock();
6176
6177     if (skip_call)
6178         return VK_ERROR_VALIDATION_FAILED_EXT;
6179
6180     VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
6181
6182     if (result == VK_SUCCESS) {
6183         lock.lock();
6184         for (uint32_t i = 0; i < fenceCount; ++i) {
6185             auto pFence = getFenceNode(dev_data, pFences[i]);
6186             if (pFence) {
6187                 pFence->state = FENCE_UNSIGNALED;
6188             }
6189         }
6190         lock.unlock();
6191     }
6192
6193     return result;
6194 }
6195
6196 // For given cb_nodes, invalidate them and track object causing invalidation
6197 void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
6198     for (auto cb_node : cb_nodes) {
6199         cb_node->state = CB_INVALID;
6200         cb_node->broken_bindings.push_back(obj);
6201     }
6202 }
6203
6204 VKAPI_ATTR void VKAPI_CALL
6205 DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6206     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6207     std::unique_lock<std::mutex> lock(global_lock);
6208     auto fb_node = getFramebuffer(dev_data, framebuffer);
6209     if (fb_node) {
6210         invalidateCommandBuffers(fb_node->cb_bindings,
6211                                  {reinterpret_cast<uint64_t &>(fb_node->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT});
6212         dev_data->frameBufferMap.erase(fb_node->framebuffer);
6213     }
6214     lock.unlock();
6215     dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
6216 }
6217
6218 VKAPI_ATTR void VKAPI_CALL
6219 DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6220     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6221     bool skip = false;
6222     std::unique_lock<std::mutex> lock(global_lock);
6223     auto rp_state = getRenderPass(dev_data, renderPass);
6224     if (rp_state) {
6225         VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
6226         skip |= ValidateObjectNotInUse(dev_data, rp_state, obj_struct);
6227         // Any bound cmd buffers are now invalid
6228         invalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
6229     }
6230     if (!skip) {
6231         dev_data->renderPassMap.erase(renderPass);
6232         lock.unlock();
6233         dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
6234     }
6235 }
6236
6237 VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6238                                             const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6239     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6240
6241     VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6242
6243     if (VK_SUCCESS == result) {
6244         std::lock_guard<std::mutex> lock(global_lock);
6245         // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6246         dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
6247     }
6248     return result;
6249 }
6250
6251 static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6252     bool skip_call = false;
6253     BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
6254     // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
6255     if (buf_node) {
6256         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
6257         // In order to create a valid buffer view, the buffer must have been created with at least one of the
6258         // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6259         skip_call |= ValidateBufferUsageFlags(dev_data, buf_node,
6260                                               VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
6261                                               false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6262     }
6263     return skip_call;
6264 }
6265
6266 VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6267                                                 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6268     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6269     std::unique_lock<std::mutex> lock(global_lock);
6270     bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6271     lock.unlock();
6272     if (skip_call)
6273         return VK_ERROR_VALIDATION_FAILED_EXT;
6274     VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
6275     if (VK_SUCCESS == result) {
6276         lock.lock();
6277         dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
6278         lock.unlock();
6279     }
6280     return result;
6281 }
6282
6283 VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6284                                            const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6285     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6286
6287     VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
6288
6289     if (VK_SUCCESS == result) {
6290         std::lock_guard<std::mutex> lock(global_lock);
6291         IMAGE_LAYOUT_NODE image_node;
6292         image_node.layout = pCreateInfo->initialLayout;
6293         image_node.format = pCreateInfo->format;
6294         dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(*pImage, pCreateInfo))));
6295         ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6296         dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6297         dev_data->imageLayoutMap[subpair] = image_node;
6298     }
6299     return result;
6300 }
6301
6302 static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6303     /* expects global_lock to be held by caller */
6304
6305     auto image_node = getImageNode(dev_data, image);
6306     if (image_node) {
6307         /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6308          * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6309          * the actual values.
6310          */
6311         if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6312             range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
6313         }
6314
6315         if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6316             range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
6317         }
6318     }
6319 }
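// Example of the resolution above (illustrative): for an image created with mipLevels = 10 and
// arrayLayers = 6, a subresource range with baseMipLevel = 3, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 2, layerCount = VK_REMAINING_ARRAY_LAYERS resolves to
//
//     range->levelCount = 10 - 3 = 7
//     range->layerCount =  6 - 2 = 4
//
// so later layout tracking can iterate over concrete counts instead of sentinel values.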
6320
6321 // Return the correct layer/level counts if the caller used the special
6322 // values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6323 static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6324                                          VkImage image) {
6325     /* expects global_lock to be held by caller */
6326
6327     *levels = range.levelCount;
6328     *layers = range.layerCount;
6329     auto image_node = getImageNode(dev_data, image);
6330     if (image_node) {
6331         if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6332             *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
6333         }
6334         if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6335             *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
6336         }
6337     }
6338 }
6339
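// Editor's note: a worked example of the resolution both overloads above perform, using
// hypothetical values. Assuming an image created with mipLevels = 8 and arrayLayers = 4:
//
//     VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT,
//                                      2, VK_REMAINING_MIP_LEVELS,    // baseMipLevel, levelCount
//                                      1, VK_REMAINING_ARRAY_LAYERS}; // baseArrayLayer, layerCount
//     ResolveRemainingLevelsLayers(dev_data, &range, image);
//     // range.levelCount == 6 (8 - 2) and range.layerCount == 3 (4 - 1)
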
6340 static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) {
6341     bool skip_call = false;
6342     IMAGE_NODE *image_node = getImageNode(dev_data, pCreateInfo->image);
6343     if (image_node) {
6344         skip_call |= ValidateImageUsageFlags(
6345             dev_data, image_node, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6346                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6347             false, "vkCreateImageView()",
6348             "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
6349         // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6350         skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_node, "vkCreateImageView()");
6351     }
6352     return skip_call;
6353 }
6354
6355 static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView view) {
6356     dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, pCreateInfo));
6357     ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view]->create_info.subresourceRange, pCreateInfo->image);
6358 }
6359
6360 VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6361                                                const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6362     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6363     std::unique_lock<std::mutex> lock(global_lock);
6364     bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6365     lock.unlock();
6366     if (skip_call)
6367         return VK_ERROR_VALIDATION_FAILED_EXT;
6368     VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
6369     if (VK_SUCCESS == result) {
6370         lock.lock();
6371         PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
6372         lock.unlock();
6373     }
6374
6375     return result;
6376 }
6377
6378 VKAPI_ATTR VkResult VKAPI_CALL
6379 CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6380     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6381     VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
6382     if (VK_SUCCESS == result) {
6383         std::lock_guard<std::mutex> lock(global_lock);
6384         auto &fence_node = dev_data->fenceMap[*pFence];
6385         fence_node.fence = *pFence;
6386         fence_node.createInfo = *pCreateInfo;
6387         fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6388     }
6389     return result;
6390 }
6391
6392 // TODO handle pipeline caches
6393 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6394                                                    const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6395     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6396     VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6397     return result;
6398 }
6399
6400 VKAPI_ATTR void VKAPI_CALL
6401 DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6402     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6403     dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
6404 }
6405
6406 VKAPI_ATTR VkResult VKAPI_CALL
6407 GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6408     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6409     VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6410     return result;
6411 }
6412
6413 VKAPI_ATTR VkResult VKAPI_CALL
6414 MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6415     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6416     VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6417     return result;
6418 }
6419
6420 // utility function to set collective state for pipeline
6421 void set_pipeline_state(PIPELINE_NODE *pPipe) {
6422     // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6423     if (pPipe->graphicsPipelineCI.pColorBlendState) {
6424         for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6425             if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6426                 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6427                      (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6428                     ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6429                      (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6430                     ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6431                      (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6432                     ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6433                      (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6434                     pPipe->blendConstantsEnabled = true;
6435                 }
6436             }
6437         }
6438     }
6439 }
6440
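// Editor's note: the range comparisons in set_pipeline_state() rely on the four "constant"
// blend factors being contiguous in the VkBlendFactor enum. A hypothetical helper (not part
// of this layer) that states the intent directly:
static inline bool IsConstantBlendFactor(VkBlendFactor factor) {
    // VK_BLEND_FACTOR_CONSTANT_COLOR, ONE_MINUS_CONSTANT_COLOR, CONSTANT_ALPHA and
    // ONE_MINUS_CONSTANT_ALPHA occupy consecutive enum values, so a range test suffices.
    return (factor >= VK_BLEND_FACTOR_CONSTANT_COLOR) && (factor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA);
}
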
6441 VKAPI_ATTR VkResult VKAPI_CALL
6442 CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6443                         const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6444                         VkPipeline *pPipelines) {
6445     VkResult result = VK_SUCCESS;
6446     // TODO What to do with pipelineCache?
6447     // The order of operations here is a little convoluted but gets the job done
6448     //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6449     //  2. Create state is then validated (which uses flags setup during shadowing)
6450     //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6451     bool skip_call = false;
6452     // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6453     vector<PIPELINE_NODE *> pPipeNode(count);
6454     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6455
6456     uint32_t i = 0;
6457     std::unique_lock<std::mutex> lock(global_lock);
6458
6459     for (i = 0; i < count; i++) {
6460         pPipeNode[i] = new PIPELINE_NODE;
6461         pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
6462         pPipeNode[i]->render_pass_ci.initialize(getRenderPass(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6463         pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6464
6465         skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6466     }
6467
6468     if (!skip_call) {
6469         lock.unlock();
6470         result =
6471             dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6472         lock.lock();
6473         for (i = 0; i < count; i++) {
6474             pPipeNode[i]->pipeline = pPipelines[i];
6475             dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6476         }
6477         lock.unlock();
6478     } else {
6479         for (i = 0; i < count; i++) {
6480             delete pPipeNode[i];
6481         }
6482         lock.unlock();
6483         return VK_ERROR_VALIDATION_FAILED_EXT;
6484     }
6485     return result;
6486 }
6487
6488 VKAPI_ATTR VkResult VKAPI_CALL
6489 CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6490                        const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6491                        VkPipeline *pPipelines) {
6492     VkResult result = VK_SUCCESS;
6493     bool skip_call = false;
6494
6495     // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6496     vector<PIPELINE_NODE *> pPipeNode(count);
6497     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6498
6499     uint32_t i = 0;
6500     std::unique_lock<std::mutex> lock(global_lock);
6501     for (i = 0; i < count; i++) {
6502         // TODO: Verify compute stage bits
6503
6504         // Create and initialize internal tracking data structure
6505         pPipeNode[i] = new PIPELINE_NODE;
6506         pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
6507         pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6508         // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6509
6510         // TODO: Add Compute Pipeline Verification
6511         skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i], &dev_data->enabled_features,
6512                                                 dev_data->shaderModuleMap);
6513         // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6514     }
6515
6516     if (!skip_call) {
6517         lock.unlock();
6518         result =
6519             dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6520         lock.lock();
6521         for (i = 0; i < count; i++) {
6522             pPipeNode[i]->pipeline = pPipelines[i];
6523             dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6524         }
6525         lock.unlock();
6526     } else {
6527         for (i = 0; i < count; i++) {
6528             // Clean up any locally allocated data structures
6529             delete pPipeNode[i];
6530         }
6531         lock.unlock();
6532         return VK_ERROR_VALIDATION_FAILED_EXT;
6533     }
6534     return result;
6535 }
6536
6537 VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6538                                              const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6539     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6540     VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6541     if (VK_SUCCESS == result) {
6542         std::lock_guard<std::mutex> lock(global_lock);
6543         dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6544     }
6545     return result;
6546 }
6547
6548 VKAPI_ATTR VkResult VKAPI_CALL
6549 CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6550                           const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6551     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6552     VkResult result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6553     if (VK_SUCCESS == result) {
6554         // TODOSC : Capture layout bindings set
6555         std::lock_guard<std::mutex> lock(global_lock);
6556         dev_data->descriptorSetLayoutMap[*pSetLayout] =
6557             new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
6558     }
6559     return result;
6560 }
6561
6562 // Used by CreatePipelineLayout and CmdPushConstants.
6563 // Note that the index argument is optional and only used by CreatePipelineLayout.
6564 static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6565                                       const char *caller_name, uint32_t index = 0) {
6566     uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6567     bool skip_call = false;
6568     // Check that offset + size don't exceed the max.
6569     // Prevent arithmetic overflow here by avoiding addition and testing in this order.
6570     if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6571         // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6572         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6573             skip_call |=
6574                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6575                         DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
6576                                                               "exceeds this device's maxPushConstantsSize of %u.",
6577                         caller_name, index, offset, size, maxPushConstantsSize);
6578         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6579             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6580                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6581                                                                        "exceeds this device's maxPushConstantsSize of %u.",
6582                                  caller_name, offset, size, maxPushConstantsSize);
6583         } else {
6584             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6585                                  DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6586         }
6587     }
6588     // size needs to be non-zero and a multiple of 4.
6589     if ((size == 0) || ((size & 0x3) != 0)) {
6590         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6591             skip_call |=
6592                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6593                         DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6594                                                               "size %u. Size must be greater than zero and a multiple of 4.",
6595                         caller_name, index, size);
6596         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6597             skip_call |=
6598                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6599                         DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6600                                                               "size %u. Size must be greater than zero and a multiple of 4.",
6601                         caller_name, size);
6602         } else {
6603             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6604                                  DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6605         }
6606     }
6607     // offset needs to be a multiple of 4.
6608     if ((offset & 0x3) != 0) {
6609         if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6610             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6611                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6612                                                                        "offset %u. Offset must be a multiple of 4.",
6613                                  caller_name, index, offset);
6614         } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6615             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6616                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6617                                                                        "offset %u. Offset must be a multiple of 4.",
6618                                  caller_name, offset);
6619         } else {
6620             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6621                                  DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6622         }
6623     }
6624     return skip_call;
6625 }
6626
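// Editor's note: a worked example of the overflow-safe bounds test above, assuming a
// hypothetical maxPushConstantsSize of 128:
//
//     offset = 120, size = 16  -> offset < 128, but 16 > 128 - 120, so the range is rejected
//     offset = 4,   size = 124 -> 4 < 128 and 124 <= 128 - 4,       so the range passes
//
// Testing (size > max - offset) after ruling out (offset >= max) avoids 32-bit wrap-around:
// e.g. offset = 4, size = 0xFFFFFFFD would make offset + size wrap to 1 and slip past a
// naive (offset + size > max) check.
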
6627 VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6628                                                     const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6629     bool skip_call = false;
6630     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6631     // Push Constant Range checks
6632     uint32_t i, j;
6633     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6634         skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6635                                                pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
6636         if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
6637             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6638                                  DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
6639         }
6640     }
6641     if (skip_call)
6642         return VK_ERROR_VALIDATION_FAILED_EXT;
6643
6644     // Each range has been validated; now check for overlap between ranges.
6645     // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
6646     for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6647         for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
6648             const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
6649             const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
6650             const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
6651             const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
6652             if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
6653                 skip_call |=
6654                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6655                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
6656                                                                   "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
6657                             i, minA, maxA, j, minB, maxB);
6658             }
6659         }
6660     }
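    // Editor's note: a worked example of the half-open overlap test above. Ranges
    // i = [0, 16) and j = [8, 24) overlap, since minA(0) <= minB(8) and maxA(16) > minB(8);
    // ranges [0, 16) and [16, 32) do not, so adjacent (touching) ranges produce no warning.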
6661
6662     VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6663     if (VK_SUCCESS == result) {
6664         std::lock_guard<std::mutex> lock(global_lock);
6665         PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6666         plNode.layout = *pPipelineLayout;
6667         plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
6668         for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6669             plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
6670         }
6671         plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
6672         for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6673             plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
6674         }
6675     }
6676     return result;
6677 }
6678
6679 VKAPI_ATTR VkResult VKAPI_CALL
6680 CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6681                      VkDescriptorPool *pDescriptorPool) {
6682     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6683     VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6684     if (VK_SUCCESS == result) {
6685         // Log the creation, then add the new pool to descriptorPoolMap
6686         if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6687                     (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
6688                     (uint64_t)*pDescriptorPool))
6689             return VK_ERROR_VALIDATION_FAILED_EXT;
6690         DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6691         if (NULL == pNewNode) {
6692             if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6693                         (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6694                         "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6695                 return VK_ERROR_VALIDATION_FAILED_EXT;
6696         } else {
6697             std::lock_guard<std::mutex> lock(global_lock);
6698             dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6699         }
6700     } else {
6701         // TODO: Is any cleanup needed if pool creation fails?
6702     }
6703     return result;
6704 }
6705
6706 VKAPI_ATTR VkResult VKAPI_CALL
6707 ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6708     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6709     VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
6710     if (VK_SUCCESS == result) {
6711         std::lock_guard<std::mutex> lock(global_lock);
6712         clearDescriptorPool(dev_data, device, descriptorPool, flags);
6713     }
6714     return result;
6715 }
6716 // Ensure the pool contains enough descriptors and descriptor sets to satisfy
6717 // an allocation request. Fills common_data with the total number of descriptors of each type required,
6718 // as well as DescriptorSetLayout ptrs used for later update.
6719 static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6720                                                   cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6721     // All state checks for AllocateDescriptorSets are done in a single function
6722     return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6723 }
6724 // Allocation state was good and the down-chain call was made, so update state to reflect the allocated descriptor sets
6725 static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6726                                                  VkDescriptorSet *pDescriptorSets,
6727                                                  const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6728     // All the updates are contained in a single cvdescriptorset function
6729     cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6730                                                    &dev_data->setMap, dev_data);
6731 }
6732
6733 VKAPI_ATTR VkResult VKAPI_CALL
6734 AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6735     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6736     std::unique_lock<std::mutex> lock(global_lock);
6737     cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6738     bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6739     lock.unlock();
6740
6741     if (skip_call)
6742         return VK_ERROR_VALIDATION_FAILED_EXT;
6743
6744     VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6745
6746     if (VK_SUCCESS == result) {
6747         lock.lock();
6748         PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6749         lock.unlock();
6750     }
6751     return result;
6752 }
6753 // Verify state before freeing DescriptorSets
6754 static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6755                                               const VkDescriptorSet *descriptor_sets) {
6756     bool skip_call = false;
6757     // First make sure sets being destroyed are not currently in-use
6758     for (uint32_t i = 0; i < count; ++i)
6759         skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6760
6761     DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6762     if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6763         // Can't Free from a NON_FREE pool
6764         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6765                              reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6766                              "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6767                              "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6768     }
6769     return skip_call;
6770 }
6771 // Sets have been removed from the pool so update underlying state
6772 static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6773                                              const VkDescriptorSet *descriptor_sets) {
6774     DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6775     // Update available descriptor sets in pool
6776     pool_state->availableSets += count;
6777
6778     // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6779     for (uint32_t i = 0; i < count; ++i) {
6780         auto set_state = dev_data->setMap[descriptor_sets[i]];
6781         uint32_t type_index = 0, descriptor_count = 0;
6782         for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6783             type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6784             descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6785             pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6786         }
6787         freeDescriptorSet(dev_data, set_state);
6788         pool_state->sets.erase(set_state);
6789     }
6790 }
6791
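// Editor's note: a minimal sketch of the accounting above, with hypothetical contents.
// Freeing one set whose layout has two bindings, 3 x UNIFORM_BUFFER and 1 x SAMPLER,
// returns its capacity to the pool:
//
//     pool_state->availableSets                                                    += 1
//     pool_state->availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER]  += 3
//     pool_state->availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_SAMPLER]         += 1
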
6792 VKAPI_ATTR VkResult VKAPI_CALL
6793 FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6794     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6795     // Make sure that no sets being destroyed are in-flight
6796     std::unique_lock<std::mutex> lock(global_lock);
6797     bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6798     lock.unlock();
6799
6800     if (skip_call)
6801         return VK_ERROR_VALIDATION_FAILED_EXT;
6802     VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6803     if (VK_SUCCESS == result) {
6804         lock.lock();
6805         PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6806         lock.unlock();
6807     }
6808     return result;
6809 }
6810 // TODO : This is a Proof-of-concept for core validation architecture
6811 //  Really we'll want to break out these functions to separate files but
6812 //  keeping it all together here to prove out design
6813 // PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6814 static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6815                                                 const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6816                                                 const VkCopyDescriptorSet *pDescriptorCopies) {
6817     // First thing to do is perform map look-ups.
6818     // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
6819     //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below
6820
6821     // Now make call(s) that validate state, but don't perform state updates in this function
6822     // Note: there is no single DescriptorSet instance to dispatch to here, so we use a helper function in the
6823     //  cvdescriptorset namespace which parses the params and makes calls into the specific class instances
6824     return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6825                                                          descriptorCopyCount, pDescriptorCopies);
6826 }
6827 // PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6828 static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6829                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6830                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6831     cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6832                                                  pDescriptorCopies);
6833 }
6834
6835 VKAPI_ATTR void VKAPI_CALL
6836 UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6837                      uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6838     // The only map look-up at this top level is for the device-level layer_data
6839     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6840     std::unique_lock<std::mutex> lock(global_lock);
6841     bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6842                                                          pDescriptorCopies);
6843     lock.unlock();
6844     if (!skip_call) {
6845         dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6846                                                       pDescriptorCopies);
6847         lock.lock();
6848         // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6849         PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6850                                            pDescriptorCopies);
6851     }
6852 }
6853
6854 VKAPI_ATTR VkResult VKAPI_CALL
6855 AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6856     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6857     VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6858     if (VK_SUCCESS == result) {
6859         std::unique_lock<std::mutex> lock(global_lock);
6860         auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
6861
6862         if (pPool) {
6863             for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6864                 // Add command buffer to its commandPool map
6865                 pPool->commandBuffers.push_back(pCommandBuffer[i]);
6866                 GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6867                 // Add command buffer to map
6868                 dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6869                 resetCB(dev_data, pCommandBuffer[i]);
6870                 pCB->createInfo = *pCreateInfo;
6871                 pCB->device = device;
6872             }
6873         }
6874         printCBList(dev_data);
6875         lock.unlock();
6876     }
6877     return result;
6878 }
6879
6880 // Add bindings between the given cmd buffer & framebuffer and the framebuffer's children (image views and render pass)
6881 static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_NODE *fb_state) {
6882     fb_state->cb_bindings.insert(cb_state);
6883     for (auto attachment : fb_state->attachments) {
6884         auto view_state = attachment.view_state;
6885         if (view_state) {
6886             AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
6887         }
6888         auto rp_state = getRenderPass(dev_data, fb_state->createInfo.renderPass);
6889         if (rp_state) {
6890             addCommandBufferBinding(
6891                 &rp_state->cb_bindings,
6892                 {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
6893         }
6894     }
6895 }
6896
6897 VKAPI_ATTR VkResult VKAPI_CALL
6898 BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6899     bool skip_call = false;
6900     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6901     std::unique_lock<std::mutex> lock(global_lock);
6902     // Validate command buffer level
6903     GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
6904     if (cb_node) {
6905         // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6906         if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6907             skip_call |=
6908                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6909                         (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6910                         "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6911                         "You must check CB fence before this call.",
6912                         commandBuffer);
6913         }
6914         clear_cmd_buf_and_mem_references(dev_data, cb_node);
6915         if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6916             // Secondary Command Buffer
6917             const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6918             if (!pInfo) {
6919                 skip_call |=
6920                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6921                             reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6922                             "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6923                             reinterpret_cast<void *>(commandBuffer));
6924             } else {
6925                 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6926                     if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6927                         skip_call |= log_msg(
6928                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6929                             reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6930                             "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6931                             reinterpret_cast<void *>(commandBuffer));
6932                     }
6933                     if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6934                         skip_call |= log_msg(
6935                             dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6936                             reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6937                             "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
6938                             "valid framebuffer parameter is specified.",
6939                             reinterpret_cast<void *>(commandBuffer));
6940                     } else {
6941                         string errorString = "";
6942                         auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6943                         if (framebuffer) {
6944                             if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
6945                                 !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
6946                                                                  getRenderPass(dev_data, pInfo->renderPass)->createInfo.ptr(),
6947                                                                  errorString)) {
6948                                 // renderPass that framebuffer was created with must be compatible with local renderPass
6949                                 skip_call |= log_msg(
6950                                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6951                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6952                                     __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
6953                                     "vkBeginCommandBuffer(): Secondary Command "
6954                                     "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6955                                     "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6956                                     reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
6957                                     reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
6958                                     reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
6959                             }
6960                             // Connect this framebuffer and its children to this cmdBuffer
6961                             AddFramebufferBinding(dev_data, cb_node, framebuffer);
6962                         }
6963                     }
6964                 }
6965                 if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6966                      dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
6967                     (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6968                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6969                                          VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6970                                          __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6971                                          "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6972                                          "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
6973                                          "support precise occlusion queries.",
6974                                          reinterpret_cast<void *>(commandBuffer));
6975                 }
6976             }
6977             if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6978                 auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6979                 if (renderPass) {
6980                     if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
6981                         skip_call |= log_msg(
6982                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6983                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6984                             "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
6985                             "that is less than the number of subpasses (%d).",
6986                             (void *)commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount);
6987                     }
6988                 }
6989             }
6990         }
6991         if (CB_RECORDING == cb_node->state) {
6992             skip_call |=
6993                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6994                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6995                         "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6996                         ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6997                         (uint64_t)commandBuffer);
6998         } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && !cb_node->cmds.empty() && CMD_END == cb_node->cmds.back().type)) {
6999             VkCommandPool cmdPool = cb_node->createInfo.commandPool;
7000             auto pPool = getCommandPoolNode(dev_data, cmdPool);
7001             if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7002                 skip_call |=
7003                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7004                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7005                             "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
7006                             ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
7007                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7008                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
7009             }
7010             resetCB(dev_data, commandBuffer);
7011         }
7012         // Set updated state here in case implicit reset occurs above
7013         cb_node->state = CB_RECORDING;
7014         cb_node->beginInfo = *pBeginInfo;
7015         if (cb_node->beginInfo.pInheritanceInfo) {
7016             cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
7017             cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
7018             // If we are a secondary command buffer and inheriting, update the items we should inherit.
7019             if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
7020                 (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7021                 cb_node->activeRenderPass = getRenderPass(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
7022                 cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
7023                 cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
7024             }
7025         }
7026     } else {
7027         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7028                              (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7029                              "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
7030     }
7031     lock.unlock();
7032     if (skip_call) {
7033         return VK_ERROR_VALIDATION_FAILED_EXT;
7034     }
7035     VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
7036
7037     return result;
7038 }
7039
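// Editor's note: a minimal client-side sketch (hypothetical handles, not part of this layer)
// of a secondary command buffer begin that satisfies the checks above:
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;   // must be valid when RENDER_PASS_CONTINUE_BIT is set
//     inherit.subpass = 0;                // must be < the render pass's subpassCount
//     inherit.framebuffer = framebuffer;  // optional here, but recommended for performance
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin_info.pInheritanceInfo = &inherit;  // secondary CBs must supply inheritance info
//     vkBeginCommandBuffer(secondary_cb, &begin_info);
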
7040 VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
7041     bool skip_call = false;
7042     VkResult result = VK_SUCCESS;
7043     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7044     std::unique_lock<std::mutex> lock(global_lock);
7045     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7046     if (pCB) {
7047         if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7048             // This needs spec clarification to update valid usage, see comments in PR:
7049             // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
7050             skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
7051         }
7052         skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
7053         for (auto query : pCB->activeQueries) {
7054             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7055                                  DRAWSTATE_INVALID_QUERY, "DS",
7056                                  "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
7057                                  (uint64_t)(query.pool), query.index);
7058         }
7059     }
7060     if (!skip_call) {
7061         lock.unlock();
7062         result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
7063         lock.lock();
7064         if (VK_SUCCESS == result) {
7065             pCB->state = CB_RECORDED;
7066             // Reset CB status flags
7067             pCB->status = 0;
7068             printCB(dev_data, commandBuffer);
7069         }
7070     } else {
7071         result = VK_ERROR_VALIDATION_FAILED_EXT;
7072     }
7073     lock.unlock();
7074     return result;
7075 }
7076
7077 VKAPI_ATTR VkResult VKAPI_CALL
7078 ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7079     bool skip_call = false;
7080     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7081     std::unique_lock<std::mutex> lock(global_lock);
7082     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7083     VkCommandPool cmdPool = pCB->createInfo.commandPool;
7084     auto pPool = getCommandPoolNode(dev_data, cmdPool);
7085     if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7086         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7087                              (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7088                              "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
7089                              ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7090                              (uint64_t)commandBuffer, (uint64_t)cmdPool);
7091     }
7092     skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
7093     lock.unlock();
7094     if (skip_call)
7095         return VK_ERROR_VALIDATION_FAILED_EXT;
7096     VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
7097     if (VK_SUCCESS == result) {
7098         lock.lock();
7099         dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
7100         resetCB(dev_data, commandBuffer);
7101         lock.unlock();
7102     }
7103     return result;
7104 }
7105
7106 VKAPI_ATTR void VKAPI_CALL
7107 CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7108     bool skip_call = false;
7109     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7110     std::unique_lock<std::mutex> lock(global_lock);
7111     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7112     if (pCB) {
7113         skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7114         if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7115             skip_call |=
7116                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7117                         (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7118                         "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
7119                         (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
7120         }
7121
7122         PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7123         if (pPN) {
7124             pCB->lastBound[pipelineBindPoint].pipeline_node = pPN;
7125             set_cb_pso_status(pCB, pPN);
7126             set_pipeline_state(pPN);
7127             addCommandBufferBinding(&pPN->cb_bindings,
7128                                     {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, pCB);
7129         } else {
7130             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7131                                  (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7132                                  "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7133         }
7134     }
7135     lock.unlock();
7136     if (!skip_call)
7137         dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7138 }
7139
7140 VKAPI_ATTR void VKAPI_CALL
7141 CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7142     bool skip_call = false;
7143     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7144     std::unique_lock<std::mutex> lock(global_lock);
7145     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7146     if (pCB) {
7147         skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7148         pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport;
7149     }
7150     lock.unlock();
7151     if (!skip_call)
7152         dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7153 }
7154
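// Editor's note: the mask update above marks viewports [firstViewport, firstViewport +
// viewportCount) as statically set. For example, firstViewport = 2 and viewportCount = 3 give
// ((1u << 3) - 1u) << 2 == 0b11100, i.e. bits for viewports 2, 3 and 4. CmdSetScissor below
// applies the same pattern to scissorMask.
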
7155 VKAPI_ATTR void VKAPI_CALL
7156 CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7157     bool skip_call = false;
7158     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7159     std::unique_lock<std::mutex> lock(global_lock);
7160     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7161     if (pCB) {
7162         skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7163         pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor;
7164     }
7165     lock.unlock();
7166     if (!skip_call)
7167         dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7168 }
7169
7170 VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7171     bool skip_call = false;
7172     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7173     std::unique_lock<std::mutex> lock(global_lock);
7174     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7175     if (pCB) {
7176         skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7177         pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7178
7179         PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
7180         if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
7181             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7182                                  reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
7183                                  "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
7184                                  "flag. This is undefined behavior and the new line width may be ignored.");
7185         } else {
7186             skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
7187         }
7188     }
7189     lock.unlock();
7190     if (!skip_call)
7191         dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
7192 }
7193
7194 VKAPI_ATTR void VKAPI_CALL
7195 CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7196     bool skip_call = false;
7197     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7198     std::unique_lock<std::mutex> lock(global_lock);
7199     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7200     if (pCB) {
7201         skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7202         pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7203     }
7204     lock.unlock();
7205     if (!skip_call)
7206         dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
7207 }
7208
7209 VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7210     bool skip_call = false;
7211     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7212     std::unique_lock<std::mutex> lock(global_lock);
7213     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7214     if (pCB) {
7215         skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7216         pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7217     }
7218     lock.unlock();
7219     if (!skip_call)
7220         dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
7221 }
7222
7223 VKAPI_ATTR void VKAPI_CALL
7224 CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7225     bool skip_call = false;
7226     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7227     std::unique_lock<std::mutex> lock(global_lock);
7228     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7229     if (pCB) {
7230         skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7231         pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7232     }
7233     lock.unlock();
7234     if (!skip_call)
7235         dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7236 }
7237
7238 VKAPI_ATTR void VKAPI_CALL
7239 CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7240     bool skip_call = false;
7241     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7242     std::unique_lock<std::mutex> lock(global_lock);
7243     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7244     if (pCB) {
7245         skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7246         pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7247     }
7248     lock.unlock();
7249     if (!skip_call)
7250         dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7251 }
7252
7253 VKAPI_ATTR void VKAPI_CALL
7254 CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7255     bool skip_call = false;
7256     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7257     std::unique_lock<std::mutex> lock(global_lock);
7258     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7259     if (pCB) {
7260         skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7261         pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7262     }
7263     lock.unlock();
7264     if (!skip_call)
7265         dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7266 }
7267
7268 VKAPI_ATTR void VKAPI_CALL
7269 CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7270     bool skip_call = false;
7271     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7272     std::unique_lock<std::mutex> lock(global_lock);
7273     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7274     if (pCB) {
7275         skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7276         pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7277     }
7278     lock.unlock();
7279     if (!skip_call)
7280         dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
7281 }
7282
7283 VKAPI_ATTR void VKAPI_CALL
7284 CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7285                       uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7286                       const uint32_t *pDynamicOffsets) {
7287     bool skip_call = false;
7288     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7289     std::unique_lock<std::mutex> lock(global_lock);
7290     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7291     if (pCB) {
7292         if (pCB->state == CB_RECORDING) {
7293             // Track total count of dynamic descriptor types to make sure we have an offset for each one
7294             uint32_t totalDynamicDescriptors = 0;
7295             string errorString = "";
7296             uint32_t lastSetIndex = firstSet + setCount - 1;
7297             if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
7298                 pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7299                 pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
7300             }
7301             auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7302             auto pipeline_layout = getPipelineLayout(dev_data, layout);
7303             for (uint32_t i = 0; i < setCount; i++) {
7304                 cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7305                 if (pSet) {
7306                     pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
7307                     pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
7308                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7309                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7310                                          DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
7311                                          (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7312                     if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
7313                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7314                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7315                                              DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7316                                              "DS 0x%" PRIxLEAST64
7317                                              " bound but it was never updated. You may want to either update it or not bind it.",
7318                                              (uint64_t)pDescriptorSets[i]);
7319                     }
7320                     // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7321                     if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
7322                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7323                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7324                                              DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7325                                              "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
7326                                              "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
7327                                              i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
7328                     }
7329
7330                     auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
7331
7332                     pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7333
7334                     if (setDynamicDescriptorCount) {
7335                         // First make sure we won't overstep bounds of pDynamicOffsets array
7336                         if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
7337                             skip_call |=
7338                                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7339                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7340                                         DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7341                                         "descriptorSet #%u (0x%" PRIxLEAST64
7342                                         ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7343                                         "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7344                                         i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
7345                                         (dynamicOffsetCount - totalDynamicDescriptors));
7346                         } else { // Validate and store dynamic offsets with the set
7347                             // Validate Dynamic Offset Minimums
7348                             uint32_t cur_dyn_offset = totalDynamicDescriptors;
7349                             for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
7350                                 if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7351                                     if (vk_safe_modulo(
7352                                             pDynamicOffsets[cur_dyn_offset],
7353                                             dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7354                                         skip_call |= log_msg(
7355                                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7356                                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7357                                             DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7358                                             "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7359                                             "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
7360                                             cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7361                                             dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7362                                     }
7363                                     cur_dyn_offset++;
7364                                 } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7365                                     if (vk_safe_modulo(
7366                                             pDynamicOffsets[cur_dyn_offset],
7367                                             dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7368                                         skip_call |= log_msg(
7369                                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7370                                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7371                                             DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7372                                             "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7373                                             "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
7374                                             cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7375                                             dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7376                                     }
7377                                     cur_dyn_offset++;
7378                                 }
7379                             }
7380
7381                             pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7382                                 std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7383                                                       pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
7384                             // Keep running total of dynamic descriptor count to verify at the end
7385                             totalDynamicDescriptors += setDynamicDescriptorCount;
7386
7387                         }
7388                     }
7389                 } else {
7390                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7391                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7392                                          DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
7393                                          (uint64_t)pDescriptorSets[i]);
7394                 }
7395                 skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7396                 // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7397                 if (firstSet > 0) { // Check set #s below the first bound set
7398                     for (uint32_t i = 0; i < firstSet; ++i) {
7399                         if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7400                             !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7401                                                              pipeline_layout, i, errorString)) {
7402                             skip_call |= log_msg(
7403                                 dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7404                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7405                                 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7406                                 "DescriptorSet 0x%" PRIxLEAST64
7407                                 " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7408                                 (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7409                             pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7410                         }
7411                     }
7412                 }
7413                 // Check if newly last bound set invalidates any remaining bound sets
7414                 if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7415                     if (oldFinalBoundSet &&
7416                         !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
7417                         auto old_set = oldFinalBoundSet->GetSet();
7418                         skip_call |=
7419                             log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7420                                     VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
7421                                     DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
7422                                                           " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7423                                                           " newly bound as set #%u so set #%u and any subsequent sets were "
7424                                                           "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7425                                     reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
7426                                     (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7427                                     lastSetIndex + 1, (uint64_t)layout);
7428                         pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7429                     }
7430                 }
7431             }
7432             //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7433             if (totalDynamicDescriptors != dynamicOffsetCount) {
7434                 skip_call |=
7435                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7436                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7437                             "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7438                             "is %u. It should exactly match the number of dynamic descriptors.",
7439                             setCount, totalDynamicDescriptors, dynamicOffsetCount);
7440             }
7441         } else {
7442             skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7443         }
7444     }
7445     lock.unlock();
7446     if (!skip_call)
7447         dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7448                                                        pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7449 }
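
// Illustrative sketch (not part of the layer): the offset rules enforced above, seen from
// the application's side. pDynamicOffsets must carry exactly one entry per dynamic
// descriptor in the bound sets, consumed in binding order, and each entry must be a
// multiple of the matching device limit. ExampleAlignedDynamicOffset is a hypothetical
// helper name.
static uint32_t ExampleAlignedDynamicOffset(const VkPhysicalDeviceLimits &limits, uint32_t desired_offset) {
    // minUniformBufferOffsetAlignment is guaranteed by the spec to be a power of two,
    // so rounding up can be done with a mask.
    VkDeviceSize align = limits.minUniformBufferOffsetAlignment;
    return static_cast<uint32_t>((desired_offset + align - 1) & ~(align - 1));
}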
7450
7451 VKAPI_ATTR void VKAPI_CALL
7452 CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7453     bool skip_call = false;
7454     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7455     // TODO : Somewhere need to verify that IBs have correct usage state flagged
7456     std::unique_lock<std::mutex> lock(global_lock);
7457
7458     auto buff_node = getBufferNode(dev_data, buffer);
7459     auto cb_node = getCBNode(dev_data, commandBuffer);
7460     if (cb_node && buff_node) {
7461         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7462         std::function<bool()> function = [=]() {
7463             return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7464         };
7465         cb_node->validate_functions.push_back(function);
7466         skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7467         VkDeviceSize offset_align = 0;
7468         switch (indexType) {
7469         case VK_INDEX_TYPE_UINT16:
7470             offset_align = 2;
7471             break;
7472         case VK_INDEX_TYPE_UINT32:
7473             offset_align = 4;
7474             break;
7475         default:
7476             // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7477             break;
7478         }
7479         if (!offset_align || (offset % offset_align)) {
7480             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7481                                  DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7482                                  "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7483                                  offset, string_VkIndexType(indexType));
7484         }
7485         cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7486     } else {
7487         assert(0);
7488     }
7489     lock.unlock();
7490     if (!skip_call)
7491         dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7492 }
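
// Illustrative sketch (not part of the layer): the alignment rule checked above in
// isolation -- index buffer offsets must be 2-byte aligned for VK_INDEX_TYPE_UINT16 and
// 4-byte aligned for VK_INDEX_TYPE_UINT32.
static bool ExampleIndexOffsetIsAligned(VkDeviceSize offset, VkIndexType index_type) {
    VkDeviceSize align = (index_type == VK_INDEX_TYPE_UINT16) ? 2 : (index_type == VK_INDEX_TYPE_UINT32) ? 4 : 0;
    return (align != 0) && ((offset % align) == 0);
}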
7493
7494 void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7495     uint32_t end = firstBinding + bindingCount;
7496     if (pCB->currentDrawData.buffers.size() < end) {
7497         pCB->currentDrawData.buffers.resize(end);
7498     }
7499     for (uint32_t i = 0; i < bindingCount; ++i) {
7500         pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7501     }
7502 }
7503
7504 static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7505
7506 VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7507                                                 uint32_t bindingCount, const VkBuffer *pBuffers,
7508                                                 const VkDeviceSize *pOffsets) {
7509     bool skip_call = false;
7510     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7511     // TODO : Somewhere need to verify that VBs have correct usage state flagged
7512     std::unique_lock<std::mutex> lock(global_lock);
7513
7514     auto cb_node = getCBNode(dev_data, commandBuffer);
7515     if (cb_node) {
7516         for (uint32_t i = 0; i < bindingCount; ++i) {
7517             auto buff_node = getBufferNode(dev_data, pBuffers[i]);
7518             assert(buff_node);
7519             skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7520             std::function<bool()> function = [=]() {
7521                 return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7522             };
7523             cb_node->validate_functions.push_back(function);
7524         }
7525         skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7526         updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7527     } else {
7528         skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7529     }
7530     lock.unlock();
7531     if (!skip_call)
7532         dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7533 }
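
// Illustrative sketch (not part of the layer): the parallel-array contract tracked by
// updateResourceTracking() above. pOffsets[i] is the starting byte offset into
// pBuffers[i], and pBuffers[i] feeds vertex input binding (firstBinding + i). The
// handles here are placeholders.
static void ExamplePackVertexBindings(VkBuffer positions, VkBuffer colors, VkBuffer out_buffers[2],
                                      VkDeviceSize out_offsets[2]) {
    out_buffers[0] = positions; // consumed as binding firstBinding + 0
    out_buffers[1] = colors;    // consumed as binding firstBinding + 1
    out_offsets[0] = 0;
    out_offsets[1] = 0;
}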
7534
7535 /* expects global_lock to be held by caller */
7536 static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7537     bool skip_call = false;
7538
7539     for (auto imageView : pCB->updateImages) {
7540         auto view_state = getImageViewState(dev_data, imageView);
7541         if (!view_state)
7542             continue;
7543
7544         auto img_node = getImageNode(dev_data, view_state->create_info.image);
7545         assert(img_node);
7546         std::function<bool()> function = [=]() {
7547             SetImageMemoryValid(dev_data, img_node, true);
7548             return false;
7549         };
7550         pCB->validate_functions.push_back(function);
7551     }
7552     for (auto buffer : pCB->updateBuffers) {
7553         auto buff_node = getBufferNode(dev_data, buffer);
7554         assert(buff_node);
7555         std::function<bool()> function = [=]() {
7556             SetBufferMemoryValid(dev_data, buff_node, true);
7557             return false;
7558         };
7559         pCB->validate_functions.push_back(function);
7560     }
7561     return skip_call;
7562 }
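
// The deferred std::function pattern above deserves a note: some checks (and state
// mutations such as marking memory contents valid) cannot run until the command buffer
// is actually submitted, so they are captured as closures and replayed at submit time.
// A minimal sketch of the replay step, independent of the layer's types:
static bool ExampleRunDeferredChecks(std::vector<std::function<bool()>> &checks) {
    bool skip = false;
    for (auto &check : checks) {
        skip |= check(); // each closure returns true when validation fails
    }
    checks.clear();
    return skip;
}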
7563
7564 VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7565                                    uint32_t firstVertex, uint32_t firstInstance) {
7566     bool skip_call = false;
7567     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7568     std::unique_lock<std::mutex> lock(global_lock);
7569     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7570     if (pCB) {
7571         skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7572         pCB->drawCount[DRAW]++;
7573         skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
7574         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7575         // TODO : Need to pass commandBuffer as srcObj here
7576         skip_call |=
7577             log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7578                     __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
7579         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7580         if (!skip_call) {
7581             updateResourceTrackingOnDraw(pCB);
7582         }
7583         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7584     }
7585     lock.unlock();
7586     if (!skip_call)
7587         dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7588 }
7589
7590 VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7591                                           uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7592                                           uint32_t firstInstance) {
7593     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7594     bool skip_call = false;
7595     std::unique_lock<std::mutex> lock(global_lock);
7596     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7597     if (pCB) {
7598         skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7599         pCB->drawCount[DRAW_INDEXED]++;
7600         skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
7601         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7602         // TODO : Need to pass commandBuffer as srcObj here
7603         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7604                              VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7605                              "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7606         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7607         if (!skip_call) {
7608             updateResourceTrackingOnDraw(pCB);
7609         }
7610         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7611     }
7612     lock.unlock();
7613     if (!skip_call)
7614         dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
7615 }
7616
7617 VKAPI_ATTR void VKAPI_CALL
7618 CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7619     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7620     bool skip_call = false;
7621     std::unique_lock<std::mutex> lock(global_lock);
7622
7623     auto cb_node = getCBNode(dev_data, commandBuffer);
7624     auto buff_node = getBufferNode(dev_data, buffer);
7625     if (cb_node && buff_node) {
7626         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
7627         AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7628         skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7629         cb_node->drawCount[DRAW_INDIRECT]++;
7630         skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
7631         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7632         // TODO : Need to pass commandBuffer as srcObj here
7633         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7634                              VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7635                              "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7636         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7637         if (!skip_call) {
7638             updateResourceTrackingOnDraw(cb_node);
7639         }
7640         skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
7641     } else {
7642         assert(0);
7643     }
7644     lock.unlock();
7645     if (!skip_call)
7646         dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7647 }
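
// Illustrative sketch (not part of the layer): the indirect buffer validated above is
// read by the device as tightly packed VkDrawIndirectCommand records starting at
// `offset`, `stride` bytes apart. A hypothetical helper that builds one record:
static VkDrawIndirectCommand ExampleMakeDrawIndirectRecord(uint32_t vertex_count, uint32_t instance_count) {
    VkDrawIndirectCommand cmd = {};
    cmd.vertexCount = vertex_count;
    cmd.instanceCount = instance_count;
    cmd.firstVertex = 0;
    cmd.firstInstance = 0;
    return cmd;
}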
7648
7649 VKAPI_ATTR void VKAPI_CALL
7650 CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7651     bool skip_call = false;
7652     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7653     std::unique_lock<std::mutex> lock(global_lock);
7654
7655     auto cb_node = getCBNode(dev_data, commandBuffer);
7656     auto buff_node = getBufferNode(dev_data, buffer);
7657     if (cb_node && buff_node) {
7658         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
7659         AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7660         skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7661         cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
7662         skip_call |=
7663             validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
7664         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7665         // TODO : Need to pass commandBuffer as srcObj here
7666         skip_call |=
7667             log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7668                     __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
7669                     g_drawCount[DRAW_INDEXED_INDIRECT]++);
7670         skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7671         if (!skip_call) {
7672             updateResourceTrackingOnDraw(cb_node);
7673         }
7674         skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
7675     } else {
7676         assert(0);
7677     }
7678     lock.unlock();
7679     if (!skip_call)
7680         dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7681 }
7682
7683 VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7684     bool skip_call = false;
7685     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7686     std::unique_lock<std::mutex> lock(global_lock);
7687     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7688     if (pCB) {
7689         skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
7690         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7691         skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7692         skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7693     }
7694     lock.unlock();
7695     if (!skip_call)
7696         dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
7697 }
7698
7699 VKAPI_ATTR void VKAPI_CALL
7700 CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7701     bool skip_call = false;
7702     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7703     std::unique_lock<std::mutex> lock(global_lock);
7704
7705     auto cb_node = getCBNode(dev_data, commandBuffer);
7706     auto buff_node = getBufferNode(dev_data, buffer);
7707     if (cb_node && buff_node) {
7708         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
7709         AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7710         skip_call |=
7711             validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
7712         skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7713         skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7714         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
7715     }
7716     lock.unlock();
7717     if (!skip_call)
7718         dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
7719 }
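
// Illustrative sketch (not part of the layer): the dispatch analogue of the record
// above -- the buffer at `offset` must hold one VkDispatchIndirectCommand, i.e. three
// uint32_t workgroup counts.
static VkDispatchIndirectCommand ExampleMakeDispatchIndirectRecord(uint32_t x, uint32_t y, uint32_t z) {
    VkDispatchIndirectCommand cmd = {x, y, z};
    return cmd;
}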
7720
7721 VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7722                                          uint32_t regionCount, const VkBufferCopy *pRegions) {
7723     bool skip_call = false;
7724     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7725     std::unique_lock<std::mutex> lock(global_lock);
7726
7727     auto cb_node = getCBNode(dev_data, commandBuffer);
7728     auto src_buff_node = getBufferNode(dev_data, srcBuffer);
7729     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
7730     if (cb_node && src_buff_node && dst_buff_node) {
7731         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7732         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
7733         // Update bindings between buffers and cmd buffer
7734         AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
7735         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
7736         // Validate that SRC & DST buffers have correct usage flags set
7737         skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
7738                                               "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7739         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
7740                                               "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7741
7742         std::function<bool()> function = [=]() {
7743             return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7744         };
7745         cb_node->validate_functions.push_back(function);
7746         function = [=]() {
7747             SetBufferMemoryValid(dev_data, dst_buff_node, true);
7748             return false;
7749         };
7750         cb_node->validate_functions.push_back(function);
7751
7752         skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7753         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
7754     } else {
7755         // Param_checker will flag errors on invalid objects, just assert here as debugging aid
7756         assert(0);
7757     }
7758     lock.unlock();
7759     if (!skip_call)
7760         dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7761 }
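
// Illustrative sketch (not part of the layer): the usage-flag checks above require the
// buffers to have been created with the matching TRANSFER bits. A hypothetical fragment
// touching only the relevant fields of the creation structs:
static void ExampleSetTransferUsage(VkBufferCreateInfo *src_ci, VkBufferCreateInfo *dst_ci) {
    src_ci->usage |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT; // legal source of vkCmdCopyBuffer
    dst_ci->usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT; // legal destination
}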
7762
7763 static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
7764                                     VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
7765     bool skip_call = false;
7766
7767     for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7768         uint32_t layer = i + subLayers.baseArrayLayer;
7769         VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7770         IMAGE_CMD_BUF_LAYOUT_NODE node;
7771         if (!FindLayout(cb_node, srcImage, sub, node)) {
7772             SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7773             continue;
7774         }
7775         if (node.layout != srcImageLayout) {
7776             // TODO: Improve log message in the next pass
7777             skip_call |=
7778                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7779                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7780                                                                         "and doesn't match the current layout %s.",
7781                         string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7782         }
7783     }
7784     if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7785         if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7786             // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
7787             auto image_node = getImageNode(dev_data, srcImage);
7788             if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
7789                 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7790                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7791                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7792                                      "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7793             }
7794         } else {
7795             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7796                                  DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7797                                                                        "TRANSFER_SRC_OPTIMAL or GENERAL.",
7798                                  string_VkImageLayout(srcImageLayout));
7799         }
7800     }
7801     return skip_call;
7802 }
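
// Illustrative sketch (not part of the layer): to satisfy the layout check above, an
// application transitions the source image with an image memory barrier before the
// copy. Only layout-related fields are shown; the access masks and the old layout are
// assumptions that depend on the image's prior use.
static VkImageMemoryBarrier ExampleToTransferSrcBarrier(VkImage image, VkImageSubresourceRange range) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; // assumed prior layout
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = range;
    return barrier;
}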
7803
7804 static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
7805                                   VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
7806     bool skip_call = false;
7807
7808     for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7809         uint32_t layer = i + subLayers.baseArrayLayer;
7810         VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7811         IMAGE_CMD_BUF_LAYOUT_NODE node;
7812         if (!FindLayout(cb_node, destImage, sub, node)) {
7813             SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7814             continue;
7815         }
7816         if (node.layout != destImageLayout) {
7817             skip_call |=
7818                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7819                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is "
7820                                                                         "%s and doesn't match the current layout %s.",
7821                         string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7822         }
7823     }
7824     if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7825         if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7826             auto image_node = getImageNode(dev_data, destImage);
7827             if (image_node && (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR)) {
7828                 // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7829                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7830                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7831                                      "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7832             }
7833         } else {
7834             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7835                                  DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7836                                                                        "TRANSFER_DST_OPTIMAL or GENERAL.",
7837                                  string_VkImageLayout(destImageLayout));
7838         }
7839     }
7840     return skip_call;
7841 }
7842
7843 // Test if two VkExtent3D structs are equivalent
7844 static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
7845     bool result = true;
7846     if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
7847         (extent->depth != other_extent->depth)) {
7848         result = false;
7849     }
7850     return result;
7851 }
7852
7853 // Returns the image extent of a specific subresource.
7854 static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_NODE *img, const VkImageSubresourceLayers *subresource) {
7855     const uint32_t mip = subresource->mipLevel;
7856     VkExtent3D extent = img->createInfo.extent;
7857     extent.width = std::max(1U, extent.width >> mip);
7858     extent.height = std::max(1U, extent.height >> mip);
7859     extent.depth = std::max(1U, extent.depth >> mip);
7860     return extent;
7861 }
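
// Worked example for the mip arithmetic above: a 13x7x1 image at mipLevel 2 has extent
// max(1, 13 >> 2) x max(1, 7 >> 2) x max(1, 1 >> 2) = 3x1x1; the max() clamp keeps a
// fully shifted-out dimension at 1 rather than 0.
static inline uint32_t ExampleMipDimension(uint32_t base, uint32_t mip) { return std::max<uint32_t>(1, base >> mip); }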
7862
7863 // Test if the extent argument has all dimensions set to 0.
7864 static inline bool IsExtentZero(const VkExtent3D *extent) {
7865     return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
7866 }
7867
7868 // Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
7869 static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_NODE *img) {
7870     // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
7871     VkExtent3D granularity = { 0, 0, 0 };
7872     auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
7873     if (pPool) {
7874         granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
7875         if (vk_format_is_compressed(img->createInfo.format)) {
7876             auto block_size = vk_format_compressed_block_size(img->createInfo.format);
7877             granularity.width *= block_size.width;
7878             granularity.height *= block_size.height;
7879         }
7880     }
7881     return granularity;
7882 }
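
// Worked example for the block scaling above (illustrative numbers): for a compressed
// format with 4x4 texel blocks and a queue family granularity of (2, 2, 1) blocks, the
// effective granularity in texels is (8, 8, 1). Depth is left unscaled because these
// formats compress in 2D only.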
7883
7884 // Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
7885 static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
7886     bool valid = true;
7887     if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
7888         (vk_safe_modulo(extent->height, granularity->height) != 0)) {
7889         valid = false;
7890     }
7891     return valid;
7892 }
7893
7894 // Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
7895 static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
7896                                   const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
7897     bool skip = false;
7898     VkExtent3D offset_extent = {};
7899     offset_extent.width = static_cast<uint32_t>(abs(offset->x));
7900     offset_extent.height = static_cast<uint32_t>(abs(offset->y));
7901     offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
7902     if (IsExtentZero(granularity)) {
7903         // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
7904         if (IsExtentZero(&offset_extent) == false) {
7905             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7906                             DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7907                             "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
7908                             "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
7909                             function, i, member, offset->x, offset->y, offset->z);
7910         }
7911     } else {
7912         // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
7913         // integer multiples of the image transfer granularity.
7914         if (IsExtentAligned(&offset_extent, granularity) == false) {
7915             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7916                             DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7917                             "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
7918                             "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
7919                             function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
7920                             granularity->depth);
7921         }
7922     }
7923     return skip;
7924 }
7925
7926 // Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
7927 static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
7928                                   const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
7929                                   const uint32_t i, const char *function, const char *member) {
7930     bool skip = false;
7931     if (IsExtentZero(granularity)) {
7932         // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
7933         // subresource extent.
7934         if (IsExtentEqual(extent, subresource_extent) == false) {
7935             skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7936                             DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7937                             "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
7938                             "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
7939                             function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
7940                             subresource_extent->height, subresource_extent->depth);
7941         }
7942     } else {
7943         // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
7944         // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
7945         // subresource extent dimensions.
7946         VkExtent3D offset_extent_sum = {};
7947         offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
7948         offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
7949         offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
7950         if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
7951             skip |=
7952                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7953                         DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7954                         "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
7955                         "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
7956                         "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
7957                         function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
7958                         granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
7959                         subresource_extent->width, subresource_extent->height, subresource_extent->depth);
7960         }
7961     }
7962     return skip;
7963 }
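
// Worked example for the extent rule above (illustrative numbers): with a granularity of
// (8, 8, 1) and a 23x15x1 subresource, offset (8, 8, 0) + extent (15, 7, 1) is accepted:
// the extent is not a granularity multiple, but offset + extent = (23, 15, 1) reaches the
// subresource edge, which the second clause of the rule permits.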
7964
7965 // Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
7966 static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
7967                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
7968     bool skip = false;
7969     if (vk_safe_modulo(value, granularity) != 0) {
7970         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7971                         DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7972                         "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
7973                         "transfer granularity width (%d).",
7974                         function, i, member, value, granularity);
7975     }
7976     return skip;
7977 }
7978
7979 // Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
7980 static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
7981                                 const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
7982     bool skip = false;
7983     if (vk_safe_modulo(value, granularity) != 0) {
7984         skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7985                         DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7986                         "%s: pRegion[%d].%s (%" PRIdLEAST64
7987                         ") must be an even integer multiple of this command buffer's queue family image transfer "
7988                         "granularity width (%d).",
7989                         function, i, member, value, granularity);
7990     }
7991     return skip;
7992 }
7993
7994 // Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
7995 static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
7996                                                                     const IMAGE_NODE *img, const VkImageCopy *region,
7997                                                                     const uint32_t i, const char *function) {
7998     bool skip = false;
7999     VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8000     skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
8001     skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
8002     VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
8003     skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
8004                            "extent");
8005     return skip;
8006 }
8007
8008 // Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
8009 static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
8010                                                                           const IMAGE_NODE *img, const VkBufferImageCopy *region,
8011                                                                           const uint32_t i, const char *function) {
8012     bool skip = false;
8013     VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
8014     skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
8015     skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
8016     skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
8017     skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
8018     VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
8019     skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
8020                            function, "imageExtent");
8021     return skip;
8022 }
8023
8024 VKAPI_ATTR void VKAPI_CALL
8025 CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8026              VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
8027     bool skip_call = false;
8028     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8029     std::unique_lock<std::mutex> lock(global_lock);
8030
8031     auto cb_node = getCBNode(dev_data, commandBuffer);
8032     auto src_img_node = getImageNode(dev_data, srcImage);
8033     auto dst_img_node = getImageNode(dev_data, dstImage);
8034     if (cb_node && src_img_node && dst_img_node) {
8035         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImage()");
8036         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyImage()");
8037         // Update bindings between images and cmd buffer
8038         AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8039         AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8040         // Validate that SRC & DST images have correct usage flags set
8041         skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
8042                                              "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8043         skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
8044                                              "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8045         std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImage()"); };
8046         cb_node->validate_functions.push_back(function);
8047         function = [=]() {
8048             SetImageMemoryValid(dev_data, dst_img_node, true);
8049             return false;
8050         };
8051         cb_node->validate_functions.push_back(function);
8052
8053         skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
8054         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
8055         for (uint32_t i = 0; i < regionCount; ++i) {
8056             skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
8057             skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
8058             skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
8059                                                                           "vkCmdCopyImage()");
8060         }
8061     } else {
8062         assert(0);
8063     }
8064     lock.unlock();
8065     if (!skip_call)
8066         dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8067                                               pRegions);
8068 }
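
// Illustrative sketch (not part of the layer): a single full-subresource VkImageCopy of
// the kind validated above. The color aspect and zero offsets are assumptions for a
// simple 2D, non-array color image.
static VkImageCopy ExampleFullColorCopyRegion(VkExtent3D extent) {
    VkImageCopy region = {};
    region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mipLevel*/, 0 /*baseArrayLayer*/, 1 /*layerCount*/};
    region.dstSubresource = region.srcSubresource;
    region.srcOffset = {0, 0, 0};
    region.dstOffset = {0, 0, 0};
    region.extent = extent;
    return region;
}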
8069
8070 // Validate that an image's sampleCount matches the requirement for a specific API call
8071 static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_NODE *image_node, VkSampleCountFlagBits sample_count,
8072                                             const char *location) {
8073     bool skip = false;
8074     if (image_node->createInfo.samples != sample_count) {
8075         skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8076                        reinterpret_cast<uint64_t &>(image_node->image), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
8077                        "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
8078                        reinterpret_cast<uint64_t &>(image_node->image),
8079                        string_VkSampleCountFlagBits(image_node->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
8080     }
8081     return skip;
8082 }
8083
8084 VKAPI_ATTR void VKAPI_CALL
8085 CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8086              VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
8087     bool skip_call = false;
8088     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8089     std::unique_lock<std::mutex> lock(global_lock);
8090
8091     auto cb_node = getCBNode(dev_data, commandBuffer);
8092     auto src_img_node = getImageNode(dev_data, srcImage);
8093     auto dst_img_node = getImageNode(dev_data, dstImage);
8094     if (cb_node && src_img_node && dst_img_node) {
8095         skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
8096         skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
8097         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdBlitImage()");
8098         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdBlitImage()");
8099         // Update bindings between images and cmd buffer
8100         AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8101         AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8102         // Validate that SRC & DST images have correct usage flags set
8103         skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
8104                                              "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8105         skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
8106                                              "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8107         std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdBlitImage()"); };
8108         cb_node->validate_functions.push_back(function);
8109         function = [=]() {
8110             SetImageMemoryValid(dev_data, dst_img_node, true);
8111             return false;
8112         };
8113         cb_node->validate_functions.push_back(function);
8114
8115         skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
8116         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
8117     } else {
8118         assert(0);
8119     }
8120     lock.unlock();
8121     if (!skip_call)
8122         dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8123                                               pRegions, filter);
8124 }
8125
8126 VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8127                                                 VkImage dstImage, VkImageLayout dstImageLayout,
8128                                                 uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8129     bool skip_call = false;
8130     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8131     std::unique_lock<std::mutex> lock(global_lock);
8132
8133     auto cb_node = getCBNode(dev_data, commandBuffer);
8134     auto src_buff_node = getBufferNode(dev_data, srcBuffer);
8135     auto dst_img_node = getImageNode(dev_data, dstImage);
8136     if (cb_node && src_buff_node && dst_img_node) {
8137         skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
8138         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
8139         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyBufferToImage()");
8140         AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
8141         AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8142         skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8143                                               "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8144         skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8145                                              "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8146         std::function<bool()> function = [=]() {
8147             SetImageMemoryValid(dev_data, dst_img_node, true);
8148             return false;
8149         };
8150         cb_node->validate_functions.push_back(function);
8151         function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
8152         cb_node->validate_functions.push_back(function);
8153
8154         skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8155         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
8156         for (uint32_t i = 0; i < regionCount; ++i) {
8157             skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8158             skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
8159                                                                                 "vkCmdCopyBufferToImage()");
8160         }
8161     } else {
8162         assert(0);
8163     }
8164     lock.unlock();
8165     if (!skip_call)
8166         dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
8167 }
8168
8169 VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8170                                                 VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8171                                                 uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8172     bool skip_call = false;
8173     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8174     std::unique_lock<std::mutex> lock(global_lock);
8175
8176     auto cb_node = getCBNode(dev_data, commandBuffer);
8177     auto src_img_node = getImageNode(dev_data, srcImage);
8178     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8179     if (cb_node && src_img_node && dst_buff_node) {
8180         skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage");
8181         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8182         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
8183         // Update bindings between buffer/image and cmd buffer
8184         AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8185         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8186         // Validate that SRC image & DST buffer have correct usage flags set
8187         skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8188                                              "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8189         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8190                                               "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8191         std::function<bool()> function = [=]() {
8192             return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8193         };
8194         cb_node->validate_functions.push_back(function);
8195         function = [=]() {
8196             SetBufferMemoryValid(dev_data, dst_buff_node, true);
8197             return false;
8198         };
8199         cb_node->validate_functions.push_back(function);
8200
8201         skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8202         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
8203         for (uint32_t i = 0; i < regionCount; ++i) {
8204             skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8205             skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_img_node, &pRegions[i], i,
8206                                                                                 "vkCmdCopyImageToBuffer()");
8207         }
8208     } else {
8209         assert(0);
8210     }
8211     lock.unlock();
8212     if (!skip_call)
8213         dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
8214 }
8215
8216 VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8217                                            VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8218     bool skip_call = false;
8219     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8220     std::unique_lock<std::mutex> lock(global_lock);
8221
8222     auto cb_node = getCBNode(dev_data, commandBuffer);
8223     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8224     if (cb_node && dst_buff_node) {
8225         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
8226         // Update bindings between buffer and cmd buffer
8227         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8228         // Validate that DST buffer has correct usage flags set
8229         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8230                                               "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8231         std::function<bool()> function = [=]() {
8232             SetBufferMemoryValid(dev_data, dst_buff_node, true);
8233             return false;
8234         };
8235         cb_node->validate_functions.push_back(function);
8236
8237         skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8238         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
8239     } else {
8240         assert(0);
8241     }
8242     lock.unlock();
8243     if (!skip_call)
8244         dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8245 }
8246
8247 VKAPI_ATTR void VKAPI_CALL
8248 CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8249     bool skip_call = false;
8250     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8251     std::unique_lock<std::mutex> lock(global_lock);
8252
8253     auto cb_node = getCBNode(dev_data, commandBuffer);
8254     auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8255     if (cb_node && dst_buff_node) {
8256         skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
8257         // Update bindings between buffer and cmd buffer
8258         AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8259         // Validate that DST buffer has correct usage flags set
8260         skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
8261                                               "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8262         std::function<bool()> function = [=]() {
8263             SetBufferMemoryValid(dev_data, dst_buff_node, true);
8264             return false;
8265         };
8266         cb_node->validate_functions.push_back(function);
8267
8268         skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8269         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
8270     } else {
8271         assert(0);
8272     }
8273     lock.unlock();
8274     if (!skip_call)
8275         dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8276 }
8277
8278 VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8279                                                const VkClearAttachment *pAttachments, uint32_t rectCount,
8280                                                const VkClearRect *pRects) {
8281     bool skip_call = false;
8282     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8283     std::unique_lock<std::mutex> lock(global_lock);
8284     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8285     if (pCB) {
8286         skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8287         // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8288         if (!hasDrawCmd(pCB) && (rectCount > 0) &&
8289             (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8290             (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8291             // There are times when an app must use ClearAttachments (generally when reusing a buffer inside a render pass).
8292             // Can we make this warning more specific? We'd like to avoid triggering it when we can tell the app has no
8293             // alternative to CmdClearAttachments; otherwise this is better treated as a performance warning.
8294             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8295                                  VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer),
8296                                  __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8297                                  "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8298                                  " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8299                                  (uint64_t)(commandBuffer));
8300         }
8301         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
8302     }
8303
8304     // Validate that attachment is in reference list of active subpass
8305     if (pCB && pCB->activeRenderPass) {
8306         const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
8307         const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8308
8309         for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8310             const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8311             if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8312                 if (attachment->colorAttachment >= pSD->colorAttachmentCount) {
8313                     skip_call |= log_msg(
8314                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8315                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8316                         "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d; ignored",
8317                         attachment->colorAttachment, pCB->activeSubpass);
8318                 }
8319                 else if (pSD->pColorAttachments[attachment->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
8320                     skip_call |= log_msg(
8321                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8322                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8323                         "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored",
8324                         attachment->colorAttachment);
8325                 }
8326             } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8327                 if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8328                     (pSD->pDepthStencilAttachment->attachment ==
8329                      VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8330
8331                     skip_call |= log_msg(
8332                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8333                         (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8334                         "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
8335                 }
8336             }
8337         }
8338     }
8339     lock.unlock();
8340     if (!skip_call)
8341         dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8342 }
8343
8344 VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8345                                               VkImageLayout imageLayout, const VkClearColorValue *pColor,
8346                                               uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8347     bool skip_call = false;
8348     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8349     std::unique_lock<std::mutex> lock(global_lock);
8350     // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8351
8352     auto cb_node = getCBNode(dev_data, commandBuffer);
8353     auto img_node = getImageNode(dev_data, image);
8354     if (cb_node && img_node) {
8355         skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearColorImage()");
8356         AddCommandBufferBindingImage(dev_data, cb_node, img_node);
8357         std::function<bool()> function = [=]() {
8358             SetImageMemoryValid(dev_data, img_node, true);
8359             return false;
8360         };
8361         cb_node->validate_functions.push_back(function);
8362
8363         skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8364         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
8365     } else {
8366         assert(0);
8367     }
8368     lock.unlock();
8369     if (!skip_call)
8370         dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8371 }
8372
8373 VKAPI_ATTR void VKAPI_CALL
8374 CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8375                           const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8376                           const VkImageSubresourceRange *pRanges) {
8377     bool skip_call = false;
8378     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8379     std::unique_lock<std::mutex> lock(global_lock);
8380     // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8381
8382     auto cb_node = getCBNode(dev_data, commandBuffer);
8383     auto img_node = getImageNode(dev_data, image);
8384     if (cb_node && img_node) {
8385         skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearDepthStencilImage()");
8386         AddCommandBufferBindingImage(dev_data, cb_node, img_node);
8387         std::function<bool()> function = [=]() {
8388             SetImageMemoryValid(dev_data, img_node, true);
8389             return false;
8390         };
8391         cb_node->validate_functions.push_back(function);
8392
8393         skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8394         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
8395     } else {
8396         assert(0);
8397     }
8398     lock.unlock();
8399     if (!skip_call)
8400         dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
8401 }
8402
8403 VKAPI_ATTR void VKAPI_CALL
8404 CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8405                 VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8406     bool skip_call = false;
8407     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8408     std::unique_lock<std::mutex> lock(global_lock);
8409
8410     auto cb_node = getCBNode(dev_data, commandBuffer);
8411     auto src_img_node = getImageNode(dev_data, srcImage);
8412     auto dst_img_node = getImageNode(dev_data, dstImage);
8413     if (cb_node && src_img_node && dst_img_node) {
8414         skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdResolveImage()");
8415         skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdResolveImage()");
8416         // Update bindings between images and cmd buffer
8417         AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8418         AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8419         std::function<bool()> function = [=]() {
8420             return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdResolveImage()");
8421         };
8422         cb_node->validate_functions.push_back(function);
8423         function = [=]() {
8424             SetImageMemoryValid(dev_data, dst_img_node, true);
8425             return false;
8426         };
8427         cb_node->validate_functions.push_back(function);
8428
8429         skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8430         skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
8431     } else {
8432         assert(0);
8433     }
8434     lock.unlock();
8435     if (!skip_call)
8436         dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8437                                                  pRegions);
8438 }
8439
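// Queue-submit-time callback: record the stageMask most recently set for 'event' in both the command
// buffer's and the owning queue's event-to-stage maps. Always returns false (it never skips a call).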
8440 bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8441     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8442     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8443     if (pCB) {
8444         pCB->eventToStageMap[event] = stageMask;
8445     }
8446     auto queue_data = dev_data->queueMap.find(queue);
8447     if (queue_data != dev_data->queueMap.end()) {
8448         queue_data->second.eventToStageMap[event] = stageMask;
8449     }
8450     return false;
8451 }
8452
8453 VKAPI_ATTR void VKAPI_CALL
8454 CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8455     bool skip_call = false;
8456     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8457     std::unique_lock<std::mutex> lock(global_lock);
8458     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8459     if (pCB) {
8460         skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8461         skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
8462         auto event_node = getEventNode(dev_data, event);
8463         if (event_node) {
8464             addCommandBufferBinding(&event_node->cb_bindings,
8465                                     {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8466             event_node->cb_bindings.insert(pCB);
8467         }
8468         pCB->events.push_back(event);
8469         if (!pCB->waitedEvents.count(event)) {
8470             pCB->writeEventsBeforeWait.push_back(event);
8471         }
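        // Defer the event stage-mask update to submit time, when the owning queue is known.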
8472         std::function<bool(VkQueue)> eventUpdate =
8473             std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8474         pCB->eventUpdates.push_back(eventUpdate);
8475     }
8476     lock.unlock();
8477     if (!skip_call)
8478         dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
8479 }
8480
8481 VKAPI_ATTR void VKAPI_CALL
8482 CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8483     bool skip_call = false;
8484     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8485     std::unique_lock<std::mutex> lock(global_lock);
8486     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8487     if (pCB) {
8488         skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8489         skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8490         auto event_node = getEventNode(dev_data, event);
8491         if (event_node) {
8492             addCommandBufferBinding(&event_node->cb_bindings,
8493                                     {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8494             event_node->cb_bindings.insert(pCB);
8495         }
8496         pCB->events.push_back(event);
8497         if (!pCB->waitedEvents.count(event)) {
8498             pCB->writeEventsBeforeWait.push_back(event);
8499         }
8500         std::function<bool(VkQueue)> eventUpdate =
8501             std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8502         pCB->eventUpdates.push_back(eventUpdate);
8503     }
8504     lock.unlock();
8505     if (!skip_call)
8506         dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
8507 }
8508
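// Record the layout transitions requested by the given image memory barriers in the command buffer's
// per-subresource layout map. The first time a subresource is seen, its old/new layouts are simply
// recorded; afterwards, a transition whose oldLayout does not match the tracked layout is an error
// (except from VK_IMAGE_LAYOUT_UNDEFINED, which matches anything).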
8509 static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8510                                    const VkImageMemoryBarrier *pImgMemBarriers) {
8511     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8512     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8513     bool skip = false;
8514     uint32_t levelCount = 0;
8515     uint32_t layerCount = 0;
8516
8517     for (uint32_t i = 0; i < memBarrierCount; ++i) {
8518         auto mem_barrier = &pImgMemBarriers[i];
8519         if (!mem_barrier)
8520             continue;
8521         // TODO: Do not iterate over every possibility - consolidate where
8522         // possible
8523         ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8524
8525         for (uint32_t j = 0; j < levelCount; j++) {
8526             uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8527             for (uint32_t k = 0; k < layerCount; k++) {
8528                 uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8529                 VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8530                 IMAGE_CMD_BUF_LAYOUT_NODE node;
8531                 if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8532                     SetLayout(pCB, mem_barrier->image, sub,
8533                               IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8534                     continue;
8535                 }
8536                 if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8537                     // TODO: Set memory invalid which is in mem_tracker currently
8538                 } else if (node.layout != mem_barrier->oldLayout) {
8539                     skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8540                                     __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8541                                                                                     "when current layout is %s.",
8542                                     string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8543                 }
8544                 SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8545             }
8546         }
8547     }
8548     return skip;
8549 }
8550
8551 // Print readable FlagBits in FlagMask
8552 static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8553     std::string result;
8554     std::string separator;
8555
8556     if (accessMask == 0) {
8557         result = "[None]";
8558     } else {
8559         result = "[";
8560         for (uint32_t i = 0; i < 32; i++) {
8561             if (accessMask & (1u << i)) {
8562                 result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8563                 separator = " | ";
8564             }
8565         }
8566         result = result + "]";
8567     }
8568     return result;
8569 }
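// e.g. string_VkAccessFlags(VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT) returns
// "[VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT]" (bits are emitted from low to high).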
8570
8571 // AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8572 // If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8573 // TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8574 static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8575                              const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8576                              const char *type) {
8577     bool skip_call = false;
8578
8579     if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8580         if (accessMask & ~(required_bit | optional_bits)) {
8581             // TODO: Verify against Valid Use
8582             skip_call |=
8583                 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8584                         DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8585                         type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8586         }
8587     } else {
8588         if (!required_bit) {
8589             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8590                                  DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8591                                                                   "%s when layout is %s, unless the app has previously added a "
8592                                                                   "barrier for this transition.",
8593                                  type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8594                                  string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8595         } else {
8596             std::string opt_bits;
8597             if (optional_bits != 0) {
8598                 std::stringstream ss;
8599                 ss << optional_bits;
8600                 opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8601             }
8602             skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8603                                  DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8604                                                                   "layout is %s, unless the app has previously added a barrier for "
8605                                                                   "this transition.",
8606                                  type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8607                                  string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8608         }
8609     }
8610     return skip_call;
8611 }
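// e.g. for VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL the caller passes required_bit = VK_ACCESS_TRANSFER_WRITE_BIT
// and optional_bits = 0, so an accessMask of exactly VK_ACCESS_TRANSFER_WRITE_BIT passes silently while
// missing or extra bits produce a warning.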
8612
8613 static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8614                                         const VkImageLayout &layout, const char *type) {
8615     bool skip_call = false;
8616     switch (layout) {
8617     case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8618         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8619                                       VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8620         break;
8621     }
8622     case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8623         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8624                                       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8625         break;
8626     }
8627     case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8628         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8629         break;
8630     }
8631     case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8632         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8633         break;
8634     }
8635     case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8636         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8637                                       VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8638         break;
8639     }
8640     case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8641         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8642                                       VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8643         break;
8644     }
8645     case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8646         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8647         break;
8648     }
8649     case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
8650         skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
8651         break;
8652     }
8653     case VK_IMAGE_LAYOUT_UNDEFINED: {
8654         if (accessMask != 0) {
8655             // TODO: Verify against Valid Use section spec
8656             skip_call |=
8657                 log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8658                         DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8659                         type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8660         }
8661         break;
8662     }
8663     case VK_IMAGE_LAYOUT_GENERAL:
8664     default: { break; }
8665     }
8666     return skip_call;
8667 }
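// Illustrative only (not code from this layer): a barrier that satisfies both checks when moving a color
// attachment into a shader-readable state --
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;  // srcAccessMask is validated against this
//     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;  // dstAccessMask is validated against this
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;  // required bit for the old layout
//     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;             // one of the optional bits for the new layout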
8668
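// Validate the barriers recorded by vkCmdWaitEvents/vkCmdPipelineBarrier: barriers inside a render pass
// require a subpass self-dependency, queue family indices must be consistent with the resource's sharing
// mode, access masks are checked against any layout transition, aspect masks must match the image format,
// subresource ranges must stay within the image, and buffer offset/size must stay within the buffer.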
8669 static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8670                              const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8671                              const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8672                              const VkImageMemoryBarrier *pImageMemBarriers) {
8673     bool skip_call = false;
8674     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8675     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8676     if (pCB->activeRenderPass && memBarrierCount) {
8677         if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
8678             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8679                                  DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8680                                                                   "with no self dependency specified.",
8681                                  funcName, pCB->activeSubpass);
8682         }
8683     }
8684     for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8685         auto mem_barrier = &pImageMemBarriers[i];
8686         auto image_data = getImageNode(dev_data, mem_barrier->image);
8687         if (image_data) {
8688             uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8689             uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8690             if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8691                 // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8692                 // be VK_QUEUE_FAMILY_IGNORED
8693                 if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8694                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8695                                          __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8696                                          "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8697                                          "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8698                                          "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8699                                          funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8700                 }
8701             } else {
8702                 // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8703                 // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8704                 // or both be a valid queue family
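                // (e.g. an EXCLUSIVE-mode ownership transfer from a graphics queue family to a transfer
                // queue family sets srcQueueFamilyIndex and dstQueueFamilyIndex to those two indices in
                // both the release and the acquire barrier)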
8705                 if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8706                     (src_q_f_index != dst_q_f_index)) {
8707                     skip_call |=
8708                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8709                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8710                                                                      "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8711                                                                      "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8712                                                                      "must be.",
8713                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8714                 } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8715                            ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8716                             (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8717                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8718                                          __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8719                                          "%s: Image 0x%" PRIx64 " was created with sharingMode "
8720                                          "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8721                                          " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8722                                          " queueFamilies created for this device.",
8723                                          funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8724                                          dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
8725                 }
8726             }
8727         }
8728
8729         if (mem_barrier) {
8730             if (mem_barrier->oldLayout != mem_barrier->newLayout) {
8731                 skip_call |=
8732                     ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8733                 skip_call |=
8734                     ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8735             }
8736             if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8737                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8738                         DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
8739                                                          "PREINITIALIZED.",
8740                         funcName);
8741             }
8742             auto image_data = getImageNode(dev_data, mem_barrier->image);
8743             VkFormat format = VK_FORMAT_UNDEFINED;
8744             uint32_t arrayLayers = 0, mipLevels = 0;
8745             bool imageFound = false;
8746             if (image_data) {
8747                 format = image_data->createInfo.format;
8748                 arrayLayers = image_data->createInfo.arrayLayers;
8749                 mipLevels = image_data->createInfo.mipLevels;
8750                 imageFound = true;
8751             } else if (dev_data->device_extensions.wsi_enabled) {
8752                 auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
8753                 if (imageswap_data) {
8754                     auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
8755                     if (swapchain_data) {
8756                         format = swapchain_data->createInfo.imageFormat;
8757                         arrayLayers = swapchain_data->createInfo.imageArrayLayers;
8758                         mipLevels = 1;
8759                         imageFound = true;
8760                     }
8761                 }
8762             }
8763             if (imageFound) {
8764                 auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
8765                 if (vk_format_is_depth_or_stencil(format)) {
8766                     if (vk_format_is_depth_and_stencil(format)) {
8767                         if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) && !(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8768                             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8769                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8770                                     "%s: Image is a depth and stencil format and thus must "
8771                                     "have either one or both of VK_IMAGE_ASPECT_DEPTH_BIT and "
8772                                     "VK_IMAGE_ASPECT_STENCIL_BIT set.",
8773                                     funcName);
8774                         }
8775                     } else if (vk_format_is_depth_only(format)) {
8776                         if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
8777                             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8778                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth-only format and thus must "
8779                                                                                "have VK_IMAGE_ASPECT_DEPTH_BIT set.",
8780                                     funcName);
8781                         }
8782                     } else { // stencil-only case
8783                         if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8784                             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8785                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a stencil-only format and thus must "
8786                                                                                "have VK_IMAGE_ASPECT_STENCIL_BIT set.",
8787                                     funcName);
8788                         }
8789                     }
8790                 } else { // image is a color format
8791                     if (!(aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT)) {
8792                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8793                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a color format and thus must "
8794                                                                  "have VK_IMAGE_ASPECT_COLOR_BIT set.",
8795                                 funcName);
8796                     }
8797                 }
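                // Note: VK_REMAINING_ARRAY_LAYERS / VK_REMAINING_MIP_LEVELS are folded to a count of 1
                // below, since a "remaining" range only requires its base index to be in bounds.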
8798                 int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8799                                      ? 1
8800                                      : mem_barrier->subresourceRange.layerCount;
8801                 if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8802                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8803                             DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
8804                                                              "baseArrayLayer (%d) and layerCount (%d) be less "
8805                                                              "than or equal to the total number of layers (%d).",
8806                             funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
8807                             arrayLayers);
8808                 }
8809                 int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8810                                      ? 1
8811                                      : mem_barrier->subresourceRange.levelCount;
8812                 if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8813                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8814                             DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
8815                                                              "(%d) and levelCount (%d) be less than or equal to "
8816                                                              "the total number of levels (%d).",
8817                             funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
8818                             mipLevels);
8819                 }
8820             }
8821         }
8822     }
8823     for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8824         auto mem_barrier = &pBufferMemBarriers[i];
8825         if (pCB->activeRenderPass) {
8826             skip_call |=
8827                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8828                         DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8829         }
8830         if (!mem_barrier)
8831             continue;
8832
8833         // Validate buffer barrier queue family indices
8834         if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8835              mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8836             (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8837              mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8838             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8839                                  DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8840                                  "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8841                                  "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8842                                  funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8843                                  dev_data->phys_dev_properties.queue_family_properties.size());
8844         }
8845
8846         auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
8847         if (buffer_node) {
8848             auto buffer_size = buffer_node->memSize;
8849             if (mem_barrier->offset >= buffer_size) {
8850                 skip_call |= log_msg(
8851                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8852                     DRAWSTATE_INVALID_BARRIER, "DS",
8853                     "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
8854                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8855                     reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
8856             } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8857                 skip_call |= log_msg(
8858                     dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8859                     DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8860                                                      " whose sum is greater than total size 0x%" PRIx64 ".",
8861                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8862                     reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8863                     reinterpret_cast<const uint64_t &>(buffer_size));
8864             }
8865         }
8866     }
8867     return skip_call;
8868 }
8869
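// Queue-submit-time callback: verify that the srcStageMask passed to vkCmdWaitEvents is the bitwise OR of
// the stageMasks the waited events were set with (e.g. events set with TOP_OF_PIPE and TRANSFER require
// srcStageMask == (TOP_OF_PIPE | TRANSFER)), optionally with VK_PIPELINE_STAGE_HOST_BIT added to cover
// events signaled from the host via vkSetEvent.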
8870 bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                                 VkPipelineStageFlags sourceStageMask) {
8871     bool skip_call = false;
8872     VkPipelineStageFlags stageMask = 0;
8873     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8874     for (uint32_t i = 0; i < eventCount; ++i) {
8875         auto event = pCB->events[firstEventIndex + i];
8876         auto queue_data = dev_data->queueMap.find(queue);
8877         if (queue_data == dev_data->queueMap.end())
8878             return false;
8879         auto event_data = queue_data->second.eventToStageMap.find(event);
8880         if (event_data != queue_data->second.eventToStageMap.end()) {
8881             stageMask |= event_data->second;
8882         } else {
8883             auto global_event_data = getEventNode(dev_data, event);
8884             if (!global_event_data) {
8885                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8886                                      reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8887                                      "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8888                                      reinterpret_cast<const uint64_t &>(event));
8889             } else {
8890                 stageMask |= global_event_data->stageMask;
8891             }
8892         }
8893     }
8894     // TODO: Need to validate that host_bit is only set if set event is called
8895     // but set event can be called at any time.
8896     if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8897         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8898                              DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to VkCmdWaitEvents "
8899                                                             "using srcStageMask 0x%X which must be the bitwise "
8900                                                             "OR of the stageMask parameters used in calls to "
8901                                                             "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
8902                                                             "used with vkSetEvent but instead is 0x%X.",
8903                              sourceStageMask, stageMask);
8904     }
8905     return skip_call;
8906 }
8907
8908 VKAPI_ATTR void VKAPI_CALL
8909 CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8910               VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8911               uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8912               uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8913     bool skip_call = false;
8914     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8915     std::unique_lock<std::mutex> lock(global_lock);
8916     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8917     if (pCB) {
8918         auto firstEventIndex = pCB->events.size();
8919         for (uint32_t i = 0; i < eventCount; ++i) {
8920             auto event_node = getEventNode(dev_data, pEvents[i]);
8921             if (event_node) {
8922                 addCommandBufferBinding(&event_node->cb_bindings,
8923                                         {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
8924                                         pCB);
8925                 event_node->cb_bindings.insert(pCB);
8926             }
8927             pCB->waitedEvents.insert(pEvents[i]);
8928             pCB->events.push_back(pEvents[i]);
8929         }
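        // Defer srcStageMask validation to submit time, when per-queue event state is available.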
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip_call |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                               imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL
CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip_call |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
}

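// Deferred (queue-submit time) callback body: record a query's state in both the command buffer's
// map and the owning queue's map. Always returns false -- recording state is never a skip reason.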
bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
            pCB->queryUpdates.push_back(queryUpdate);
        }
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

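// Queue-submit time callback: verify that every query in the copied range is in the available
// (ended) state, consulting the per-queue state map first and falling back to the global map.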
bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data == dev_data->queueMap.end())
        return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        auto query_data = queue_data->second.queryToStateMap.find(query);
        bool fail = false;
        if (query_data != queue_data->second.queryToStateMap.end()) {
            if (!query_data->second) {
                fail = true;
            }
        } else {
            auto global_query_data = dev_data->queryToStateMap.find(query);
            if (global_query_data != dev_data->queryToStateMap.end()) {
                if (!global_query_data->second) {
                    fail = true;
                }
            } else {
                fail = true;
            }
        }
        if (fail) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        std::function<bool(VkQueue)> queryUpdate =
            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
        cb_node->queryUpdates.push_back(queryUpdate);
        if (cb_node->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
                                                         stride, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                            const void *pValues) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
    }

    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
    auto pipeline_layout = getPipelineLayout(dev_data, layout);
    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
    // contained in the pipeline ranges.
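    // For example, ranges {VK_SHADER_STAGE_VERTEX_BIT, offset 0, size 16} and
    // {VK_SHADER_STAGE_VERTEX_BIT, offset 8, size 24} coalesce into the single span [0, 32), so an
    // update with the same stageFlags at offset 4 with size 24 is accepted even though it fits
    // inside neither range on its own.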
    // Build a {start, end} span list for ranges with matching stage flags.
    const auto &ranges = pipeline_layout->push_constant_ranges;
    struct span {
        uint32_t start;
        uint32_t end;
    };
    std::vector<span> spans;
    spans.reserve(ranges.size());
    for (const auto &iter : ranges) {
        if (iter.stageFlags == stageFlags) {
            spans.push_back({iter.offset, iter.offset + iter.size});
        }
    }
    if (spans.size() == 0) {
        // There were no ranges that matched the stageFlags.
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
                                                          "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
                    (uint32_t)stageFlags, (uint64_t)layout);
    } else {
        // Sort span list by start value.
        struct comparer {
            bool operator()(struct span i, struct span j) { return i.start < j.start; }
        } my_comparer;
        std::sort(spans.begin(), spans.end(), my_comparer);

        // Examine two spans at a time.
        std::vector<span>::iterator current = spans.begin();
        std::vector<span>::iterator next = current + 1;
        while (next != spans.end()) {
            if (current->end < next->start) {
                // There is a gap; cannot coalesce. Move to the next two spans.
                ++current;
                ++next;
            } else {
                // Coalesce the two spans.  The start of the next span
                // is within the current span, so pick the larger of
                // the end values to extend the current span.
                // Then delete the next span and set next to the span after it.
                current->end = max(current->end, next->end);
                next = spans.erase(next);
            }
        }

        // Now we can check if the incoming range is within any of the spans.
        bool contained_in_a_range = false;
        for (uint32_t i = 0; i < spans.size(); ++i) {
            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
                contained_in_a_range = true;
                break;
            }
        }
        if (!contained_in_a_range) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
                                                              "with stageFlags = 0x%" PRIx32 " "
                                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
                        offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

VKAPI_ATTR void VKAPI_CALL
CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

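// Helper for ValidateFramebufferCreateInfo below: for each attachment reference actually in use,
// confirm that the underlying image was created with the required VK_IMAGE_USAGE_* bit set.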
static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
    bool skip_call = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = getImageViewState(dev_data, *image_view);
                if (view_state) {
                    // Null-check the image node itself before touching createInfo; taking the address
                    // of a member through a null node pointer would be undefined behavior.
                    auto image_node = getImageNode(dev_data, view_state->create_info.image);
                    if (image_node != nullptr) {
                        const VkImageCreateInfo *ici = &image_node->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s).",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
                }
            }
        }
    }
    return skip_call;
}

// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip_call = false;

    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
    if (rp_node) {
        const VkRenderPassCreateInfo *rpci = rp_node->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
                              "the format of "
                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
                }
                const VkImageCreateInfo *ici = &getImageNode(dev_data, ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
                                         i, ivci.subresourceRange.levelCount);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
                                "than the corresponding "
                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
                                "dimensions for "
                                "attachment #%u, framebuffer:\n"
                                "width: %u, %u\n"
                                "height: %u, %u\n"
                                "layerCount: %u, %u\n",
                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
                        "r swizzle = %s\n"
                        "g swizzle = %s\n"
                        "b swizzle = %s\n"
                        "a swizzle = %s\n",
                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
            // Verify color attachments:
            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
            }
        }
    } else {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
    }
    // Verify FB dimensions are within physical device limits
    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
                             "Here are the respective dimensions: requested, device max:\n"
                             "width: %u, %u\n"
                             "height: %u, %u\n"
                             "layerCount: %u, %u\n",
                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
    }
    return skip_call;
}

// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
//  Return true if an error is encountered and callback returns true to skip call down chain
//   false indicates that call down chain should proceed
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    // TODO : Verify that the renderPass that the FB is created with is compatible with the FB
    bool skip_call = false;
    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
    return skip_call;
}

// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
        new FRAMEBUFFER_NODE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = getImageViewState(dev_data, view);
        if (!view_state) {
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.mem = getImageNode(dev_data, view_state->create_info.image)->mem;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_node->attachments.push_back(fb_info);
    }
    dev_data->frameBufferMap[fb] = std::move(fb_node);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}

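// Depth-first search backwards along each node's "prev" edges, starting at subpass "index":
// returns true if subpass "dependent" is reachable, i.e. a (possibly transitive) dependency
// between the two subpasses already exists in the DAG.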
static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node we have not found a dependency path so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}

static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If not, throw an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node then this node needs to preserve it.
    if (result && depth > 0) {
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

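// Standard half-open interval overlap test: [offset1, offset1 + size1) and [offset2, offset2 + size2)
// overlap iff each interval starts before the other one ends. For example, [0, 5) and [3, 8)
// overlap, while [0, 5) and [5, 9) do not.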
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return ((offset1 + size1) > offset2) && ((offset2 + size2) > offset1);
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

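// Walk a framebuffer/renderpass pair in three phases: (1) find attachments that alias the same
// view, subresource range, or backing memory, (2) record which subpasses read and write each
// attachment (including its aliases), then (3) require an explicit or transitive dependency
// between every writer/reader pair, and preservation of attachments read after an earlier write.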
static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_NODE const *framebuffer,
                                 RENDER_PASS_NODE const *renderPass) {
    bool skip_call = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = getImageViewState(dev_data, viewi);
            auto view_state_j = getImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = getImageNode(dev_data, view_ci_i.image);
            auto image_data_j = getImageNode(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
                                                                             image_data_j->memOffset, image_data_j->memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // If a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input, then all subpasses that output to it must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output, then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it has been
    // preserved in every pass since it was written.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that
// READ_ONLY layout attachments don't have CLEAR as their loadOp.
static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
                                                  const uint32_t attachment,
                                                  const VkAttachmentDescription &attachment_description) {
    bool skip_call = false;
    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
            skip_call |=
                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
        }
    }
    return skip_call;
}

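// Walk every subpass's color, depth/stencil, and input attachment references: warn when GENERAL is
// used where an optimal layout exists, reject layouts that are illegal for the reference type, and
// track first use per attachment so the loadOp-vs-layout check above runs exactly once, against the
// layout the attachment is first used in.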
static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    // Track when we're observing the first use of an attachment
    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            auto attach_index = subpass.pColorAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pColorAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                /* This is ideal. */
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                /* May not be optimal; TODO: reconsider this warning based on
                 * other constraints?
                 */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                break;

            default:
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            switch (subpass.pDepthStencilAttachment->layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                /* These are ideal. */
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                /* May not be optimal; TODO: reconsider this warning based on
                 * other constraints? GENERAL can be better than doing a bunch
                 * of transitions.
                 */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "GENERAL layout for depth attachment may not give optimal performance.");
                break;

            default:
                /* No other layouts are acceptable */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
            }

            auto attach_index = subpass.pDepthStencilAttachment->attachment;
            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto attach_index = subpass.pInputAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pInputAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
                /* These are ideal. */
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                /* May not be optimal. TODO: reconsider this warning based on
                 * other constraints.
                 */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                break;

            default:
                /* No other layouts are acceptable */
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
    }
    return skip;
}

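// Build the subpass dependency DAG: each explicit VkSubpassDependency contributes prev/next edges,
// a subpass that depends on itself is flagged in has_self_dependency, and edges to or from
// VK_SUBPASS_EXTERNAL are deliberately omitted since later analysis has no use for them.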
static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }

            // We don't want to add edges to the DAG for dependencies to/from
            // VK_SUBPASS_EXTERNAL. We don't use them for anything, and their
            // presence complicates other code.
            continue;
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        }

        subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
        subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
    }
    return skip_call;
}


VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkShaderModule *pShaderModule) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
9865     spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
9866     spv_diagnostic diag = nullptr;
9867
9868     auto result = spvValidate(ctx, &binary, &diag);
9869     if (result != SPV_SUCCESS) {
9870         skip_call |=
9871             log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
9872                     VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
9873                     "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
9874     }
9875
9876     spvDiagnosticDestroy(diag);
9877     spvContextDestroy(ctx);
9878
9879     if (skip_call)
9880         return VK_ERROR_VALIDATION_FAILED_EXT;
9881
9882     VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9883
9884     if (res == VK_SUCCESS) {
9885         std::lock_guard<std::mutex> lock(global_lock);
9886         dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9887     }
9888     return res;
9889 }
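
// For reference, a hedged sketch of the application side of this entry point.
// codeSize is specified in bytes, which is why the spv_const_binary_t above
// divides by sizeof(uint32_t) to obtain a word count. Names are hypothetical.
//
//     VkShaderModuleCreateInfo ci = {VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO};
//     ci.codeSize = spirv_size_in_bytes;  // must be a multiple of 4
//     ci.pCode = reinterpret_cast<const uint32_t *>(spirv_words);
//     VkShaderModule module;
//     vkCreateShaderModule(device, &ci, nullptr, &module);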
9890
9891 static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9892     bool skip_call = false;
9893     if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9894         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9895                              DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9896                              "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
9897                              type, attachment, attachment_count);
9898     }
9899     return skip_call;
9900 }
9901
9902 static bool IsPowerOfTwo(unsigned x) {
9903     return x && !(x & (x-1));
9904 }
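
// IsPowerOfTwo doubles as a single-bit-set test below: VkSampleCountFlagBits
// values are individual bits (1, 2, 4, ...), so OR-ing the sample counts of all
// attachments used by a subpass yields a power of two only when they all agree.
// For example (hypothetical counts):
//     VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_4_BIT == 0x4  -> consistent
//     VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT == 0x5  -> flagged as inconsistent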
9905
9906 static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9907     bool skip_call = false;
9908     for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9909         const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9910         if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9911             skip_call |=
9912                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9913                         DRAWSTATE_INVALID_RENDERPASS, "DS",
9914                         "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9915         }
9916         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9917             uint32_t attachment = subpass.pPreserveAttachments[j];
9918             if (attachment == VK_ATTACHMENT_UNUSED) {
9919                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9920                                      __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9921                                      "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
9922             } else {
9923                 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9924             }
9925         }
9926
9927         auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
9928             subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9929             [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
9930
9931         unsigned sample_count = 0;
9932
9933         for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9934             uint32_t attachment;
9935             if (subpass.pResolveAttachments) {
9936                 attachment = subpass.pResolveAttachments[j].attachment;
9937                 skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
9938
9939                 if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
9940                     pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9941                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9942                                          __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9943                                          "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
9944                                          "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
9945                                          i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
9946                 }
9947             }
9948             attachment = subpass.pColorAttachments[j].attachment;
9949             skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9950
9951             if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9952                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9953
9954                 if (subpass_performs_resolve &&
9955                     pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9956                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9957                                          __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9958                                          "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
9959                                          "which has VK_SAMPLE_COUNT_1_BIT",
9960                                          i, attachment);
9961                 }
9962             }
9963         }
9964
9965         if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9966             uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9967             skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9968
9969             if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9970                 sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9971             }
9972         }
9973
9974         for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9975             uint32_t attachment = subpass.pInputAttachments[j].attachment;
9976             skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
9977         }
9978
9979         if (sample_count && !IsPowerOfTwo(sample_count)) {
9980             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9981                                  __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9982                                  "CreateRenderPass:  Subpass %u attempts to render to "
9983                                  "attachments with inconsistent sample counts",
9984                                  i);
9985         }
9986     }
9987     return skip_call;
9988 }
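
// For illustration, attachment descriptions that satisfy the resolve rules
// checked above (format and counts are hypothetical): the color source must be
// multisampled while its resolve target has exactly one sample.
//
//     VkAttachmentDescription atts[2] = {};
//     atts[0].format = VK_FORMAT_B8G8R8A8_UNORM;
//     atts[0].samples = VK_SAMPLE_COUNT_4_BIT;  // resolve source: > 1 sample
//     atts[1].format = VK_FORMAT_B8G8R8A8_UNORM;
//     atts[1].samples = VK_SAMPLE_COUNT_1_BIT;  // resolve target: exactly 1 sample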
9989
9990 VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9991                                                 const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
9992     bool skip_call = false;
9993     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9994
9995     std::unique_lock<std::mutex> lock(global_lock);
9996
9997     skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9998     // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
9999     //       ValidateLayouts.
10000     skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
10001     lock.unlock();
10002
10003     if (skip_call) {
10004         return VK_ERROR_VALIDATION_FAILED_EXT;
10005     }
10006
10007     VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
10008
10009     if (VK_SUCCESS == result) {
10010         lock.lock();
10011
10012         std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
10013         std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
10014         skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
10015
10016         auto render_pass = unique_ptr<RENDER_PASS_NODE>(new RENDER_PASS_NODE(pCreateInfo));
10017         render_pass->renderPass = *pRenderPass;
10018         render_pass->hasSelfDependency = has_self_dependency;
10019         render_pass->subpassToNode = subpass_to_node;
10020
10021         // TODO: Maybe fill list and then copy instead of locking
10022         std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
10023         std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
10024         for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10025             const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10026             for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10027                 uint32_t attachment = subpass.pColorAttachments[j].attachment;
10028                 if (!attachment_first_read.count(attachment)) {
10029                     attachment_first_read.insert(std::make_pair(attachment, false));
10030                     attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
10031                 }
10032             }
10033             if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10034                 uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10035                 if (!attachment_first_read.count(attachment)) {
10036                     attachment_first_read.insert(std::make_pair(attachment, false));
10037                     attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
10038                 }
10039             }
10040             for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10041                 uint32_t attachment = subpass.pInputAttachments[j].attachment;
10042                 if (!attachment_first_read.count(attachment)) {
10043                     attachment_first_read.insert(std::make_pair(attachment, true));
10044                     attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
10045                 }
10046             }
10047         }
10048
10049         dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
10050     }
10051     return result;
10052 }
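
// Note on attachment_first_read above: color and depth/stencil first uses are
// recorded as writes (false) while input-attachment first uses are recorded as
// reads (true). CmdBeginRenderPass consults this map so that any attachment
// whose first use in the pass is a read has its backing memory validated.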
10053
10054 static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10055     bool skip_call = false;
10056     auto const pRenderPassInfo = getRenderPass(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
10057     auto const & framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
10058     if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
10059         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10060                              DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
10061                                                                  "whose attachment count differs from that of the render pass.");
10062     }
10063     for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10064         const VkImageView &image_view = framebufferInfo.pAttachments[i];
10065         auto view_state = getImageViewState(dev_data, image_view);
10066         assert(view_state);
10067         const VkImage &image = view_state->create_info.image;
10068         const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
10069         IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
10070                                              pRenderPassInfo->pAttachments[i].initialLayout};
10071         // TODO: Do not iterate over every possibility - consolidate where possible
10072         for (uint32_t j = 0; j < subRange.levelCount; j++) {
10073             uint32_t level = subRange.baseMipLevel + j;
10074             for (uint32_t k = 0; k < subRange.layerCount; k++) {
10075                 uint32_t layer = subRange.baseArrayLayer + k;
10076                 VkImageSubresource sub = {subRange.aspectMask, level, layer};
10077                 IMAGE_CMD_BUF_LAYOUT_NODE node;
10078                 if (!FindLayout(pCB, image, sub, node)) {
10079                     SetLayout(pCB, image, sub, newNode);
10080                     continue;
10081                 }
10082                 if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
10083                     newNode.layout != node.layout) {
10084                     skip_call |=
10085                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10086                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
10087                                 "You cannot start a render pass using attachment %u "
10088                                 "where the render pass initial layout is %s and the previous "
10089                                 "known layout of the attachment is %s. The layouts must match, or "
10090                                 "the render pass initial layout for the attachment must be "
10091                                 "VK_IMAGE_LAYOUT_UNDEFINED",
10092                                 i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
10093                 }
10094             }
10095         }
10096     }
10097     return skip_call;
10098 }
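
// For illustration (hypothetical layouts): if an earlier command in this
// command buffer left attachment 0 in VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
// the render pass must either declare that same initialLayout for attachment 0
// or use VK_IMAGE_LAYOUT_UNDEFINED to indicate the contents may be discarded.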
10099
10100 static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
10101                                           FRAMEBUFFER_NODE *pFramebuffer,
10102                                           VkAttachmentReference ref)
10103 {
10104     if (ref.attachment != VK_ATTACHMENT_UNUSED) {
10105         auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
10106         SetLayout(dev_data, pCB, image_view, ref.layout);
10107     }
10108 }
10109
10110 static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
10111                                      const int subpass_index) {
10112     auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10113     if (!renderPass)
10114         return;
10115
10116     auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10117     if (!framebuffer)
10118         return;
10119
10120     auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
10121     for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10122         TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
10123     }
10124     for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10125         TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
10126     }
10127     if (subpass.pDepthStencilAttachment) {
10128         TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
10129     }
10130 }
10131
10132 static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
10133     bool skip_call = false;
10134     if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
10135         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10136                              DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
10137                              cmd_name.c_str());
10138     }
10139     return skip_call;
10140 }
10141
10142 static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10143     auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10144     if (!renderPass)
10145         return;
10146
10147     const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
10148     auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10149     if (!framebuffer)
10150         return;
10151
10152     for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10153         auto image_view = framebuffer->createInfo.pAttachments[i];
10154         SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
10155     }
10156 }
10157
10158 static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10159     bool skip_call = false;
10160     const safe_VkFramebufferCreateInfo *pFramebufferInfo = &getFramebuffer(dev_data, pRenderPassBegin->framebuffer)->createInfo;
10161     if (pRenderPassBegin->renderArea.offset.x < 0 ||
10162         (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10163         pRenderPassBegin->renderArea.offset.y < 0 ||
10164         (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10165         skip_call |= static_cast<bool>(log_msg(
10166             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10167             DRAWSTATE_INVALID_RENDER_AREA, "CORE",
10168             "Cannot execute a render pass with renderArea not within the bounds of the "
10169             "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10170             "height %d.",
10171             pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10172             pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10173     }
10174     return skip_call;
10175 }
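
// For illustration, with a hypothetical 1024x768 framebuffer:
//     renderArea = {{0, 0},   {1024, 768}}  -> passes
//     renderArea = {{512, 0}, {1024, 768}}  -> fails: 512 + 1024 > 1024 in x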
10176
10177 // For a stencil-only format, check the stencil[Load|Store]Op; for a depth-only or color attachment, check the
10178 // [load|store]Op; for a combined depth/stencil format, a match on either op counts
10179 // TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
10180 template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10181     if (color_depth_op != op && stencil_op != op) {
10182         return false;
10183     }
10184     bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10185     bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10186
10187     return ((check_color_depth_load_op && (color_depth_op == op)) ||
10188             (check_stencil_load_op && (stencil_op == op)));
10189 }
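
// Worked examples of the predicate above (formats are illustrative):
//     VK_FORMAT_R8G8B8A8_UNORM    -> only color_depth_op is consulted
//     VK_FORMAT_S8_UINT           -> only stencil_op is consulted
//     VK_FORMAT_D24_UNORM_S8_UINT -> a match on either op returns true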
10190
10191 VKAPI_ATTR void VKAPI_CALL
10192 CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
10193     bool skip_call = false;
10194     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10195     std::unique_lock<std::mutex> lock(global_lock);
10196     GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10197     auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
10198     auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10199     if (cb_node) {
10200         if (renderPass) {
10201             uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
10202             cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
10203             for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
10204                 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10205                 auto pAttachment = &renderPass->createInfo.pAttachments[i];
10206                 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10207                                                          pAttachment->stencilLoadOp,
10208                                                          VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10209                     clear_op_size = static_cast<uint32_t>(i) + 1;
10210                     std::function<bool()> function = [=]() {
10211                         SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10212                         return false;
10213                     };
10214                     cb_node->validate_functions.push_back(function);
10215                 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10216                                                                 pAttachment->stencilLoadOp,
10217                                                                 VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10218                     std::function<bool()> function = [=]() {
10219                         SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10220                         return false;
10221                     };
10222                     cb_node->validate_functions.push_back(function);
10223                 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10224                                                                 pAttachment->stencilLoadOp,
10225                                                                 VK_ATTACHMENT_LOAD_OP_LOAD)) {
10226                     std::function<bool()> function = [=]() {
10227                         return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10228                                                           "vkCmdBeginRenderPass()");
10229                     };
10230                     cb_node->validate_functions.push_back(function);
10231                 }
10232                 if (renderPass->attachment_first_read[i]) {
10233                     std::function<bool()> function = [=]() {
10234                         return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10235                                                           "vkCmdBeginRenderPass()");
10236                     };
10237                     cb_node->validate_functions.push_back(function);
10238                 }
10239             }
10240             if (clear_op_size > pRenderPassBegin->clearValueCount) {
10241                 skip_call |=
10242                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10243                             reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10244                             "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
10245                             "be at least %u "
10246                             "entries in the pClearValues array, as renderPass 0x%" PRIx64
10247                             " requires %u entries to cover the highest attachment index (%u) that uses "
10248                             "VK_ATTACHMENT_LOAD_OP_CLEAR. Note that the pClearValues array is indexed by attachment number, "
10249                             "so entries for attachments that are not cleared are simply ignored.",
10250                             pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass),
10251                             clear_op_size, clear_op_size - 1);
10252             }
10253             skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10254             skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
10255             skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass");
10256             skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
10257             skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass");
10258             skip_call |= addCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10259             cb_node->activeRenderPass = renderPass;
10260             // This is a shallow copy as that is all that is needed for now
10261             cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10262             cb_node->activeSubpass = 0;
10263             cb_node->activeSubpassContents = contents;
10264             cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10265             // Connect this framebuffer and its children to this cmdBuffer
10266             AddFramebufferBinding(dev_data, cb_node, framebuffer);
10267             // transition attachments to the correct layouts for the first subpass
10268             TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
10269         } else {
10270             skip_call |=
10271                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10272                         DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
10273         }
10274     }
10275     lock.unlock();
10276     if (!skip_call) {
10277         dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10278     }
10279 }
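
// For illustration of the clearValueCount rule above (hypothetical indices):
// if only attachment 3 uses VK_ATTACHMENT_LOAD_OP_CLEAR, clear_op_size becomes
// 4, so pClearValues must hold at least 4 entries even though entries 0..2 are
// ignored; pClearValues is indexed by attachment number, not by clear order.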
10280
10281 VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10282     bool skip_call = false;
10283     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10284     std::unique_lock<std::mutex> lock(global_lock);
10285     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10286     if (pCB) {
10287         skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
10288         skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10289         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
10290
10291         // Guard against a null activeRenderPass; outsideRenderPass() above reports that error
10292         if (pCB->activeRenderPass && (pCB->activeSubpass == pCB->activeRenderPass->createInfo.subpassCount - 1)) {
10293             skip_call |=
10294                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10295                         reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10296                         "vkCmdNextSubpass(): Attempted to advance beyond final subpass");
10297         }
10298     }
10299     lock.unlock();
10300
10301     if (skip_call)
10302         return;
10303
10304     dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
10305
10306     if (pCB) {
10307         lock.lock();
10308         pCB->activeSubpass++;
10309         pCB->activeSubpassContents = contents;
10310         TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
10311     }
10312 }
10313
10314 VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
10315     bool skip_call = false;
10316     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10317     std::unique_lock<std::mutex> lock(global_lock);
10318     auto pCB = getCBNode(dev_data, commandBuffer);
10319     if (pCB) {
10320         RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
10321         auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
10322         if (pRPNode) {
10323             if (pCB->activeSubpass != pRPNode->createInfo.subpassCount - 1) {
10324                 skip_call |=
10325                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10326                             reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10327                             "vkCmdEndRenderPass(): Called before reaching final subpass");
10328             }
10329
10330             for (size_t i = 0; i < pRPNode->createInfo.attachmentCount; ++i) {
10331                 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10332                 auto pAttachment = &pRPNode->createInfo.pAttachments[i];
10333                 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
10334                                                          pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
10335                     std::function<bool()> function = [=]() {
10336                         SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10337                         return false;
10338                     };
10339                     pCB->validate_functions.push_back(function);
10340                 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
10341                                                                 pAttachment->stencilStoreOp,
10342                                                                 VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
10343                     std::function<bool()> function = [=]() {
10344                         SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10345                         return false;
10346                     };
10347                     pCB->validate_functions.push_back(function);
10348                 }
10349             }
10350         }
10351         skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
10352         skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
10353         skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
10354     }
10355     lock.unlock();
10356
10357     if (skip_call)
10358         return;
10359
10360     dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
10361
10362     if (pCB) {
10363         lock.lock();
10364         TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
10365         pCB->activeRenderPass = nullptr;
10366         pCB->activeSubpass = 0;
10367         pCB->activeFramebuffer = VK_NULL_HANDLE;
10368     }
10369 }
10370
10371 static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
10372                                         uint32_t secondaryAttach, const char *msg) {
10373     return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10374                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10375                    "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
10376                    "that is not compatible with the Primary Cmd Buffer's current render pass. "
10377                    "Attachment %u is not compatible with %u: %s",
10378                    reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
10379 }
10380
10381 static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10382                                             VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
10383                                             VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
10384                                             uint32_t secondaryAttach, bool is_multi) {
10385     bool skip_call = false;
10386     if (primaryPassCI->attachmentCount <= primaryAttach) {
10387         primaryAttach = VK_ATTACHMENT_UNUSED;
10388     }
10389     if (secondaryPassCI->attachmentCount <= secondaryAttach) {
10390         secondaryAttach = VK_ATTACHMENT_UNUSED;
10391     }
10392     if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
10393         return skip_call;
10394     }
10395     if (primaryAttach == VK_ATTACHMENT_UNUSED) {
10396         skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10397                                                  "The first is unused while the second is not.");
10398         return skip_call;
10399     }
10400     if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
10401         skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10402                                                  "The second is unused while the first is not.");
10403         return skip_call;
10404     }
10405     if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
10406         skip_call |=
10407             logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
10408     }
10409     if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
10410         skip_call |=
10411             logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
10412     }
10413     if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
10414         skip_call |=
10415             logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
10416     }
10417     return skip_call;
10418 }
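
// Note: format and sample count must match unconditionally, flags are only
// compared for multi-subpass passes (is_multi), and an attachment reference
// beyond attachmentCount is treated as VK_ATTACHMENT_UNUSED before comparison.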
10419
10420 static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10421                                          VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10422                                          VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
10423     bool skip_call = false;
10424     const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
10425     const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
10426     uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
10427     for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
10428         uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
10429         if (i < primary_desc.inputAttachmentCount) {
10430             primary_input_attach = primary_desc.pInputAttachments[i].attachment;
10431         }
10432         if (i < secondary_desc.inputAttachmentCount) {
10433             secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
10434         }
10435         skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
10436                                                      secondaryPassCI, secondary_input_attach, is_multi);
10437     }
10438     uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
10439     for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
10440         uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
10441         if (i < primary_desc.colorAttachmentCount) {
10442             primary_color_attach = primary_desc.pColorAttachments[i].attachment;
10443         }
10444         if (i < secondary_desc.colorAttachmentCount) {
10445             secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
10446         }
10447         skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
10448                                                      secondaryPassCI, secondary_color_attach, is_multi);
10449         uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
10450         if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
10451             primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
10452         }
10453         if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
10454             secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
10455         }
10456         skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
10457                                                      secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
10458     }
10459     uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
10460     if (primary_desc.pDepthStencilAttachment) {
10461         primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
10462     }
10463     if (secondary_desc.pDepthStencilAttachment) {
10464         secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
10465     }
10466     skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
10467                                                  secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
10468     return skip_call;
10469 }
10470
10471 // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
10472 //  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
10473 //  will then feed into this function
10474 static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10475                                             VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10476                                             VkRenderPassCreateInfo const *secondaryPassCI) {
10477     bool skip_call = false;
10478
10479     if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
10480         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10481                              DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10482                              "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10483                              " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
10484                              " that has a subpassCount of %u.",
10485                              reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
10486                              reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
10487     } else {
10488         for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
10489             skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
10490                                                       primaryPassCI->subpassCount > 1);
10491         }
10492     }
10493     return skip_call;
10494 }
10495
10496 static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10497                                 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10498     bool skip_call = false;
10499     if (!pSubCB->beginInfo.pInheritanceInfo) {
10500         return skip_call;
10501     }
10502     VkFramebuffer primary_fb = pCB->activeFramebuffer;
10503     VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10504     if (secondary_fb != VK_NULL_HANDLE) {
10505         if (primary_fb != secondary_fb) {
10506             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10507                                  DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10508                                  "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10509                                  " which has a framebuffer 0x%" PRIx64
10510                                  " that is not the same as the primaryCB's current active framebuffer 0x%" PRIx64 ".",
10511                                  reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10512                                  reinterpret_cast<uint64_t &>(primary_fb));
10513         }
10514         auto fb = getFramebuffer(dev_data, secondary_fb);
10515         if (!fb) {
10516             skip_call |=
10517                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10518                         DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10519                                                                           "which has invalid framebuffer 0x%" PRIx64 ".",
10520                         (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10521             return skip_call;
10522         }
10523         auto cb_renderpass = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10524         if (cb_renderpass->renderPass != fb->createInfo.renderPass) {
10525             skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
10526                                                          cb_renderpass->createInfo.ptr());
10527         }
10528     }
10529     return skip_call;
10530 }
10531
10532 static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10533     bool skip_call = false;
10534     unordered_set<int> activeTypes;
10535     for (auto queryObject : pCB->activeQueries) {
10536         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10537         if (queryPoolData != dev_data->queryPoolMap.end()) {
10538             if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10539                 pSubCB->beginInfo.pInheritanceInfo) {
10540                 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10541                 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10542                     skip_call |= log_msg(
10543                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10544                         DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10545                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10546                         "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried so the "
10547                         "secondary command buffer's inherited pipelineStatistics must be a subset of the query pool's statistics.",
10548                         reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10549                 }
10550             }
10551             activeTypes.insert(queryPoolData->second.createInfo.queryType);
10552         }
10553     }
10554     for (auto queryObject : pSubCB->startedQueries) {
10555         auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10556         if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10557             skip_call |=
10558                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10559                         DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10560                         "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10561                         "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
10562                         "secondary Cmd Buffer 0x%p.",
10563                         reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10564                         queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10565         }
10566     }
10567
10568     auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10569     auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10570     if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10571         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10572                              reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10573                              "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
10574                              " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
10575                              reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
10576                              reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
10577     }
10578
10579     return skip_call;
10580 }
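
// For illustration (hypothetical flags): if the primary has an active
// VK_QUERY_TYPE_PIPELINE_STATISTICS query on a pool created with only
// VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT, then the secondary's
// pInheritanceInfo->pipelineStatistics may contain that bit but nothing more;
// any extra bit makes the subset check above fail.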
10581
10582 VKAPI_ATTR void VKAPI_CALL
10583 CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10584     bool skip_call = false;
10585     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10586     std::unique_lock<std::mutex> lock(global_lock);
10587     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10588     if (pCB) {
10589         GLOBAL_CB_NODE *pSubCB = NULL;
10590         for (uint32_t i = 0; i < commandBuffersCount; i++) {
10591             pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10592             if (!pSubCB) {
10593                 skip_call |=
10594                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10595                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10596                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
10597                             (void *)pCommandBuffers[i], i);
10598             } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10599                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10600                                      __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10601                                      "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10602                                      "array. All cmd buffers in pCommandBuffers array must be secondary.",
10603                                      (void *)pCommandBuffers[i], i);
10604             } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10605                 auto secondary_rp_node = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10606                 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10607                     skip_call |= log_msg(
10608                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10609                         (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10610                         "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10611                         ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10612                         (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
10613                 } else {
10614                     // Make sure render pass is compatible with parent command buffer pass if the continue bit is set
10615                     if (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) {
10616                         skip_call |=
10617                             validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
10618                                                             pCommandBuffers[i], secondary_rp_node->createInfo.ptr());
10619                     }
10620                     //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10621                     skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10622                 }
10623                 string errorString = "";
10624                 // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
10625                 if ((pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) &&
10626                     !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
10627                                                      secondary_rp_node->createInfo.ptr(), errorString)) {
10628                     skip_call |= log_msg(
10629                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10630                         (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10631                         "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
10632                         ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
10633                         (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10634                         (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
10635                 }
10636             }
10637             // TODO(mlentine): Move more logic into this method
10638             skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10639             skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
10640             // Secondary cmdBuffers are considered pending execution starting from
10641             // the moment they are recorded
10642             if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10643                 if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10644                     skip_call |= log_msg(
10645                         dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10646                         (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10647                         "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10648                         "set!",
10649                         (uint64_t)(pCB->commandBuffer));
10650                 }
10651                 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10652                     // Warn that executing a non-simultaneous secondary cmd buffer causes the primary to be treated as non-simultaneous
10653                     skip_call |= log_msg(
10654                         dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10655                         (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10656                         "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
10657                         ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10658                         "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10659                         "set, even though it does.",
10660                         (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10661                     pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10662                 }
10663             }
10664             if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
10665                 skip_call |=
10666                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10667                             reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10668                             "vkCmdExecuteCommands(): Secondary Command Buffer "
10669                             "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
10670                             "flight and inherited queries not "
10671                             "supported on this device.",
10672                             reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10673             }
10674             pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10675             pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10676             dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10677             for (auto &function : pSubCB->queryUpdates) {
10678                 pCB->queryUpdates.push_back(function);
10679             }
10680         }
10681         skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
10682         skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10683     }
10684     lock.unlock();
10685     if (!skip_call)
10686         dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10687 }
10688
10689 // For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
10690 static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
10691                                     VkDeviceSize end_offset) {
10692     bool skip_call = false;
10693     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10694     // Iterate over all bound image ranges and verify that for any that overlap the
10695     //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
10696     // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
10697     for (auto image_handle : mem_info->bound_images) {
10698         auto img_it = mem_info->bound_ranges.find(image_handle);
10699         if (img_it != mem_info->bound_ranges.end()) {
10700             if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
10701                 std::vector<VkImageLayout> layouts;
10702                 if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
10703                     for (auto layout : layouts) {
10704                         if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10705                             skip_call |=
10706                                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10707                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10708                                                                                         "GENERAL or PREINITIALIZED are supported.",
10709                                         string_VkImageLayout(layout));
10710                         }
10711                     }
10712                 }
10713             }
10714         }
10715     }
10716     return skip_call;
10717 }
10718
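// MapMemory: in addition to the image-layout check above, the memory type must be
// HOST_VISIBLE, and ValidateMapMemRange rejects double-maps and out-of-bounds
// ranges. VK_WHOLE_SIZE is resolved against alloc_info.allocationSize, so the
// end_offset computed below is the inclusive offset of the last mapped byte.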
10719 VKAPI_ATTR VkResult VKAPI_CALL
10720 MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10721     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10722
10723     bool skip_call = false;
10724     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10725     std::unique_lock<std::mutex> lock(global_lock);
10726     DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10727     if (mem_info) {
10728         // TODO : This could be more fine-grained to track just the region that is valid
10729         mem_info->global_valid = true;
10730         auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10731         skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10732         // TODO : Do we need to create new "bound_range" for the mapped range?
10733         SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10734         if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10735              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10736             skip_call |=
10737                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10738                         (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10739                         "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
10740         }
10741     }
10742     skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10743     lock.unlock();
10744
10745     if (!skip_call) {
10746         result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
10747         if (VK_SUCCESS == result) {
10748             lock.lock();
10749             // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10750             storeMemRanges(dev_data, mem, offset, size);
10751             initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10752             lock.unlock();
10753         }
10754     }
10755     return result;
10756 }
10757
10758 VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10759     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10760     bool skip_call = false;
10761
10762     std::unique_lock<std::mutex> lock(global_lock);
10763     skip_call |= deleteMemRanges(dev_data, mem);
10764     lock.unlock();
10765     if (!skip_call) {
10766         dev_data->dispatch_table.UnmapMemory(device, mem);
10767     }
10768 }
10769
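// For vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges, each
// VkMappedMemoryRange must lie entirely within the currently mapped range of its
// memory object: the range's offset must not precede the mapped offset, and
// offset + size must not pass the mapped end (allocationSize when the map used
// VK_WHOLE_SIZE). The helper below checks exactly those two bounds.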
10770 static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
10771                                    const VkMappedMemoryRange *pMemRanges) {
10772     bool skip_call = false;
10773     for (uint32_t i = 0; i < memRangeCount; ++i) {
10774         auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10775         if (mem_info) {
10776             if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10777                 skip_call |=
10778                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10779                             (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10780                             "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10781                             "(" PRINTF_SIZE_T_SPECIFIER ").",
10782                             funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
10783             }
10784
10785             const uint64_t mapped_range_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
10786                                                   ? mem_info->alloc_info.allocationSize
10787                                                   : (mem_info->mem_range.offset + mem_info->mem_range.size);
10788             if (pMemRanges[i].size != VK_WHOLE_SIZE && (mapped_range_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
10789                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10790                                      VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10791                                      MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10792                                                                   ") exceeds the Memory Object's upper-bound "
10793                                                                   "(" PRINTF_SIZE_T_SPECIFIER ").",
10794                                      funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10795                                      static_cast<size_t>(mapped_range_end));
10796             }
10797         }
10798     }
10799     return skip_call;
10800 }
10801
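// Shadow-copy guard bands: for noncoherent mappings the layer hands the app a
// shadow buffer with shadow_pad_size sentinel bytes (NoncoherentMemoryFillValue)
// on each side of the user-visible region, laid out roughly as:
//
//     [ pad (fill value) | user data (size bytes) | pad (fill value) ]
//
// On flush, a disturbed byte in the leading pad is reported as an underflow and a
// disturbed byte in the trailing pad as an overflow, after which the user region
// is copied down to the real driver mapping (p_driver_data).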
10802 static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t memRangeCount,
10803                                                      const VkMappedMemoryRange *pMemRanges) {
10804     bool skip_call = false;
10805     for (uint32_t i = 0; i < memRangeCount; ++i) {
10806         auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10807         if (mem_info) {
10808             if (mem_info->shadow_copy) {
10809                 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10810                                         ? mem_info->mem_range.size
10811                                         : (mem_info->alloc_info.allocationSize - pMemRanges[i].offset);
10812                 char *data = static_cast<char *>(mem_info->shadow_copy);
10813                 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
10814                     if (data[j] != NoncoherentMemoryFillValue) {
10815                         skip_call |= log_msg(
10816                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10817                             (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10818                             "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10819                     }
10820                 }
10821                 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
10822                     if (data[j] != NoncoherentMemoryFillValue) {
10823                         skip_call |= log_msg(
10824                             dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10825                             (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10826                             "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10827                     }
10828                 }
10829                 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
10830             }
10831         }
10832     }
10833     return skip_call;
10834 }
10835
10836 static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t memory_range_count,
10837                                             const VkMappedMemoryRange *mem_ranges) {
10838     for (uint32_t i = 0; i < memory_range_count; ++i) {
10839         auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
10840         if (mem_info && mem_info->shadow_copy) {
10841             VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10842                                     ? mem_info->mem_range.size
10843                                     : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
10844             char *data = static_cast<char *>(mem_info->shadow_copy);
10845             memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
10846         }
10847     }
10848 }
10849
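// FlushMappedMemoryRanges: validates the ranges, scans the shadow guard bands,
// and pushes the shadow contents to the driver. A minimal app-side usage sketch
// (illustrative only; 'mem' is assumed to be a mapped, noncoherent allocation):
//
//     VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//     range.memory = mem;
//     range.offset = 0;
//     range.size = VK_WHOLE_SIZE;
//     vkFlushMappedMemoryRanges(device, 1, &range);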
10850 VKAPI_ATTR VkResult VKAPI_CALL
10851 FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10852     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10853     bool skip_call = false;
10854     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10855
10856     std::unique_lock<std::mutex> lock(global_lock);
10857     skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, memRangeCount, pMemRanges);
10858     skip_call |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10859     lock.unlock();
10860     if (!skip_call) {
10861         result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10862     }
10863     return result;
10864 }
10865
10866 VKAPI_ATTR VkResult VKAPI_CALL
10867 InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10868     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10869     bool skip_call = false;
10870     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10871
10872     std::unique_lock<std::mutex> lock(global_lock);
10873     skip_call |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10874     lock.unlock();
10875     if (!skip_call) {
10876         result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10877         // Update our shadow copy with modified driver data
10878         CopyNoncoherentMemoryFromDriver(dev_data, memRangeCount, pMemRanges);
10879     }
10880     return result;
10881 }
10882
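// BindImageMemory: the global lock is released across the down-chain
// GetImageMemoryRequirements call (the layer avoids holding it during dispatch)
// and reacquired for the range bookkeeping. InsertImageMemoryRange also records
// whether the image is linearly tiled, since linear and optimal images follow
// different aliasing rules within a single allocation.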
10883 VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10884     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10885     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10886     bool skip_call = false;
10887     std::unique_lock<std::mutex> lock(global_lock);
10888     auto image_node = getImageNode(dev_data, image);
10889     if (image_node) {
10890         // Track objects tied to memory
10891         uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
10892         skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10893         VkMemoryRequirements memRequirements;
10894         lock.unlock();
10895         dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &memRequirements);
10896         lock.lock();
10897
10898         // Track and validate bound memory range information
10899         auto mem_info = getMemObjInfo(dev_data, mem);
10900         if (mem_info) {
10901             skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
10902                                                 image_node->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
10903             skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
10904         }
10905
10906         print_mem_list(dev_data);
10907         lock.unlock();
10908         if (!skip_call) {
10909             result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
10910             lock.lock();
10911             image_node->mem = mem;
10912             image_node->memOffset = memoryOffset;
10913             image_node->memSize = memRequirements.size;
10914             lock.unlock();
10915         }
10916     } else {
10917         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10918                 reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
10919                 "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been deleted?",
10920                 reinterpret_cast<const uint64_t &>(image));
10921     }
10922     return result;
10923 }
10924
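// SetEvent: a host-side signal takes effect immediately and is visible to all
// queues, so besides flagging events still in flight on a command buffer, the
// tracked stage mask of every queue that has already seen this event is OR'd
// with VK_PIPELINE_STAGE_HOST_BIT in the queueMap walk below.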
10925 VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
10926     bool skip_call = false;
10927     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10928     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10929     std::unique_lock<std::mutex> lock(global_lock);
10930     auto event_node = getEventNode(dev_data, event);
10931     if (event_node) {
10932         event_node->needsSignaled = false;
10933         event_node->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10934         if (event_node->write_in_use) {
10935             skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10936                                  reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10937                                  "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
10938                                  reinterpret_cast<const uint64_t &>(event));
10939         }
10940     }
10941     lock.unlock();
10942     // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10943     // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
10944     // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10945     for (auto queue_data : dev_data->queueMap) {
10946         auto event_entry = queue_data.second.eventToStageMap.find(event);
10947         if (event_entry != queue_data.second.eventToStageMap.end()) {
10948             event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10949         }
10950     }
10951     if (!skip_call)
10952         result = dev_data->dispatch_table.SetEvent(device, event);
10953     return result;
10954 }
10955
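// QueueBindSparse is treated like a queue submission with no command buffers:
// sparse buffer/image bindings are recorded in the memory-binding tracker, wait
// semaphores must have a pending signal (otherwise forward progress is
// impossible), signal semaphores must not already be signaled, and one
// submission entry is queued per VkBindSparseInfo so the fence, if any, retires
// after the final batch.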
10956 VKAPI_ATTR VkResult VKAPI_CALL
10957 QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10958     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10959     VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10960     bool skip_call = false;
10961     std::unique_lock<std::mutex> lock(global_lock);
10962     auto pFence = getFenceNode(dev_data, fence);
10963     auto pQueue = getQueueNode(dev_data, queue);
10964
10965     // First verify that fence is not in use
10966     skip_call |= ValidateFenceForSubmit(dev_data, pFence);
10967
10968     if (pFence) {
10969         SubmitFence(pQueue, pFence, bindInfoCount);
10970     }
10971
10972     for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10973         const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10974         // Track objects tied to memory
10975         for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10976             for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10977                 if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
10978                                            (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10979                                            "vkQueueBindSparse"))
10980                     skip_call = true;
10981             }
10982         }
10983         for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10984             for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10985                 if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
10986                                            (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10987                                            "vkQueueBindSparse"))
10988                     skip_call = true;
10989             }
10990         }
10991         for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10992             for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10993                 if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
10994                                            (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10995                                            "vkQueueBindSparse"))
10996                     skip_call = true;
10997             }
10998         }
10999
11000         std::vector<SEMAPHORE_WAIT> semaphore_waits;
11001         std::vector<VkSemaphore> semaphore_signals;
11002         for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11003             VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11004             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11005             if (pSemaphore) {
11006                 if (pSemaphore->signaled) {
11007                     if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11008                         semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11009                         pSemaphore->in_use.fetch_add(1);
11010                     }
11011                     pSemaphore->signaler.first = VK_NULL_HANDLE;
11012                     pSemaphore->signaled = false;
11013                 } else {
11014                     skip_call |=
11015                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11016                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11017                                 "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
11018                                 " that has no way to be signaled.",
11019                                 reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11020                 }
11021             }
11022         }
11023         for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11024             VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11025             auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11026             if (pSemaphore) {
11027                 if (pSemaphore->signaled) {
11028                     skip_call |=
11029                         log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11030                                 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11031                                 "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
11032                                 ", but that semaphore is already signaled.",
11033                                 reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11034                 }
11035                 else {
11036                     pSemaphore->signaler.first = queue;
11037                     pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11038                     pSemaphore->signaled = true;
11039                     pSemaphore->in_use.fetch_add(1);
11040                     semaphore_signals.push_back(semaphore);
11041                 }
11042             }
11043         }
11044
11045         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11046                                          semaphore_waits,
11047                                          semaphore_signals,
11048                                          bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11049     }
11050
11051     if (pFence && !bindInfoCount) {
11052         // No work to do, just dropping a fence in the queue by itself.
11053         pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11054                                          std::vector<SEMAPHORE_WAIT>(),
11055                                          std::vector<VkSemaphore>(),
11056                                          fence);
11057     }
11058
11059     print_mem_list(dev_data);
11060     lock.unlock();
11061
11062     if (!skip_call)
11063         return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11064
11065     return result;
11066 }
11067
11068 VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11069                                                const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11070     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11071     VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11072     if (result == VK_SUCCESS) {
11073         std::lock_guard<std::mutex> lock(global_lock);
11074         SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
11075         sNode->signaler.first = VK_NULL_HANDLE;
11076         sNode->signaler.second = 0;
11077         sNode->signaled = false;
11078     }
11079     return result;
11080 }
11081
11082 VKAPI_ATTR VkResult VKAPI_CALL
11083 CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11084     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11085     VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11086     if (result == VK_SUCCESS) {
11087         std::lock_guard<std::mutex> lock(global_lock);
11088         dev_data->eventMap[*pEvent].needsSignaled = false;
11089         dev_data->eventMap[*pEvent].write_in_use = 0;
11090         dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11091     }
11092     return result;
11093 }
11094
11095 VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11096                                                   const VkAllocationCallbacks *pAllocator,
11097                                                   VkSwapchainKHR *pSwapchain) {
11098     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11099     VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
11100
11101     if (VK_SUCCESS == result) {
11102         std::lock_guard<std::mutex> lock(global_lock);
11103         dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
11104     }
11105
11106     return result;
11107 }
11108
11109 VKAPI_ATTR void VKAPI_CALL
11110 DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
11111     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11112     bool skip_call = false;
11113
11114     std::unique_lock<std::mutex> lock(global_lock);
11115     auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11116     if (swapchain_data) {
11117         if (swapchain_data->images.size() > 0) {
11118             for (auto swapchain_image : swapchain_data->images) {
11119                 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11120                 if (image_sub != dev_data->imageSubresourceMap.end()) {
11121                     for (auto imgsubpair : image_sub->second) {
11122                         auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11123                         if (image_item != dev_data->imageLayoutMap.end()) {
11124                             dev_data->imageLayoutMap.erase(image_item);
11125                         }
11126                     }
11127                     dev_data->imageSubresourceMap.erase(image_sub);
11128                 }
11129                 skip_call |=
11130                     clear_object_binding(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
11131                 dev_data->imageMap.erase(swapchain_image);
11132             }
11133         }
11134         dev_data->device_extensions.swapchainMap.erase(swapchain);
11135     }
11136     lock.unlock();
11137     if (!skip_call)
11138         dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
11139 }
11140
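// GetSwapchainImagesKHR: swapchain images come from the WSI rather than
// vkCreateImage, so on the first successful query the layer fabricates an
// IMAGE_NODE per handle from the swapchain's create info, binds it to the
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY sentinel instead of real device memory, and
// seeds its layout as VK_IMAGE_LAYOUT_UNDEFINED. Repeat calls are cross-checked
// against the cached handle list, since the image set must be stable for the
// life of the swapchain.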
11141 VKAPI_ATTR VkResult VKAPI_CALL
11142 GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
11143     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11144     VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11145
11146     if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11147         // This should never happen and is checked by param checker.
11148         if (!pCount)
11149             return result;
11150         std::lock_guard<std::mutex> lock(global_lock);
11151         const size_t count = *pCount;
11152         auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11153         if (swapchain_node && !swapchain_node->images.empty()) {
11154             // TODO : Not sure I like the memcmp here, but it works
11155             const bool mismatch = (swapchain_node->images.size() != count ||
11156                                    memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
11157             if (mismatch) {
11158                 // TODO: Verify against Valid Usage section of extension
11159                 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11160                         (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
11161                         "vkGetSwapchainImagesKHR(0x%" PRIx64
11162                         ") returned mismatching image data on a subsequent call",
11163                         (uint64_t)(swapchain));
11164             }
11165         }
11166         for (uint32_t i = 0; swapchain_node && i < *pCount; ++i) { // guard against an unknown swapchain handle
11167             IMAGE_LAYOUT_NODE image_layout_node;
11168             image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11169             image_layout_node.format = swapchain_node->createInfo.imageFormat;
11170             // Add imageMap entries for each swapchain image
11171             VkImageCreateInfo image_ci = {};
11172             image_ci.mipLevels = 1;
11173             image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
11174             image_ci.usage = swapchain_node->createInfo.imageUsage;
11175             image_ci.format = swapchain_node->createInfo.imageFormat;
11176             image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11177             image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
11178             image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
11179             image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
11180             dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pSwapchainImages[i], &image_ci));
11181             auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
11182             image_node->valid = false;
11183             image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11184             swapchain_node->images.push_back(pSwapchainImages[i]);
11185             ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11186             dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11187             dev_data->imageLayoutMap[subpair] = image_layout_node;
11188             dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
11189         }
11190     }
11191     return result;
11192 }
11193
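// QueuePresentKHR: each presented image must have been acquired and must be in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. A sketch of the transition an app would
// record before presenting (illustrative; 'cmd' and 'swapchain_image' are
// assumed names):
//
//     VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = swapchain_image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr,
//                          0, nullptr, 1, &barrier);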
11194 VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11195     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11196     bool skip_call = false;
11197
11198     std::lock_guard<std::mutex> lock(global_lock);
11199     for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11200         auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11201         if (pSemaphore && !pSemaphore->signaled) {
11202             skip_call |=
11203                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11204                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11205                             "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11206                             reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11207         }
11208     }
11209
11210     for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11211         auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11212         if (swapchain_data) {
11213             if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11214                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11215                                      reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
11216                                      "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11217                                      pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11218             }
11219             else {
11220                 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11221                 auto image_node = getImageNode(dev_data, image);
11222                 skip_call |= ValidateImageMemoryIsValid(dev_data, image_node, "vkQueuePresentKHR()");
11223
11224                 if (image_node && !image_node->acquired) {
11225                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11226                                          reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
11227                                          "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
11228                                          pPresentInfo->pImageIndices[i]);
11229                 }
11230
11231                 vector<VkImageLayout> layouts;
11232                 if (FindLayouts(dev_data, image, layouts)) {
11233                     for (auto layout : layouts) {
11234                         if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11235                             skip_call |=
11236                                     log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11237                                             reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
11238                                             "Image passed to present must be in layout "
11239                                             "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s",
11240                                             string_VkImageLayout(layout));
11241                         }
11242                     }
11243                 }
11244             }
11245         }
11246     }
11247
11248     if (skip_call) {
11249         return VK_ERROR_VALIDATION_FAILED_EXT;
11250     }
11251
11252     VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11253
11254     if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11255         // Semaphore waits occur before error generation, if the call reached
11256         // the ICD. (Confirm?)
11257         for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11258             auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11259             if (pSemaphore) {
11260                 pSemaphore->signaler.first = VK_NULL_HANDLE;
11261                 pSemaphore->signaled = false;
11262             }
11263         }
11264
11265         for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11266             // Note: this is imperfect, in that we can get confused about what
11267             // did or didn't succeed-- but if the app does that, it's confused
11268             // itself just as much.
11269             auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11270
11271             if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
11272                 continue; // this present didn't actually happen.
11273
11274             // Mark the image as having been released to the WSI
11275             auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11276             auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11277             auto image_node = getImageNode(dev_data, image);
11278             image_node->acquired = false;
11279         }
11280
11281         // Note: even though presentation is directed to a queue, there is no
11282         // direct ordering between QP and subsequent work, so QP (and its
11283         // semaphore waits) /never/ participate in any completion proof.
11284     }
11285
11286     return result;
11287 }
11288
11289 VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11290                                                          const VkSwapchainCreateInfoKHR *pCreateInfos,
11291                                                          const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11292     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11293     std::unique_lock<std::mutex> lock(global_lock);
11294     VkResult result =
11295         dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11296     return result;
11297 }
11298
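// AcquireNextImageKHR completes asynchronously, so the app must provide a
// semaphore and/or fence or it has no way to learn when the image is usable;
// both being VK_NULL_HANDLE is flagged below. A successful acquire counts as a
// signal on the semaphore, but with no signaling queue (signaler.first stays
// VK_NULL_HANDLE), so it can never participate in a queue-completion proof.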
11299 VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11300                                                    VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11301     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11302     bool skip_call = false;
11303
11304     std::unique_lock<std::mutex> lock(global_lock);
11305
11306     if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11307         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11308                              reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
11309                              "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
11310                              "to determine the completion of this operation.");
11311     }
11312
11313     auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11314     if (pSemaphore && pSemaphore->signaled) {
11315         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11316                              reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11317                              "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
11318     }
11319
11320     auto pFence = getFenceNode(dev_data, fence);
11321     if (pFence) {
11322         skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11323     }
11324     lock.unlock();
11325
11326     if (skip_call)
11327         return VK_ERROR_VALIDATION_FAILED_EXT;
11328
11329     VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11330
11331     lock.lock();
11332     if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11333         if (pFence) {
11334             pFence->state = FENCE_INFLIGHT;
11335             pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
11336         }
11337
11338         // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11339         if (pSemaphore) {
11340             pSemaphore->signaled = true;
11341             pSemaphore->signaler.first = VK_NULL_HANDLE;
11342         }
11343
11344         // Mark the image as acquired.
11345         auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11346         auto image = swapchain_data->images[*pImageIndex];
11347         auto image_node = getImageNode(dev_data, image);
11348         image_node->acquired = true;
11349     }
11350     lock.unlock();
11351
11352     return result;
11353 }
11354
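// EnumeratePhysicalDevices follows the standard two-call enumeration pattern;
// the layer warns when the count query is skipped or when the caller's count
// disagrees with the instance's. Typical well-behaved usage (illustrative
// sketch):
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, NULL);           // query count
//     std::vector<VkPhysicalDevice> devices(count);
//     vkEnumeratePhysicalDevices(instance, &count, devices.data()); // fetch handles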
11355 VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11356                                                         VkPhysicalDevice *pPhysicalDevices) {
11357     bool skip_call = false;
11358     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11359     if (instance_data->instance_state) {
11360         // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11361         if (NULL == pPhysicalDevices) {
11362             instance_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11363         } else {
11364             if (UNCALLED == instance_data->instance_state->vkEnumeratePhysicalDevicesState) {
11365                 // Flag warning here. You can call this without having queried the count, but it may not be
11366                 // robust on platforms with multiple physical devices.
11367                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11368                                     0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11369                                     "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
11370                                     "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11371             } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11372             else if (instance_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) {
11373                 // Having actual count match count from app is not a requirement, so this can be a warning
11374                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11375                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11376                                     "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
11377                                     "supported by this instance is %u.",
11378                                     *pPhysicalDeviceCount, instance_data->instance_state->physical_devices_count);
11379             }
11380             instance_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11381         }
11382         if (skip_call) {
11383             return VK_ERROR_VALIDATION_FAILED_EXT;
11384         }
11385         VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11386         if (NULL == pPhysicalDevices) {
11387             instance_data->instance_state->physical_devices_count = *pPhysicalDeviceCount;
11388         } else if (result == VK_SUCCESS) { // Save physical devices
11389             for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11390                 auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11391                 phys_device_state.phys_device = pPhysicalDevices[i];
11392                 // Init actual features for each physical device
11393                 instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11394             }
11395         }
11396         return result;
11397     } else {
11398         log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
11399                 DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
11400                 (uint64_t)instance);
11401     }
11402     return VK_ERROR_VALIDATION_FAILED_EXT;
11403 }
11404
11405 VKAPI_ATTR void VKAPI_CALL
11406 GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11407     VkQueueFamilyProperties *pQueueFamilyProperties) {
11408     bool skip_call = false;
11409     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11410     auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11411     if (physical_device_state) {
11412         if (!pQueueFamilyProperties) {
11413             physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11414         }
11415         else {
11416             // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
11417             // get count
11418             if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11419                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11420                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11421                     "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
11422                     "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
11423                     "NULL pQueueFamilyProperties to query pCount.");
11424             }
11425             // Then verify that pCount that is passed in on second call matches what was returned
11426             if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
11427
11428                 // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
11429                 // provide as warning
11430                 skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11431                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11432                     "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
11433                     "supported by this physicalDevice is %u.",
11434                     *pCount, physical_device_state->queueFamilyPropertiesCount);
11435             }
11436             physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11437         }
11438         if (skip_call) {
11439             return;
11440         }
11441         instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
11442         if (!pQueueFamilyProperties) {
11443             physical_device_state->queueFamilyPropertiesCount = *pCount;
11444         }
11445         else { // Save queue family properties
11446             if (physical_device_state->queue_family_properties.size() < *pCount)
11447                 physical_device_state->queue_family_properties.resize(*pCount);
11448             for (uint32_t i = 0; i < *pCount; i++) {
11449                 physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
11450             }
11451         }
11452     }
11453     else {
11454         log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
11455             __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
11456             "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
11457             (uint64_t)physicalDevice);
11458     }
11459 }
11460
11461 VKAPI_ATTR VkResult VKAPI_CALL
11462 CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11463                              const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
11464     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11465     VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11466     if (VK_SUCCESS == res) {
11467         std::lock_guard<std::mutex> lock(global_lock);
11468         res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11469     }
11470     return res;
11471 }
11472
11473 VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
11474                                                          VkDebugReportCallbackEXT msgCallback,
11475                                                          const VkAllocationCallbacks *pAllocator) {
11476     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11477     instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11478     std::lock_guard<std::mutex> lock(global_lock);
11479     layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
11480 }
11481
11482 VKAPI_ATTR void VKAPI_CALL
11483 DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
11484                       size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11485     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11486     instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11487 }
11488
11489 VKAPI_ATTR VkResult VKAPI_CALL
11490 EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11491     return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11492 }
11493
11494 VKAPI_ATTR VkResult VKAPI_CALL
11495 EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
11496     return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11497 }
11498
11499 VKAPI_ATTR VkResult VKAPI_CALL
11500 EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
11501     if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11502         return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11503
11504     return VK_ERROR_LAYER_NOT_PRESENT;
11505 }
11506
11507 VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
11508                                                                   const char *pLayerName, uint32_t *pCount,
11509                                                                   VkExtensionProperties *pProperties) {
11510     if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11511         return util_GetExtensionProperties(0, NULL, pCount, pProperties);
11512
11513     assert(physicalDevice);
11514
11515     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11516     return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
11517 }
11518
11519 static PFN_vkVoidFunction
11520 intercept_core_instance_command(const char *name);
11521
11522 static PFN_vkVoidFunction
11523 intercept_core_device_command(const char *name);
11524
11525 static PFN_vkVoidFunction
11526 intercept_khr_swapchain_command(const char *name, VkDevice dev);
11527
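// Proc-addr resolution order: the layer's own core intercepts are consulted
// first, then the WSI intercepts (and, for the instance entry point, device-level
// intercepts so device functions can be fetched through an instance), then the
// debug-report entry points, and finally the down-chain table. Anything
// unrecognized is forwarded so unvalidated extensions keep working.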
11528 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
11529     PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
11530     if (proc)
11531         return proc;
11532
11533     assert(dev);
11534
11535     proc = intercept_khr_swapchain_command(funcName, dev);
11536     if (proc)
11537         return proc;
11538
11539     layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
11540
11541     auto &table = dev_data->dispatch_table;
11542     if (!table.GetDeviceProcAddr)
11543         return nullptr;
11544     return table.GetDeviceProcAddr(dev, funcName);
11545 }
11546
11547 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
11548     PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
11549     if (!proc)
11550         proc = intercept_core_device_command(funcName);
11551     if (!proc)
11552         proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
11553     if (proc)
11554         return proc;
11555
11556     assert(instance);
11557
11558     instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11559     proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
11560     if (proc)
11561         return proc;
11562
11563     auto &table = instance_data->dispatch_table;
11564     if (!table.GetInstanceProcAddr)
11565         return nullptr;
11566     return table.GetInstanceProcAddr(instance, funcName);
11567 }
11568
11569 static PFN_vkVoidFunction
11570 intercept_core_instance_command(const char *name) {
11571     static const struct {
11572         const char *name;
11573         PFN_vkVoidFunction proc;
11574     } core_instance_commands[] = {
11575         { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
11576         { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
11577         { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
11578         { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
11579         { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
11580         { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
11581         { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
11582         { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
11583         { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
11584         { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
11585         { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
11586     };
11587
11588     for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
11589         if (!strcmp(core_instance_commands[i].name, name))
11590             return core_instance_commands[i].proc;
11591     }
11592
11593     return nullptr;
11594 }
11595
static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}
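
// For reference: GetDeviceProcAddr (defined earlier in this file) is expected
// to try the intercept tables first and only fall through to the next layer
// when a name is not intercepted here.  A minimal sketch of that chaining,
// assuming "dev_data->device_dispatch_table" names the next layer's dispatch
// table:
//
//     PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
//     if (!proc)
//         proc = intercept_khr_swapchain_command(funcName, dev);
//     if (proc)
//         return proc;
//     return dev_data->device_dispatch_table->GetDeviceProcAddr(dev, funcName);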

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };
    layer_data *dev_data = nullptr;

    // When the device is known, only hand out VK_KHR_swapchain entry points if
    // that extension was actually enabled on it; the wsi_enabled flag is
    // recorded at device-creation time from ppEnabledExtensionNames.
    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    // vkCreateSharedSwapchainsKHR is gated the same way, on the
    // VK_KHR_display_swapchain extension.
    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0, just wrappers since there is only a layer
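//
// In interface v0 the loader locates these exported symbols directly (e.g.
// via dlsym/GetProcAddress on the layer library).  An illustrative
// loader-side lookup (a sketch, not actual loader code; "layer_lib" is a
// placeholder library handle):
//
//     auto gipa = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
//         dlsym(layer_lib, "vkGetInstanceProcAddr"));
//     PFN_vkVoidFunction pfn = gipa(instance, "vkCreateSampler");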

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}
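
// Illustrative application-side usage (a sketch, not part of this layer):
// enabling the layer at instance creation makes the loader route API calls
// through the entry points above.
//
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation"};
//     VkInstanceCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//     ci.enabledLayerCount = 1;
//     ci.ppEnabledLayerNames = layers;
//     VkInstance instance;
//     vkCreateInstance(&ci, nullptr, &instance);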