1 /* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
4 * Copyright (C) 2015-2016 Google Inc.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * Author: Courtney Goeltzenleuchter <courtneygo@google.com>
19 * Author: Tobin Ehlis <tobine@google.com>
20 * Author: Chris Forbes <chrisf@ijw.co.nz>
21 * Author: Mark Lobodzinski <mark@lunarg.com>
23 #ifndef CORE_VALIDATION_TYPES_H_
24 #define CORE_VALIDATION_TYPES_H_
26 #include "hash_vk_types.h"
27 #include "vk_safe_struct.h"
28 #include "vulkan/vulkan.h"
29 #include "vk_validation_error_messages.h"
30 #include "vk_layer_logging.h"
31 #include "vk_object_types.h"
32 #include "vk_extension_helper.h"
37 #include <unordered_map>
38 #include <unordered_set>
43 // Fwd declarations -- including descriptor_set.h creates an ugly include loop
44 namespace cvdescriptorset {
// Canonical (device-independent) definition of a descriptor set layout.
45 class DescriptorSetLayoutDef;
// Per-device wrapper that pairs a DescriptorSetLayoutDef with a VkDescriptorSetLayout handle.
46 class DescriptorSetLayout;
48 } // namespace cvdescriptorset
50 struct GLOBAL_CB_NODE;
// CALL_STATE tracks an app's progress through the standard Vulkan
// two-call "query count, then query details" pattern (see its use for
// vkGetSwapchainImagesKHRState below).
53 UNCALLED, // Function has not been called
54 QUERY_COUNT, // Function called once to query a count
55 QUERY_DETAILS, // Function called w/ a count to query details
// BASE_NODE: common base for tracked Vulkan object state. Provides an
// in-flight reference count plus the set of command buffers that
// reference this object.
60 // Track when object is being used by an in-flight command buffer
61 std::atomic_int in_use;
62 // Track command buffers that this object is bound to
63 // binding initialized when cmd referencing object is bound to command buffer
64 // binding removed when command buffer is reset or destroyed
65 // When an object is destroyed, any bound cbs are set to INVALID
66 std::unordered_set<GLOBAL_CB_NODE *> cb_bindings;
// Start with no in-flight users.
68 BASE_NODE() { in_use.store(0); };
71 // Track command pools and their command buffers
72 struct COMMAND_POOL_NODE : public BASE_NODE {
// Flags the pool was created with (e.g. RESET_COMMAND_BUFFER_BIT).
73 VkCommandPoolCreateFlags createFlags;
// Queue family this pool's command buffers may be submitted to.
74 uint32_t queueFamilyIndex;
75 // Cmd buffers allocated from this pool
76 std::unordered_set<VkCommandBuffer> commandBuffers;
79 // Utilities for barriers and the command pool
// A barrier performs a queue family ownership transfer exactly when its
// source and destination queue family indices differ.
80 template <typename Barrier>
81 static bool IsTransferOp(const Barrier *barrier) {
82 return barrier->srcQueueFamilyIndex != barrier->dstQueueFamilyIndex;
// Release operation: the recording pool's queue family is the barrier's
// *source* family. assume_transfer lets callers that already know the
// barrier is a transfer skip the IsTransferOp() re-check.
85 template <typename Barrier, bool assume_transfer = false>
86 static bool IsReleaseOp(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
87 return (assume_transfer || IsTransferOp(barrier)) && (pool->queueFamilyIndex == barrier->srcQueueFamilyIndex);
// Acquire operation: the recording pool's queue family is the barrier's
// *destination* family.
90 template <typename Barrier, bool assume_transfer = false>
91 static bool IsAcquireOp(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
92 return (assume_transfer || IsTransferOp(barrier)) && (pool->queueFamilyIndex == barrier->dstQueueFamilyIndex);
95 // Generic wrapper for vulkan objects
98 VulkanObjectType type;
// Two VK_OBJECTs are equal when both the raw handle and the object type match
// (handles alone can collide across types on some implementations).
101 inline bool operator==(VK_OBJECT a, VK_OBJECT b) NOEXCEPT { return a.handle == b.handle && a.type == b.type; }
// Hash combines handle and type so VK_OBJECT can key unordered containers.
105 struct hash<VK_OBJECT> {
106 size_t operator()(VK_OBJECT obj) const NOEXCEPT { return hash<uint64_t>()(obj.handle) ^ hash<uint32_t>()(obj.type); }
// Cached physical-device properties and per-queue-family properties,
// captured so validation doesn't have to re-query the driver.
110 class PHYS_DEV_PROPERTIES_NODE {
112 VkPhysicalDeviceProperties properties;
// Indexed by queue family index.
113 std::vector<VkQueueFamilyProperties> queue_family_properties;
116 // Flags describing requirements imposed by the pipeline on a descriptor. These
117 // can't be checked at pipeline creation time as they depend on the Image or
// One bit per VkImageViewType, shifted by the enum's own value so the
// required view type can be tested with a single mask.
119 enum descriptor_req {
120 DESCRIPTOR_REQ_VIEW_TYPE_1D = 1 << VK_IMAGE_VIEW_TYPE_1D,
121 DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_1D_ARRAY,
122 DESCRIPTOR_REQ_VIEW_TYPE_2D = 1 << VK_IMAGE_VIEW_TYPE_2D,
123 DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_2D_ARRAY,
124 DESCRIPTOR_REQ_VIEW_TYPE_3D = 1 << VK_IMAGE_VIEW_TYPE_3D,
125 DESCRIPTOR_REQ_VIEW_TYPE_CUBE = 1 << VK_IMAGE_VIEW_TYPE_CUBE,
126 DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY = 1 << VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
// Mask covering every view-type bit above.
128 DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS = (1 << (VK_IMAGE_VIEW_TYPE_END_RANGE + 1)) - 1,
// Sample-count requirements occupy the bits just past the view-type bits.
130 DESCRIPTOR_REQ_SINGLE_SAMPLE = 2 << VK_IMAGE_VIEW_TYPE_END_RANGE,
131 DESCRIPTOR_REQ_MULTI_SAMPLE = DESCRIPTOR_REQ_SINGLE_SAMPLE << 1,
// Tracks a VkDescriptorPool: its creation limits plus the running count of
// sets/descriptors still available for allocation.
134 struct DESCRIPTOR_POOL_STATE : BASE_NODE {
135 VkDescriptorPool pool;
136 uint32_t maxSets; // Max descriptor sets allowed in this pool
137 uint32_t availableSets; // Available descriptor sets in this pool
139 safe_VkDescriptorPoolCreateInfo createInfo;
140 std::unordered_set<cvdescriptorset::DescriptorSet *> sets; // Collection of all sets in this pool
// Both vectors are indexed by VkDescriptorType value.
141 std::vector<uint32_t> maxDescriptorTypeCount; // Max # of descriptors of each type in this pool
142 std::vector<uint32_t> availableDescriptorTypeCount; // Available # of descriptors of each type in this pool
144 DESCRIPTOR_POOL_STATE(const VkDescriptorPool pool, const VkDescriptorPoolCreateInfo *pCreateInfo)
146 maxSets(pCreateInfo->maxSets),
147 availableSets(pCreateInfo->maxSets),
148 createInfo(pCreateInfo),
149 maxDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE, 0),
150 availableDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE, 0) {
151 // Collect maximums per descriptor type.
152 for (uint32_t i = 0; i < createInfo.poolSizeCount; ++i) {
153 uint32_t typeIndex = static_cast<uint32_t>(createInfo.pPoolSizes[i].type);
154 // Same descriptor types can appear several times
155 maxDescriptorTypeCount[typeIndex] += createInfo.pPoolSizes[i].descriptorCount;
// Nothing allocated yet, so available == max.
156 availableDescriptorTypeCount[typeIndex] = maxDescriptorTypeCount[typeIndex];
161 // Generic memory binding struct to track objects bound to objects
// Equality is memberwise over (mem, offset, size).
168 inline bool operator==(MEM_BINDING a, MEM_BINDING b) NOEXCEPT { return a.mem == b.mem && a.offset == b.offset && a.size == b.size; }
// Hash folds all three members so MEM_BINDING can key unordered containers.
172 struct hash<MEM_BINDING> {
173 size_t operator()(MEM_BINDING mb) const NOEXCEPT {
// reinterpret the non-dispatchable handle as its underlying 64-bit value
174 auto intermediate = hash<uint64_t>()(reinterpret_cast<uint64_t &>(mb.mem)) ^ hash<uint64_t>()(mb.offset);
175 return intermediate ^ hash<uint64_t>()(mb.size);
180 // Superclass for bindable object state (currently images and buffers)
181 class BINDABLE : public BASE_NODE {
183 bool sparse; // Is this object being bound with sparse memory or not?
184 // Non-sparse binding data
186 // Memory requirements for this BINDABLE
187 VkMemoryRequirements requirements;
188 // bool to track if memory requirements were checked
189 bool memory_requirements_checked;
190 // Sparse binding data, initially just tracking MEM_BINDING per mem object
191 // There's more data for sparse bindings so need better long-term solution
192 // TODO : Need to update solution to track all sparse binding data
193 std::unordered_set<MEM_BINDING> sparse_bindings;
// Cached union of every VkDeviceMemory bound to this object (sparse + non-sparse).
195 std::unordered_set<VkDeviceMemory> bound_memory_set_;
198 : sparse(false), binding{}, requirements{}, memory_requirements_checked(false), sparse_bindings{}, bound_memory_set_{} {};
200 // Update the cached set of memory bindings.
201 // Code that changes binding.mem or sparse_bindings must call UpdateBoundMemorySet()
202 void UpdateBoundMemorySet() {
203 bound_memory_set_.clear();
205 bound_memory_set_.insert(binding.mem);
207 for (auto sb : sparse_bindings) {
208 bound_memory_set_.insert(sb.mem);
213 // Return unordered set of memory objects that are bound
214 // Instead of creating a set from scratch each query, return the cached one
215 const std::unordered_set<VkDeviceMemory> &GetBoundMemory() const { return bound_memory_set_; }
// Tracks a VkBuffer and a deep copy of its create info.
218 class BUFFER_STATE : public BINDABLE {
221 VkBufferCreateInfo createInfo;
// Deep-copy pQueueFamilyIndices: the app's array is only guaranteed valid
// for the duration of vkCreateBuffer, so keep our own copy.
222 BUFFER_STATE(VkBuffer buff, const VkBufferCreateInfo *pCreateInfo) : buffer(buff), createInfo(*pCreateInfo) {
223 if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
224 uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
225 for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
226 pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
228 createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
231 if (createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
// Non-copyable: createInfo owns a heap array, so a shallow copy would double-free.
236 BUFFER_STATE(BUFFER_STATE const &rh_obj) = delete;
// Destructor body: release the deep-copied queue family index array
// (mirrors the copy made in the constructor above).
239 if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
240 delete[] createInfo.pQueueFamilyIndices;
241 createInfo.pQueueFamilyIndices = nullptr;
// Tracks a VkBufferView handle and a copy of its create info.
246 class BUFFER_VIEW_STATE : public BASE_NODE {
248 VkBufferView buffer_view;
249 VkBufferViewCreateInfo create_info;
250 BUFFER_VIEW_STATE(VkBufferView bv, const VkBufferViewCreateInfo *ci) : buffer_view(bv), create_info(*ci){};
// Non-copyable, consistent with the other *_STATE wrappers.
251 BUFFER_VIEW_STATE(const BUFFER_VIEW_STATE &rh_obj) = delete;
// Tracks a VkSampler handle and a copy of its create info.
254 struct SAMPLER_STATE : public BASE_NODE {
256 VkSamplerCreateInfo createInfo;
258 SAMPLER_STATE(const VkSampler *ps, const VkSamplerCreateInfo *pci) : sampler(*ps), createInfo(*pci){};
// Tracks a VkImage: create info plus swapchain/sparse bookkeeping flags.
261 class IMAGE_STATE : public BINDABLE {
264 VkImageCreateInfo createInfo;
265 bool valid; // If this is a swapchain image backing memory track valid here as it doesn't have DEVICE_MEM_INFO
266 bool acquired; // If this is a swapchain image, has it been acquired by the app.
267 bool shared_presentable; // True for a front-buffered swapchain image
268 bool layout_locked; // A front-buffered image that has been presented can never have layout transitioned
269 bool get_sparse_reqs_called; // Track if GetImageSparseMemoryRequirements() has been called for this image
270 bool sparse_metadata_required; // Track if sparse metadata aspect is required for this image
271 bool sparse_metadata_bound; // Track if sparse metadata aspect is bound to this image
272 std::vector<VkSparseImageMemoryRequirements> sparse_requirements;
273 IMAGE_STATE(VkImage img, const VkImageCreateInfo *pCreateInfo)
275 createInfo(*pCreateInfo),
278 shared_presentable(false),
279 layout_locked(false),
280 get_sparse_reqs_called(false),
281 sparse_metadata_required(false),
282 sparse_metadata_bound(false),
283 sparse_requirements{} {
// Deep-copy pQueueFamilyIndices (same lifetime concern as BUFFER_STATE):
// the app's array is only valid for the duration of vkCreateImage.
284 if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
285 uint32_t *pQueueFamilyIndices = new uint32_t[createInfo.queueFamilyIndexCount];
286 for (uint32_t i = 0; i < createInfo.queueFamilyIndexCount; i++) {
287 pQueueFamilyIndices[i] = pCreateInfo->pQueueFamilyIndices[i];
289 createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
292 if (createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
// Non-copyable: createInfo owns a heap array, so a shallow copy would double-free.
297 IMAGE_STATE(IMAGE_STATE const &rh_obj) = delete;
// Destructor body: release the deep-copied queue family index array.
300 if ((createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) && (createInfo.queueFamilyIndexCount > 0)) {
301 delete[] createInfo.pQueueFamilyIndices;
302 createInfo.pQueueFamilyIndices = nullptr;
// Tracks a VkImageView handle and a copy of its create info.
307 class IMAGE_VIEW_STATE : public BASE_NODE {
309 VkImageView image_view;
310 VkImageViewCreateInfo create_info;
311 IMAGE_VIEW_STATE(VkImageView iv, const VkImageViewCreateInfo *ci) : image_view(iv), create_info(*ci){};
// Non-copyable, consistent with the other *_STATE wrappers.
312 IMAGE_VIEW_STATE(const IMAGE_VIEW_STATE &rh_obj) = delete;
// A sub-range of a VkDeviceMemory allocation bound to an image or buffer,
// used for overlap/alias tracking.
320 struct MEMORY_RANGE {
322 bool image; // True for image, false for buffer
323 bool linear; // True for buffers and linear images
324 bool valid; // True if this range is known to be valid
325 VkDeviceMemory memory;
328 VkDeviceSize end; // Store this pre-computed for simplicity
329 // Set of ptrs to every range aliased with this one
330 std::unordered_set<MEMORY_RANGE *> aliases;
333 // Data struct for tracking memory object
334 struct DEVICE_MEM_INFO : public BASE_NODE {
335 void *object; // Dispatchable object used to create this memory (device or swapchain)
336 bool global_valid; // If allocation is mapped or external, set to "true" to be picked up by subsequently bound ranges
338 VkMemoryAllocateInfo alloc_info;
// VK_KHR_dedicated_allocation: the single buffer/image this memory is dedicated to
// (VK_NULL_HANDLE when not a dedicated allocation).
340 VkBuffer dedicated_buffer;
341 VkImage dedicated_image;
342 std::unordered_set<VK_OBJECT> obj_bindings; // objects bound to this memory
343 std::unordered_map<uint64_t, MEMORY_RANGE> bound_ranges; // Map of object to its binding range
344 // Convenience vectors image/buff handles to speed up iterating over images or buffers independently
345 std::unordered_set<uint64_t> bound_images;
346 std::unordered_set<uint64_t> bound_buffers;
// Shadow-copy bookkeeping used by mapped-memory guard-band checking:
349 void *shadow_copy_base; // Base of layer's allocation for guard band, data, and alignment space
350 void *shadow_copy; // Pointer to start of guard-band data before mapped region
351 uint64_t shadow_pad_size; // Size of the guard-band data before and after actual data. It MUST be a
352 // multiple of limits.minMemoryMapAlignment
353 void *p_driver_data; // Pointer to application's actual memory
355 DEVICE_MEM_INFO(void *disp_object, const VkDeviceMemory in_mem, const VkMemoryAllocateInfo *p_alloc_info)
356 : object(disp_object),
359 alloc_info(*p_alloc_info),
361 dedicated_buffer(VK_NULL_HANDLE),
362 dedicated_image(VK_NULL_HANDLE),
// Tracks a VkSwapchainKHR, its images, and the app's progress through
// the vkGetSwapchainImagesKHR count/details query pattern.
370 class SWAPCHAIN_NODE {
372 safe_VkSwapchainCreateInfoKHR createInfo;
373 VkSwapchainKHR swapchain;
374 std::vector<VkImage> images;
// Set when this swapchain was passed as oldSwapchain to a newer one.
375 bool replaced = false;
// True for shared-presentable (front-buffered) swapchains.
376 bool shared_presentable = false;
377 CALL_STATE vkGetSwapchainImagesKHRState = UNCALLED;
378 uint32_t get_swapchain_image_count = 0;
379 SWAPCHAIN_NODE(const VkSwapchainCreateInfoKHR *pCreateInfo, VkSwapchainKHR swapchain)
380 : createInfo(pCreateInfo), swapchain(swapchain) {}
// Per-command-buffer image layout record: the layout the image had when
// first referenced in the CB (initialLayout) and its current layout.
383 class IMAGE_CMD_BUF_LAYOUT_NODE {
385 IMAGE_CMD_BUF_LAYOUT_NODE() = default;
386 IMAGE_CMD_BUF_LAYOUT_NODE(VkImageLayout initialLayoutInput, VkImageLayout layoutInput)
387 : initialLayout(initialLayoutInput), layout(layoutInput) {}
389 VkImageLayout initialLayout;
390 VkImageLayout layout;
// DAG node members: predecessor/successor subpass indices used for
// render pass dependency-graph analysis.
396 std::vector<uint32_t> prev;
397 std::vector<uint32_t> next;
// Tracks a VkRenderPass plus derived per-subpass dependency information.
400 struct RENDER_PASS_STATE : public BASE_NODE {
401 VkRenderPass renderPass;
402 safe_VkRenderPassCreateInfo createInfo;
// Indexed by subpass: true when the subpass has a self-dependency.
403 std::vector<bool> hasSelfDependency;
// Dependency DAG, one node per subpass.
404 std::vector<DAGNode> subpassToNode;
405 std::vector<int32_t> subpass_to_dependency_index; // srcSubpass to dependency index of self dep, or -1 if none
// attachment index -> whether the first use of the attachment is a read.
406 std::unordered_map<uint32_t, bool> attachment_first_read;
408 RENDER_PASS_STATE(VkRenderPassCreateInfo const *pCreateInfo) : createInfo(pCreateInfo) {}
411 // vkCmd tracking -- complete as of header 1.0.68
412 // please keep in "none, then sorted" order
413 // Note: grepping vulkan.h for VKAPI_CALL.*vkCmd will return all functions except vkEndCommandBuffer
// One enumerator per vkCmd* entry point (plus vkEndCommandBuffer), used to
// record/validate which commands a command buffer contains.
419 CMD_BINDDESCRIPTORSETS,
422 CMD_BINDVERTEXBUFFERS,
424 CMD_CLEARATTACHMENTS,
426 CMD_CLEARDEPTHSTENCILIMAGE,
428 CMD_COPYBUFFERTOIMAGE,
430 CMD_COPYIMAGETOBUFFER,
431 CMD_COPYQUERYPOOLRESULTS,
432 CMD_DEBUGMARKERBEGINEXT,
433 CMD_DEBUGMARKERENDEXT,
434 CMD_DEBUGMARKERINSERTEXT,
437 CMD_DISPATCHINDIRECT,
440 CMD_DRAWINDEXEDINDIRECT,
441 CMD_DRAWINDEXEDINDIRECTCOUNTAMD,
443 CMD_DRAWINDIRECTCOUNTAMD,
444 CMD_ENDCOMMANDBUFFER, // Should be the last command in any RECORDED cmd buffer
451 CMD_PROCESSCOMMANDSNVX,
453 CMD_PUSHDESCRIPTORSETKHR,
454 CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR,
455 CMD_RESERVESPACEFORCOMMANDSNVX,
459 CMD_SETBLENDCONSTANTS,
462 CMD_SETDEVICEMASKKHX,
463 CMD_SETDISCARDRECTANGLEEXT,
466 CMD_SETSAMPLELOCATIONSEXT,
468 CMD_SETSTENCILCOMPAREMASK,
469 CMD_SETSTENCILREFERENCE,
470 CMD_SETSTENCILWRITEMASK,
472 CMD_SETVIEWPORTWSCALINGNV,
// CB_STATE values: lifecycle state of a command buffer's recording.
479 CB_NEW, // Newly created CB w/o any cmds
480 CB_RECORDING, // BeginCB has been called on this CB
481 CB_RECORDED, // EndCB has been called on this CB
482 CB_INVALID_COMPLETE, // had a complete recording, but was since invalidated
483 CB_INVALID_INCOMPLETE, // fouled before recording was completed
486 // CB Status -- used to track status of various bindings on cmd buffer objects
487 typedef VkFlags CBStatusFlags;
// One bit per piece of dynamic state / binding that must be set before a draw.
488 enum CBStatusFlagBits {
490 CBSTATUS_NONE = 0x00000000, // No status is set
491 CBSTATUS_LINE_WIDTH_SET = 0x00000001, // Line width has been set
492 CBSTATUS_DEPTH_BIAS_SET = 0x00000002, // Depth bias has been set
493 CBSTATUS_BLEND_CONSTANTS_SET = 0x00000004, // Blend constants state has been set
494 CBSTATUS_DEPTH_BOUNDS_SET = 0x00000008, // Depth bounds state object has been set
495 CBSTATUS_STENCIL_READ_MASK_SET = 0x00000010, // Stencil read mask has been set
496 CBSTATUS_STENCIL_WRITE_MASK_SET = 0x00000020, // Stencil write mask has been set
497 CBSTATUS_STENCIL_REFERENCE_SET = 0x00000040, // Stencil reference has been set
498 CBSTATUS_VIEWPORT_SET = 0x00000080,
499 CBSTATUS_SCISSOR_SET = 0x00000100,
500 CBSTATUS_INDEX_BUFFER_BOUND = 0x00000200, // Index buffer has been set
501 CBSTATUS_ALL_STATE_SET = 0x000001FF, // All state set (intentionally exclude index buffer)
// Tracks a VkDescriptorUpdateTemplateKHR and a copy of its create info.
505 struct TEMPLATE_STATE {
506 VkDescriptorUpdateTemplateKHR desc_update_template;
507 safe_VkDescriptorUpdateTemplateCreateInfo create_info;
509 TEMPLATE_STATE(VkDescriptorUpdateTemplateKHR update_template, safe_VkDescriptorUpdateTemplateCreateInfo *pCreateInfo)
510 : desc_update_template(update_template), create_info(*pCreateInfo) {}
// QueryObject identity is (pool, index within pool).
518 inline bool operator==(const QueryObject &query1, const QueryObject &query2) {
519 return (query1.pool == query2.pool && query1.index == query2.index);
// Hash combines pool handle and slot index for unordered-container use.
524 struct hash<QueryObject> {
525 size_t operator()(QueryObject query) const throw() {
526 return hash<uint64_t>()((uint64_t)(query.pool)) ^ hash<uint32_t>()(query.index);
// DRAW_DATA member: vertex buffers bound at the time of a draw.
531 std::vector<VkBuffer> buffers;
// An image plus an optional single subresource; when hasSubresource is
// false the pair refers to the image as a whole.
534 struct ImageSubresourcePair {
537 VkImageSubresource subresource;
// Subresource fields only participate in equality when both sides track one.
540 inline bool operator==(const ImageSubresourcePair &img1, const ImageSubresourcePair &img2) {
541 if (img1.image != img2.image || img1.hasSubresource != img2.hasSubresource) return false;
542 return !img1.hasSubresource ||
543 (img1.subresource.aspectMask == img2.subresource.aspectMask && img1.subresource.mipLevel == img2.subresource.mipLevel &&
544 img1.subresource.arrayLayer == img2.subresource.arrayLayer);
// Hash mirrors operator==: subresource fields are folded in only when present.
549 struct hash<ImageSubresourcePair> {
550 size_t operator()(ImageSubresourcePair img) const throw() {
551 size_t hashVal = hash<uint64_t>()(reinterpret_cast<uint64_t &>(img.image));
552 hashVal ^= hash<bool>()(img.hasSubresource);
553 if (img.hasSubresource) {
554 hashVal ^= hash<uint32_t>()(reinterpret_cast<uint32_t &>(img.subresource.aspectMask));
555 hashVal ^= hash<uint32_t>()(img.subresource.mipLevel);
556 hashVal ^= hash<uint32_t>()(img.subresource.arrayLayer);
563 // Canonical dictionary for PushConstantRanges
// "Canonical dictionary" = hash_util::Dictionary that dedupes equal values
// and hands out shared Ids, so equality checks become cheap Id compares.
564 using PushConstantRangesDict = hash_util::Dictionary<PushConstantRanges>;
565 using PushConstantRangesId = PushConstantRangesDict::Id;
567 // Canonical dictionary for the pipeline layout's layout of descriptorsetlayouts
568 using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef;
569 using DescriptorSetLayoutId = std::shared_ptr<const DescriptorSetLayoutDef>;
570 using PipelineLayoutSetLayoutsDef = std::vector<DescriptorSetLayoutId>;
571 using PipelineLayoutSetLayoutsDict =
572 hash_util::Dictionary<PipelineLayoutSetLayoutsDef, hash_util::IsOrderedContainer<PipelineLayoutSetLayoutsDef>>;
573 using PipelineLayoutSetLayoutsId = PipelineLayoutSetLayoutsDict::Id;
575 // Defines/stores a compatibility definition for set N
576 // The "layout layout" must store at least set+1 entries, but only the first set+1 are considered for hash and equality testing
577 // Note: the "canonical" data are referenced by Id, not including handle or device specific state
578 // Note: hash and equality only consider layout_id entries [0, set] for determining uniqueness
579 struct PipelineLayoutCompatDef {
581 PushConstantRangesId push_constant_ranges;
582 PipelineLayoutSetLayoutsId set_layouts_id;
583 PipelineLayoutCompatDef(const uint32_t set_index, const PushConstantRangesId pcr_id, const PipelineLayoutSetLayoutsId sl_id)
584 : set(set_index), push_constant_ranges(pcr_id), set_layouts_id(sl_id) {}
// Defined out-of-line; compares push-constant ranges and set layouts [0, set].
586 bool operator==(const PipelineLayoutCompatDef &other) const;
589 // Canonical dictionary for PipelineLayoutCompat records
590 using PipelineLayoutCompatDict = hash_util::Dictionary<PipelineLayoutCompatDef, hash_util::HasHashMember<PipelineLayoutCompatDef>>;
591 using PipelineLayoutCompatId = PipelineLayoutCompatDict::Id;
593 // Store layouts and pushconstants for PipelineLayout
594 struct PIPELINE_LAYOUT_NODE {
595 VkPipelineLayout layout;
596 std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts;
597 PushConstantRangesId push_constant_ranges;
// Per-set compatibility Ids used for pipeline-layout compatibility checks.
598 std::vector<PipelineLayoutCompatId> compat_for_set;
600 PIPELINE_LAYOUT_NODE() : layout(VK_NULL_HANDLE), set_layouts{}, push_constant_ranges{}, compat_for_set{} {}
// reset() body: return the node to its default-constructed state.
603 layout = VK_NULL_HANDLE;
605 push_constant_ranges.reset();
606 compat_for_set.clear();
// Tracks a VkPipeline. Exactly one of graphicsPipelineCI/computePipelineCI is
// meaningful; init*Pipeline() zero-initializes the other.
610 class PIPELINE_STATE : public BASE_NODE {
613 safe_VkGraphicsPipelineCreateInfo graphicsPipelineCI;
614 // Hold shared ptr to RP in case RP itself is destroyed
615 std::shared_ptr<RENDER_PASS_STATE> rp_state;
616 safe_VkComputePipelineCreateInfo computePipelineCI;
617 // Flag of which shader stages are active for this pipeline
618 uint32_t active_shaders;
// Stage bits that appeared more than once in pStages (an error the layer reports).
619 uint32_t duplicate_shaders;
620 // Capture which slots (set#->bindings) are actually used by the shaders of this pipeline
621 std::unordered_map<uint32_t, std::map<uint32_t, descriptor_req>> active_slots;
622 // Vtx input info (if any)
623 std::vector<VkVertexInputBindingDescription> vertexBindingDescriptions;
624 std::vector<VkPipelineColorBlendAttachmentState> attachments;
625 bool blendConstantsEnabled; // Blend constants enabled for any attachments
626 PIPELINE_LAYOUT_NODE pipeline_layout;
627 VkPrimitiveTopology topology_at_rasterizer;
629 // Default constructor
632 graphicsPipelineCI{},
636 duplicate_shaders(0),
638 vertexBindingDescriptions(),
640 blendConstantsEnabled(false),
642 topology_at_rasterizer{} {}
// Populate state from a graphics pipeline create info. Determines whether the
// target subpass actually uses color / depth-stencil attachments so the safe
// struct can drop state that Vulkan allows to be invalid when unused.
644 void initGraphicsPipeline(const VkGraphicsPipelineCreateInfo *pCreateInfo, std::shared_ptr<RENDER_PASS_STATE> &&rpstate) {
645 bool uses_color_attachment = false;
646 bool uses_depthstencil_attachment = false;
647 if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) {
648 const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass];
650 for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
651 if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
652 uses_color_attachment = true;
657 if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
658 uses_depthstencil_attachment = true;
661 graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
662 // Make sure compute pipeline is null
663 VkComputePipelineCreateInfo emptyComputeCI = {};
664 computePipelineCI.initialize(&emptyComputeCI);
// Record each stage bit; a bit already set means the stage was duplicated.
665 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
666 const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
667 this->duplicate_shaders |= this->active_shaders & pPSSCI->stage;
668 this->active_shaders |= pPSSCI->stage;
// Cache vertex input bindings for later draw-time validation.
670 if (graphicsPipelineCI.pVertexInputState) {
671 const auto pVICI = graphicsPipelineCI.pVertexInputState;
672 if (pVICI->vertexBindingDescriptionCount) {
673 this->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
674 pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
// Cache color blend attachment state.
677 if (graphicsPipelineCI.pColorBlendState) {
678 const auto pCBCI = graphicsPipelineCI.pColorBlendState;
679 if (pCBCI->attachmentCount) {
680 this->attachments = std::vector<VkPipelineColorBlendAttachmentState>(pCBCI->pAttachments,
681 pCBCI->pAttachments + pCBCI->attachmentCount);
684 if (graphicsPipelineCI.pInputAssemblyState) {
685 topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
// Populate state from a compute pipeline create info; zeroes the graphics CI.
690 void initComputePipeline(const VkComputePipelineCreateInfo *pCreateInfo) {
691 computePipelineCI.initialize(pCreateInfo);
692 // Make sure gfx pipeline is null
693 VkGraphicsPipelineCreateInfo emptyGraphicsCI = {};
694 graphicsPipelineCI.initialize(&emptyGraphicsCI, false, false);
695 switch (computePipelineCI.stage.stage) {
696 case VK_SHADER_STAGE_COMPUTE_BIT:
697 this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
706 // Track last states that are bound per pipeline bind point (Gfx & Compute)
707 struct LAST_BOUND_STATE {
708 PIPELINE_STATE *pipeline_state;
709 VkPipelineLayout pipeline_layout;
710 // Track each set that has been bound
711 // Ordered bound set tracking where index is set# that given set is bound to
712 std::vector<cvdescriptorset::DescriptorSet *> boundDescriptorSets;
// Owned storage for a set bound via vkCmdPushDescriptorSetKHR.
713 std::unique_ptr<cvdescriptorset::DescriptorSet> push_descriptor_set;
714 // one dynamic offset per dynamic descriptor bound to this CB
715 std::vector<std::vector<uint32_t>> dynamicOffsets;
716 std::vector<PipelineLayoutCompatId> compat_id_for_set;
// reset() body: clear all bound state back to defaults.
719 pipeline_state = nullptr;
720 pipeline_layout = VK_NULL_HANDLE;
721 boundDescriptorSets.clear();
722 push_descriptor_set = nullptr;
723 dynamicOffsets.clear();
726 // Cmd Buffer Wrapper Struct - TODO : This desperately needs its own class
727 struct GLOBAL_CB_NODE : public BASE_NODE {
728 VkCommandBuffer commandBuffer;
729 VkCommandBufferAllocateInfo createInfo = {};
730 VkCommandBufferBeginInfo beginInfo;
731 VkCommandBufferInheritanceInfo inheritanceInfo;
732 VkDevice device; // device this CB belongs to
734 CB_STATE state; // Track cmd buffer update state
735 uint64_t submitCount; // Number of times CB has been submitted
736 typedef uint64_t ImageLayoutUpdateCount;
737 ImageLayoutUpdateCount image_layout_change_count; // The sequence number for changes to image layout (for cached validation)
738 CBStatusFlags status; // Track status of various bindings on cmd buffer
739 CBStatusFlags static_status; // All state bits provided by current graphics pipeline
740 // rather than dynamic state
741 // Currently storing "lastBound" objects on per-CB basis
742 // long-term may want to create caches of "lastBound" states and could have
743 // each individual CMD_NODE referencing its own "lastBound" state
744 // Store last bound state for Gfx & Compute pipeline bind points
745 LAST_BOUND_STATE lastBound[VK_PIPELINE_BIND_POINT_RANGE_SIZE];
// Bitmasks of which viewports/scissors have been set (one bit per index).
747 uint32_t viewportMask;
748 uint32_t scissorMask;
// Active render pass instance state (valid between vkCmdBeginRenderPass/EndRenderPass).
749 VkRenderPassBeginInfo activeRenderPassBeginInfo;
750 RENDER_PASS_STATE *activeRenderPass;
751 VkSubpassContents activeSubpassContents;
752 uint32_t activeSubpass;
753 VkFramebuffer activeFramebuffer;
754 std::unordered_set<VkFramebuffer> framebuffers;
755 // Unified data structs to track objects bound to this command buffer as well as object
756 // dependencies that have been broken : either destroyed objects, or updated descriptor sets
757 std::unordered_set<VK_OBJECT> object_bindings;
758 std::vector<VK_OBJECT> broken_bindings;
// Event/query bookkeeping for submit-time validation:
760 std::unordered_set<VkEvent> waitedEvents;
761 std::vector<VkEvent> writeEventsBeforeWait;
762 std::vector<VkEvent> events;
763 std::unordered_map<QueryObject, std::unordered_set<VkEvent>> waitedEventsBeforeQueryReset;
764 std::unordered_map<QueryObject, bool> queryToStateMap; // 0 is unavailable, 1 is available
765 std::unordered_set<QueryObject> activeQueries;
766 std::unordered_set<QueryObject> startedQueries;
// Per-CB view of image layouts, keyed by image (sub)resource.
767 std::unordered_map<ImageSubresourcePair, IMAGE_CMD_BUF_LAYOUT_NODE> imageLayoutMap;
768 std::unordered_map<VkEvent, VkPipelineStageFlags> eventToStageMap;
769 std::vector<DRAW_DATA> drawData;
770 DRAW_DATA currentDrawData;
771 bool vertex_buffer_used; // Track for perf warning to make sure any bound vtx buffer used
772 VkCommandBuffer primaryCommandBuffer;
773 // Track images and buffers that are updated by this CB at the point of a draw
774 std::unordered_set<VkImageView> updateImages;
775 std::unordered_set<VkBuffer> updateBuffers;
776 // If primary, the secondary command buffers we will call.
777 // If secondary, the primary command buffers we will be called by.
778 std::unordered_set<GLOBAL_CB_NODE *> linkedCommandBuffers;
779 // Validation functions run at primary CB queue submit time
780 std::vector<std::function<bool()>> queue_submit_functions;
781 // Validation functions run when secondary CB is executed in primary
782 std::vector<std::function<bool(GLOBAL_CB_NODE *, VkFramebuffer)>> cmd_execute_commands_functions;
783 std::unordered_set<VkDeviceMemory> memObjs;
// Deferred checks run at queue-submit time with the submitting VkQueue.
784 std::vector<std::function<bool(VkQueue)>> eventUpdates;
785 std::vector<std::function<bool(VkQueue)>> queryUpdates;
// Sets already validated for this CB; used to skip redundant re-validation.
786 std::unordered_set<cvdescriptorset::DescriptorSet *> validated_descriptor_sets;
// A pending wait on a semaphore, recorded at submit time.
789 struct SEMAPHORE_WAIT {
790 VkSemaphore semaphore;
// One queue submission: the command buffers plus the semaphores it
// waits on, signals, and treats as external.
795 struct CB_SUBMISSION {
796 CB_SUBMISSION(std::vector<VkCommandBuffer> const &cbs, std::vector<SEMAPHORE_WAIT> const &waitSemaphores,
797 std::vector<VkSemaphore> const &signalSemaphores, std::vector<VkSemaphore> const &externalSemaphores,
800 waitSemaphores(waitSemaphores),
801 signalSemaphores(signalSemaphores),
802 externalSemaphores(externalSemaphores),
805 std::vector<VkCommandBuffer> cbs;
806 std::vector<SEMAPHORE_WAIT> waitSemaphores;
807 std::vector<VkSemaphore> signalSemaphores;
808 std::vector<VkSemaphore> externalSemaphores;
// Global (non-per-CB) image layout record.
812 struct IMAGE_LAYOUT_NODE {
813 VkImageLayout layout;
817 // CHECK_DISABLED struct is a container for bools that can block validation checks from being performed.
818 // The end goal is to have all checks guarded by a bool. The bools are all "false" by default meaning that all checks
819 // are enabled. At CreateInstance time, the user can use the VK_EXT_validation_flags extension to pass in enum values
820 // of VkValidationCheckEXT that will selectively disable checks.
821 struct CHECK_DISABLED {
822 bool command_buffer_state;
823 bool create_descriptor_set_layout;
824 bool destroy_buffer_view; // Skip validation at DestroyBufferView time
825 bool destroy_image_view; // Skip validation at DestroyImageView time
826 bool destroy_pipeline; // Skip validation at DestroyPipeline time
827 bool destroy_descriptor_pool; // Skip validation at DestroyDescriptorPool time
828 bool destroy_framebuffer; // Skip validation at DestroyFramebuffer time
829 bool destroy_renderpass; // Skip validation at DestroyRenderpass time
830 bool destroy_image; // Skip validation at DestroyImage time
831 bool destroy_sampler; // Skip validation at DestroySampler time
832 bool destroy_command_pool; // Skip validation at DestroyCommandPool time
833 bool destroy_event; // Skip validation at DestroyEvent time
834 bool free_memory; // Skip validation at FreeMemory time
835 bool object_in_use; // Skip all object in_use checking
836 bool idle_descriptor_set; // Skip check to verify that descriptor set is no in-use
837 bool push_constant_range; // Skip push constant range checks
838 bool free_descriptor_sets; // Skip validation prior to vkFreeDescriptorSets()
839 bool allocate_descriptor_sets; // Skip validation prior to vkAllocateDescriptorSets()
840 bool update_descriptor_sets; // Skip validation prior to vkUpdateDescriptorSets()
841 bool wait_for_fences;
842 bool get_fence_state;
843 bool queue_wait_idle;
844 bool device_wait_idle;
846 bool destroy_semaphore;
847 bool destroy_query_pool;
848 bool get_query_pool_results;
850 bool shader_validation; // Skip validation for shaders
// NOTE: relies on the members forming a contiguous run of bools from
// command_buffer_state (first) to shader_validation (last) -- adding or
// reordering members without updating these endpoints will silently break it.
852 void SetAll(bool value) { std::fill(&command_buffer_state, &shader_validation + 1, value); }
// Per-attachment info for a framebuffer (view state plus, presumably,
// the backing image handle tracked on the lines not shown here).
855 struct MT_FB_ATTACHMENT_INFO {
856 IMAGE_VIEW_STATE *view_state;
// Tracks a VkFramebuffer; holds a shared_ptr to its render pass so the RP
// state outlives VkRenderPass destruction (same pattern as PIPELINE_STATE).
860 class FRAMEBUFFER_STATE : public BASE_NODE {
862 VkFramebuffer framebuffer;
863 safe_VkFramebufferCreateInfo createInfo;
864 std::shared_ptr<RENDER_PASS_STATE> rp_state;
865 std::vector<MT_FB_ATTACHMENT_INFO> attachments;
866 FRAMEBUFFER_STATE(VkFramebuffer fb, const VkFramebufferCreateInfo *pCreateInfo, std::shared_ptr<RENDER_PASS_STATE> &&rpstate)
867 : framebuffer(fb), createInfo(pCreateInfo), rp_state(rpstate){};
870 struct shader_module;
871 struct DeviceExtensions;
873 // Fwd declarations of layer_data and helpers to look-up/validate state from layer_data maps
874 namespace core_validation {
// Handle -> state-object accessors (return nullptr when the handle is unknown):
876 cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *, VkDescriptorSet);
877 std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *, VkDescriptorSetLayout);
878 DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *, const VkDescriptorPool);
879 BUFFER_STATE *GetBufferState(const layer_data *, VkBuffer);
880 IMAGE_STATE *GetImageState(const layer_data *, VkImage);
881 DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *, VkDeviceMemory);
882 BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *, VkBufferView);
883 SAMPLER_STATE *GetSamplerState(const layer_data *, VkSampler);
884 IMAGE_VIEW_STATE *GetImageViewState(const layer_data *, VkImageView);
885 SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *, VkSwapchainKHR);
886 GLOBAL_CB_NODE *GetCBNode(layer_data const *my_data, const VkCommandBuffer cb);
887 RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass);
888 std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass);
889 FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer);
890 COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool);
891 shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module);
// Device-level capability/property accessors:
892 const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data);
893 const VkPhysicalDeviceFeatures *GetEnabledFeatures(const layer_data *device_data);
894 const DeviceExtensions *GetEnabledExtensions(const layer_data *device_data);
895 const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *GetEnabledDescriptorIndexingFeatures(const layer_data *device_data);
// Cross-object bookkeeping and binding-validation helpers:
897 void invalidateCommandBuffers(const layer_data *, std::unordered_set<GLOBAL_CB_NODE *> const &, VK_OBJECT);
898 bool ValidateMemoryIsBoundToBuffer(const layer_data *, const BUFFER_STATE *, const char *, UNIQUE_VALIDATION_ERROR_CODE);
899 bool ValidateMemoryIsBoundToImage(const layer_data *, const IMAGE_STATE *, const char *, UNIQUE_VALIDATION_ERROR_CODE);
900 void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *, SAMPLER_STATE *);
901 void AddCommandBufferBindingImage(const layer_data *, GLOBAL_CB_NODE *, IMAGE_STATE *);
902 void AddCommandBufferBindingImageView(const layer_data *, GLOBAL_CB_NODE *, IMAGE_VIEW_STATE *);
903 void AddCommandBufferBindingBuffer(const layer_data *, GLOBAL_CB_NODE *, BUFFER_STATE *);
904 void AddCommandBufferBindingBufferView(const layer_data *, GLOBAL_CB_NODE *, BUFFER_VIEW_STATE *);
905 bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
906 UNIQUE_VALIDATION_ERROR_CODE error_code);
907 void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj);
908 void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info);
909 void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info);
910 bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type);
911 bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name, VkQueueFlags flags,
912 UNIQUE_VALIDATION_ERROR_CODE error_code);
913 bool ValidateCmd(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name);
914 bool insideRenderPass(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const char *apiName,
915 UNIQUE_VALIDATION_ERROR_CODE msgCode);
916 void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid);
917 bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName, UNIQUE_VALIDATION_ERROR_CODE msgCode);
918 void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node);
919 void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout);
920 bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName);
921 bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
922 const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode);
923 bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end);
924 bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName);
925 void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid);
926 bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type);
927 bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name);
929 // Prototypes for layer_data accessor functions. These should be in their own header file at some point
930 VkFormatProperties GetFormatProperties(core_validation::layer_data *device_data, VkFormat format);
931 VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
932 VkImageFormatProperties *image_format_properties);
933 const debug_report_data *GetReportData(const layer_data *);
934 const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(layer_data *);
935 const CHECK_DISABLED *GetDisables(layer_data *);
936 std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *);
937 std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(layer_data *);
938 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *);
939 std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *);
940 std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data);
941 std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data);
942 std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data);
943 const DeviceExtensions *GetDeviceExtensions(const layer_data *);
944 } // namespace core_validation
946 #endif // CORE_VALIDATION_TYPES_H_