layers: Add support for VK_EXT_descriptor_indexing
[platform/upstream/Vulkan-LoaderAndValidationLayers.git] / layers / shader_validation.cpp
1 /* Copyright (c) 2015-2017 The Khronos Group Inc.
2  * Copyright (c) 2015-2017 Valve Corporation
3  * Copyright (c) 2015-2017 LunarG, Inc.
4  * Copyright (C) 2015-2017 Google Inc.
5  *
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *
18  * Author: Chris Forbes <chrisf@ijw.co.nz>
19  */
20
21 #include <cinttypes>
22 #include <cassert>
23 #include <vector>
24 #include <unordered_map>
25 #include <string>
26 #include <sstream>
27 #include <SPIRV/spirv.hpp>
28 #include "vk_loader_platform.h"
29 #include "vk_enum_string_helper.h"
30 #include "vk_layer_table.h"
31 #include "vk_layer_data.h"
32 #include "vk_layer_extension_utils.h"
33 #include "vk_layer_utils.h"
34 #include "core_validation.h"
35 #include "core_validation_types.h"
36 #include "shader_validation.h"
37 #include "spirv-tools/libspirv.h"
38 #include "xxhash.h"
39
40 enum FORMAT_TYPE {
41     FORMAT_TYPE_FLOAT = 1,  // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
42     FORMAT_TYPE_SINT = 2,
43     FORMAT_TYPE_UINT = 4,
44 };
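// Editor's note (illustrative, not in the original source): these values are bit flags rather than an
// enumeration of mutually exclusive kinds, so one characterization can cover formats holding more than one
// fundamental type. get_format_type() below returns (FORMAT_TYPE_FLOAT | FORMAT_TYPE_UINT) for combined
// depth/stencil formats, and the interface checks test overlap with a bitwise AND instead of exact equality.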
45
46 typedef std::pair<unsigned, unsigned> location_t;
47
48 struct interface_var {
49     uint32_t id;
50     uint32_t type_id;
51     uint32_t offset;
52     bool is_patch;
53     bool is_block_member;
54     bool is_relaxed_precision;
55     // TODO: collect the name, too? Isn't required to be present.
56 };
57
58 struct shader_stage_attributes {
59     char const *const name;
60     bool arrayed_input;
61     bool arrayed_output;
62 };
63
64 static shader_stage_attributes shader_stage_attribs[] = {
65     {"vertex shader", false, false},  {"tessellation control shader", true, true}, {"tessellation evaluation shader", true, false},
66     {"geometry shader", true, false}, {"fragment shader", false, false},
67 };
68
69 // SPIRV utility functions
70 void shader_module::build_def_index() {
71     for (auto insn : *this) {
72         switch (insn.opcode()) {
73             // Types
74             case spv::OpTypeVoid:
75             case spv::OpTypeBool:
76             case spv::OpTypeInt:
77             case spv::OpTypeFloat:
78             case spv::OpTypeVector:
79             case spv::OpTypeMatrix:
80             case spv::OpTypeImage:
81             case spv::OpTypeSampler:
82             case spv::OpTypeSampledImage:
83             case spv::OpTypeArray:
84             case spv::OpTypeRuntimeArray:
85             case spv::OpTypeStruct:
86             case spv::OpTypeOpaque:
87             case spv::OpTypePointer:
88             case spv::OpTypeFunction:
89             case spv::OpTypeEvent:
90             case spv::OpTypeDeviceEvent:
91             case spv::OpTypeReserveId:
92             case spv::OpTypeQueue:
93             case spv::OpTypePipe:
94                 def_index[insn.word(1)] = insn.offset();
95                 break;
96
97                 // Fixed constants
98             case spv::OpConstantTrue:
99             case spv::OpConstantFalse:
100             case spv::OpConstant:
101             case spv::OpConstantComposite:
102             case spv::OpConstantSampler:
103             case spv::OpConstantNull:
104                 def_index[insn.word(2)] = insn.offset();
105                 break;
106
107                 // Specialization constants
108             case spv::OpSpecConstantTrue:
109             case spv::OpSpecConstantFalse:
110             case spv::OpSpecConstant:
111             case spv::OpSpecConstantComposite:
112             case spv::OpSpecConstantOp:
113                 def_index[insn.word(2)] = insn.offset();
114                 break;
115
116                 // Variables
117             case spv::OpVariable:
118                 def_index[insn.word(2)] = insn.offset();
119                 break;
120
121                 // Functions
122             case spv::OpFunction:
123                 def_index[insn.word(2)] = insn.offset();
124                 break;
125
126             default:
127                 // We don't care about any other defs for now.
128                 break;
129         }
130     }
131 }
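// Example (editor's sketch, not part of the original file): given SPIR-V such as
//     %int  = OpTypeInt 32 1        ; result id lives in word(1) for type instructions
//     %five = OpConstant %int 5     ; result id lives in word(2) for constants/variables/functions
// build_def_index() records the offset of each defining instruction keyed by its result id, so
// shader_module::get_def(id) can later jump straight to the definition:
//
//     auto insn = module->get_def(id);       // iterator positioned at the defining instruction
//     if (insn != module->end() && insn.opcode() == spv::OpConstant) {
//         uint32_t value = insn.word(3);     // literal value of a 32-bit OpConstant
//     }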
132
133 static spirv_inst_iter find_entrypoint(shader_module const *src, char const *name, VkShaderStageFlagBits stageBits) {
134     for (auto insn : *src) {
135         if (insn.opcode() == spv::OpEntryPoint) {
136             auto entrypointName = (char const *)&insn.word(3);
137             auto entrypointStageBits = 1u << insn.word(1);
138
139             if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
140                 return insn;
141             }
142         }
143     }
144
145     return src->end();
146 }
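// Editor's note: the (1u << insn.word(1)) conversion works because the SPIR-V execution model values used
// by Vulkan line up with the bit positions of VkShaderStageFlagBits, e.g.
//     spv::ExecutionModelVertex    == 0  ->  1u << 0 == VK_SHADER_STAGE_VERTEX_BIT   (0x01)
//     spv::ExecutionModelFragment  == 4  ->  1u << 4 == VK_SHADER_STAGE_FRAGMENT_BIT (0x10)
//     spv::ExecutionModelGLCompute == 5  ->  1u << 5 == VK_SHADER_STAGE_COMPUTE_BIT  (0x20)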
147
148 static char const *storage_class_name(unsigned sc) {
149     switch (sc) {
150         case spv::StorageClassInput:
151             return "input";
152         case spv::StorageClassOutput:
153             return "output";
154         case spv::StorageClassUniformConstant:
155             return "const uniform";
156         case spv::StorageClassUniform:
157             return "uniform";
158         case spv::StorageClassWorkgroup:
159             return "workgroup local";
160         case spv::StorageClassCrossWorkgroup:
161             return "workgroup global";
162         case spv::StorageClassPrivate:
163             return "private global";
164         case spv::StorageClassFunction:
165             return "function";
166         case spv::StorageClassGeneric:
167             return "generic";
168         case spv::StorageClassAtomicCounter:
169             return "atomic counter";
170         case spv::StorageClassImage:
171             return "image";
172         case spv::StorageClassPushConstant:
173             return "push constant";
174         case spv::StorageClassStorageBuffer:
175             return "storage buffer";
176         default:
177             return "unknown";
178     }
179 }
180
181 // Get the value of an integral constant
182 unsigned get_constant_value(shader_module const *src, unsigned id) {
183     auto value = src->get_def(id);
184     assert(value != src->end());
185
186     if (value.opcode() != spv::OpConstant) {
187         // TODO: Either ensure that the specialization transform is already performed on a module we're
188         //       considering here, OR -- specialize on the fly now.
189         return 1;
190     }
191
192     return value.word(3);
193 }
194
195 static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
196     auto insn = src->get_def(type);
197     assert(insn != src->end());
198
199     switch (insn.opcode()) {
200         case spv::OpTypeBool:
201             ss << "bool";
202             break;
203         case spv::OpTypeInt:
204             ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
205             break;
206         case spv::OpTypeFloat:
207             ss << "float" << insn.word(2);
208             break;
209         case spv::OpTypeVector:
210             ss << "vec" << insn.word(3) << " of ";
211             describe_type_inner(ss, src, insn.word(2));
212             break;
213         case spv::OpTypeMatrix:
214             ss << "mat" << insn.word(3) << " of ";
215             describe_type_inner(ss, src, insn.word(2));
216             break;
217         case spv::OpTypeArray:
218             ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
219             describe_type_inner(ss, src, insn.word(2));
220             break;
221         case spv::OpTypePointer:
222             ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
223             describe_type_inner(ss, src, insn.word(3));
224             break;
225         case spv::OpTypeStruct: {
226             ss << "struct of (";
227             for (unsigned i = 2; i < insn.len(); i++) {
228                 describe_type_inner(ss, src, insn.word(i));
229                 if (i == insn.len() - 1) {
230                     ss << ")";
231                 } else {
232                     ss << ", ";
233                 }
234             }
235             break;
236         }
237         case spv::OpTypeSampler:
238             ss << "sampler";
239             break;
240         case spv::OpTypeSampledImage:
241             ss << "sampler+";
242             describe_type_inner(ss, src, insn.word(2));
243             break;
244         case spv::OpTypeImage:
245             ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
246             break;
247         default:
248             ss << "oddtype";
249             break;
250     }
251 }
252
253 static std::string describe_type(shader_module const *src, unsigned type) {
254     std::ostringstream ss;
255     describe_type_inner(ss, src, type);
256     return ss.str();
257 }
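// Example (illustrative only): for a Vulkan GLSL declaration such as
//     layout(set = 0, binding = 0) uniform UBO { vec4 color; } ubo;
// calling describe_type() on the variable's pointer type yields roughly
//     "ptr to uniform struct of (vec4 of float32)"
// which is the human-readable form embedded in the mismatch messages emitted later in this file.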
258
259 static bool is_narrow_numeric_type(spirv_inst_iter type) {
260     if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) return false;
261     return type.word(2) < 64;
262 }
263
264 static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
265                         bool b_arrayed, bool relaxed) {
266     // Walk two type trees together, and complain about differences
267     auto a_insn = a->get_def(a_type);
268     auto b_insn = b->get_def(b_type);
269     assert(a_insn != a->end());
270     assert(b_insn != b->end());
271
272     if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
273         return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
274     }
275
276     if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
277         // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
278         return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
279     }
280
281     if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
282         return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
283     }
284
285     if (a_insn.opcode() != b_insn.opcode()) {
286         return false;
287     }
288
289     if (a_insn.opcode() == spv::OpTypePointer) {
290         // Match on pointee type. Storage class is expected to differ.
291         return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
292     }
293
294     if (a_arrayed || b_arrayed) {
295         // If we haven't resolved array-of-verts by here, we're not going to.
296         return false;
297     }
298
299     switch (a_insn.opcode()) {
300         case spv::OpTypeBool:
301             return true;
302         case spv::OpTypeInt:
303             // Match on width, signedness
304             return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
305         case spv::OpTypeFloat:
306             // Match on width
307             return a_insn.word(2) == b_insn.word(2);
308         case spv::OpTypeVector:
309             // Match on element type, count.
310             if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false)) return false;
311             if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
312                 return a_insn.word(3) >= b_insn.word(3);
313             } else {
314                 return a_insn.word(3) == b_insn.word(3);
315             }
316         case spv::OpTypeMatrix:
317             // Match on element type, count.
318             return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
319                    a_insn.word(3) == b_insn.word(3);
320         case spv::OpTypeArray:
321             // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
322             // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray.
323             return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
324                    get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
325         case spv::OpTypeStruct:
326             // Match on all element types
327             {
328                 if (a_insn.len() != b_insn.len()) {
329                     return false;  // Structs cannot match if member counts differ
330                 }
331
332                 for (unsigned i = 2; i < a_insn.len(); i++) {
333                     if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
334                         return false;
335                     }
336                 }
337
338                 return true;
339             }
340         default:
341             // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
342             return false;
343     }
344 }
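// Editor's note on the 'relaxed' path above: when relaxed matching is requested and 'b' is a narrow
// (sub-64-bit) scalar, a vector in 'a' is matched against 'b' by element type only; and when both sides
// are vectors of a narrow element type, 'a' merely needs at least as many components as 'b'
// (a_insn.word(3) >= b_insn.word(3)). Strict matching requires identical component counts.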
345
346 static unsigned value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, unsigned def) {
347     auto it = map.find(id);
348     if (it == map.end())
349         return def;
350     else
351         return it->second;
352 }
353
354 static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
355     auto insn = src->get_def(type);
356     assert(insn != src->end());
357
358     switch (insn.opcode()) {
359         case spv::OpTypePointer:
360             // See through the ptr -- this is only ever at the toplevel for graphics shaders; we're never actually passing
361             // pointers around.
362             return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
363         case spv::OpTypeArray:
364             if (strip_array_level) {
365                 return get_locations_consumed_by_type(src, insn.word(2), false);
366             } else {
367                 return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
368             }
369         case spv::OpTypeMatrix:
370             // Num locations is the dimension * element size
371             return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
372         case spv::OpTypeVector: {
373             auto scalar_type = src->get_def(insn.word(2));
374             auto bit_width =
375                 (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ? scalar_type.word(2) : 32;
376
377             // Locations are 128-bit wide; 3- and 4-component vectors of 64-bit types require two.
378             return (bit_width * insn.word(3) + 127) / 128;
379         }
380         default:
381             // Everything else is just 1.
382             return 1;
383
384             // TODO: extend to handle 64-bit scalar types, whose vectors may need multiple locations.
385     }
386 }
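// Worked examples for the vector rule above (editor's sketch, assuming the usual 32-/64-bit widths):
//     vec4  : 4 x 32 bits = 128  ->  (128 + 127) / 128 = 1 location
//     dvec3 : 3 x 64 bits = 192  ->  (192 + 127) / 128 = 2 locations
//     dvec4 : 4 x 64 bits = 256  ->  (256 + 127) / 128 = 2 locations
// This agrees with get_locations_consumed_by_format() below, which counts R64G64B64(A64) formats as two.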
387
388 static unsigned get_locations_consumed_by_format(VkFormat format) {
389     switch (format) {
390         case VK_FORMAT_R64G64B64A64_SFLOAT:
391         case VK_FORMAT_R64G64B64A64_SINT:
392         case VK_FORMAT_R64G64B64A64_UINT:
393         case VK_FORMAT_R64G64B64_SFLOAT:
394         case VK_FORMAT_R64G64B64_SINT:
395         case VK_FORMAT_R64G64B64_UINT:
396             return 2;
397         default:
398             return 1;
399     }
400 }
401
402 static unsigned get_format_type(VkFormat fmt) {
403     if (FormatIsSInt(fmt)) return FORMAT_TYPE_SINT;
404     if (FormatIsUInt(fmt)) return FORMAT_TYPE_UINT;
405     if (FormatIsDepthAndStencil(fmt)) return FORMAT_TYPE_FLOAT | FORMAT_TYPE_UINT;
406     if (fmt == VK_FORMAT_UNDEFINED) return 0;
407     // Everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED/SRGB is all float in the shader.
408     return FORMAT_TYPE_FLOAT;
409 }
410
411 // Characterizes a SPIR-V type appearing in an interface to a fixed-function stage, for comparison to a VkFormat's characterization above.
412 static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
413     auto insn = src->get_def(type);
414     assert(insn != src->end());
415
416     switch (insn.opcode()) {
417         case spv::OpTypeInt:
418             return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
419         case spv::OpTypeFloat:
420             return FORMAT_TYPE_FLOAT;
421         case spv::OpTypeVector:
422             return get_fundamental_type(src, insn.word(2));
423         case spv::OpTypeMatrix:
424             return get_fundamental_type(src, insn.word(2));
425         case spv::OpTypeArray:
426             return get_fundamental_type(src, insn.word(2));
427         case spv::OpTypePointer:
428             return get_fundamental_type(src, insn.word(3));
429         case spv::OpTypeImage:
430             return get_fundamental_type(src, insn.word(2));
431
432         default:
433             return 0;
434     }
435 }
436
437 static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
438     uint32_t bit_pos = uint32_t(u_ffs(stage));
439     return bit_pos - 1;
440 }
441
442 static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
443     while (true) {
444         if (def.opcode() == spv::OpTypePointer) {
445             def = src->get_def(def.word(3));
446         } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
447             def = src->get_def(def.word(2));
448             is_array_of_verts = false;
449         } else if (def.opcode() == spv::OpTypeStruct) {
450             return def;
451         } else {
452             return src->end();
453         }
454     }
455 }
456
457 static bool collect_interface_block_members(shader_module const *src, std::map<location_t, interface_var> *out,
458                                             std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
459                                             uint32_t id, uint32_t type_id, bool is_patch, int /*first_location*/) {
460     // Walk down the type_id presented, trying to determine whether it's actually an interface block.
461     auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
462     if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
463         // This isn't an interface block.
464         return false;
465     }
466
467     std::unordered_map<unsigned, unsigned> member_components;
468     std::unordered_map<unsigned, unsigned> member_relaxed_precision;
469     std::unordered_map<unsigned, unsigned> member_patch;
470
471     // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
472     for (auto insn : *src) {
473         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
474             unsigned member_index = insn.word(2);
475
476             if (insn.word(3) == spv::DecorationComponent) {
477                 unsigned component = insn.word(4);
478                 member_components[member_index] = component;
479             }
480
481             if (insn.word(3) == spv::DecorationRelaxedPrecision) {
482                 member_relaxed_precision[member_index] = 1;
483             }
484
485             if (insn.word(3) == spv::DecorationPatch) {
486                 member_patch[member_index] = 1;
487             }
488         }
489     }
490
491     // TODO: correctly handle location assignment from outside
492
493     // Second pass -- produce the output, from Location decorations
494     for (auto insn : *src) {
495         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
496             unsigned member_index = insn.word(2);
497             unsigned member_type_id = type.word(2 + member_index);
498
499             if (insn.word(3) == spv::DecorationLocation) {
500                 unsigned location = insn.word(4);
501                 unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
502                 auto component_it = member_components.find(member_index);
503                 unsigned component = component_it == member_components.end() ? 0 : component_it->second;
504                 bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();
505                 bool member_is_patch = is_patch || member_patch.count(member_index) > 0;
506
507                 for (unsigned int offset = 0; offset < num_locations; offset++) {
508                     interface_var v = {};
509                     v.id = id;
510                     // TODO: member index in interface_var too?
511                     v.type_id = member_type_id;
512                     v.offset = offset;
513                     v.is_patch = member_is_patch;
514                     v.is_block_member = true;
515                     v.is_relaxed_precision = is_relaxed_precision;
516                     (*out)[std::make_pair(location + offset, component)] = v;
517                 }
518             }
519         }
520     }
521
522     return true;
523 }
524
525 static std::map<location_t, interface_var> collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
526                                                                          spv::StorageClass sinterface, bool is_array_of_verts) {
527     std::unordered_map<unsigned, unsigned> var_locations;
528     std::unordered_map<unsigned, unsigned> var_builtins;
529     std::unordered_map<unsigned, unsigned> var_components;
530     std::unordered_map<unsigned, unsigned> blocks;
531     std::unordered_map<unsigned, unsigned> var_patch;
532     std::unordered_map<unsigned, unsigned> var_relaxed_precision;
533
534     for (auto insn : *src) {
535         // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
536         // fits neither model.
537         if (insn.opcode() == spv::OpDecorate) {
538             if (insn.word(2) == spv::DecorationLocation) {
539                 var_locations[insn.word(1)] = insn.word(3);
540             }
541
542             if (insn.word(2) == spv::DecorationBuiltIn) {
543                 var_builtins[insn.word(1)] = insn.word(3);
544             }
545
546             if (insn.word(2) == spv::DecorationComponent) {
547                 var_components[insn.word(1)] = insn.word(3);
548             }
549
550             if (insn.word(2) == spv::DecorationBlock) {
551                 blocks[insn.word(1)] = 1;
552             }
553
554             if (insn.word(2) == spv::DecorationPatch) {
555                 var_patch[insn.word(1)] = 1;
556             }
557
558             if (insn.word(2) == spv::DecorationRelaxedPrecision) {
559                 var_relaxed_precision[insn.word(1)] = 1;
560             }
561         }
562     }
563
564     // TODO: handle grouped decorations
565     // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.
566
567     // Find the end of the entrypoint's name string. Additional zero bytes follow the actual null terminator, to fill out the
568     // rest of the word -- so we only need to look at the last byte in the word to determine which word contains the terminator.
569     uint32_t word = 3;
570     while (entrypoint.word(word) & 0xff000000u) {
571         ++word;
572     }
573     ++word;
574
575     std::map<location_t, interface_var> out;
576
577     for (; word < entrypoint.len(); word++) {
578         auto insn = src->get_def(entrypoint.word(word));
579         assert(insn != src->end());
580         assert(insn.opcode() == spv::OpVariable);
581
582         if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
583             unsigned id = insn.word(2);
584             unsigned type = insn.word(1);
585
586             int location = value_or_default(var_locations, id, static_cast<unsigned>(-1));
587             int builtin = value_or_default(var_builtins, id, static_cast<unsigned>(-1));
588             unsigned component = value_or_default(var_components, id, 0);  // Unspecified is OK, is 0
589             bool is_patch = var_patch.find(id) != var_patch.end();
590             bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();
591
592             if (builtin != -1)
593                 continue;
594             else if (!collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch, location)) {
595                 // A user-defined interface variable, with a location. Where a variable occupies multiple locations, emit
596                 // one result for each.
597                 unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
598                 for (unsigned int offset = 0; offset < num_locations; offset++) {
599                     interface_var v = {};
600                     v.id = id;
601                     v.type_id = type;
602                     v.offset = offset;
603                     v.is_patch = is_patch;
604                     v.is_relaxed_precision = is_relaxed_precision;
605                     out[std::make_pair(location + offset, component)] = v;
606                 }
607             }
608         }
609     }
610
611     return out;
612 }
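// Editor's note on the name-string scan above: OpEntryPoint stores the entry point name starting at word 3
// as a nul-terminated string padded with zero bytes to a word boundary, so a word whose most-significant
// byte is zero is the word containing the terminator; the interface <id>s that the second loop walks begin
// in the word immediately after it.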
613
614 static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
615     shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids) {
616     std::vector<std::pair<uint32_t, interface_var>> out;
617
618     for (auto insn : *src) {
619         if (insn.opcode() == spv::OpDecorate) {
620             if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
621                 auto attachment_index = insn.word(3);
622                 auto id = insn.word(1);
623
624                 if (accessible_ids.count(id)) {
625                     auto def = src->get_def(id);
626                     assert(def != src->end());
627
628                     if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
629                         auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
630                         for (unsigned int offset = 0; offset < num_locations; offset++) {
631                             interface_var v = {};
632                             v.id = id;
633                             v.type_id = def.word(1);
634                             v.offset = offset;
635                             out.emplace_back(attachment_index + offset, v);
636                         }
637                     }
638                 }
639             }
640         }
641     }
642
643     return out;
644 }
645
646 static bool is_writable_descriptor_type(shader_module const *module, uint32_t type_id) {
647     auto type = module->get_def(type_id);
648
649     // Strip off any array or pointer levels to reach the underlying descriptor type.
650     while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
651         if (type.opcode() == spv::OpTypeArray) {
652             type = module->get_def(type.word(2));
653         } else {
654             if (type.word(2) == spv::StorageClassStorageBuffer) {
655                 return true;
656             }
657             type = module->get_def(type.word(3));
658         }
659     }
660
661     switch (type.opcode()) {
662         case spv::OpTypeImage: {
663             auto dim = type.word(3);
664             auto sampled = type.word(7);
665             return sampled == 2 && dim != spv::DimSubpassData;
666         }
667
668         case spv::OpTypeStruct:
669             for (auto insn : *module) {
670                 if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
671                     if (insn.word(2) == spv::DecorationBufferBlock) {
672                         return true;
673                     }
674                 }
675             }
676     }
677
678     return false;
679 }
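// Examples of what this reports as writable (editor's sketch):
//     layout(set = 0, binding = 0, rgba8) uniform image2D img;      // OpTypeImage with Sampled == 2
//     layout(set = 0, binding = 1) buffer SSBO { float data[]; };   // BufferBlock / StorageBuffer class
// Subpass inputs (DimSubpassData) and sampled images (Sampled == 1) are not considered writable, and a
// "readonly" (NonWritable) decoration suppresses the flag in the caller below.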
680
681 static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
682     debug_report_data const *report_data, shader_module const *src, std::unordered_set<uint32_t> const &accessible_ids,
683     bool *has_writable_descriptor) {
684     std::unordered_map<unsigned, unsigned> var_sets;
685     std::unordered_map<unsigned, unsigned> var_bindings;
686     std::unordered_map<unsigned, unsigned> var_nonwritable;
687
688     for (auto insn : *src) {
689         // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
690         // DecorationDescriptorSet and DecorationBinding.
691         if (insn.opcode() == spv::OpDecorate) {
692             if (insn.word(2) == spv::DecorationDescriptorSet) {
693                 var_sets[insn.word(1)] = insn.word(3);
694             }
695
696             if (insn.word(2) == spv::DecorationBinding) {
697                 var_bindings[insn.word(1)] = insn.word(3);
698             }
699
700             if (insn.word(2) == spv::DecorationNonWritable) {
701                 var_nonwritable[insn.word(1)] = 1;
702             }
703         }
704     }
705
706     std::vector<std::pair<descriptor_slot_t, interface_var>> out;
707
708     for (auto id : accessible_ids) {
709         auto insn = src->get_def(id);
710         assert(insn != src->end());
711
712         if (insn.opcode() == spv::OpVariable &&
713             (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant ||
714              insn.word(3) == spv::StorageClassStorageBuffer)) {
715             unsigned set = value_or_default(var_sets, insn.word(2), 0);
716             unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
717
718             interface_var v = {};
719             v.id = insn.word(2);
720             v.type_id = insn.word(1);
721             out.emplace_back(std::make_pair(set, binding), v);
722
723             if (var_nonwritable.find(id) == var_nonwritable.end() && is_writable_descriptor_type(src, insn.word(1))) {
724                 *has_writable_descriptor = true;
725             }
726         }
727     }
728
729     return out;
730 }
731
732 static bool validate_vi_consistency(debug_report_data const *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
733     // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer.  Each binding should
734     // be specified only once.
735     std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
736     bool skip = false;
737
738     for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
739         auto desc = &vi->pVertexBindingDescriptions[i];
740         auto &binding = bindings[desc->binding];
741         if (binding) {
742             // TODO: VALIDATION_ERROR_096005cc perhaps?
743             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
744                             SHADER_CHECKER_INCONSISTENT_VI, "Duplicate vertex input binding descriptions for binding %d",
745                             desc->binding);
746         } else {
747             binding = desc;
748         }
749     }
750
751     return skip;
752 }
753
754 static bool validate_vi_against_vs_inputs(debug_report_data const *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
755                                           shader_module const *vs, spirv_inst_iter entrypoint) {
756     bool skip = false;
757
758     auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
759
760     // Build index by location
761     std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
762     if (vi) {
763         for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
764             auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
765             for (auto j = 0u; j < num_locations; j++) {
766                 attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
767             }
768         }
769     }
770
771     auto it_a = attribs.begin();
772     auto it_b = inputs.begin();
773     bool used = false;
774
775     while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
776         bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
777         bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
778         auto a_first = a_at_end ? 0 : it_a->first;
779         auto b_first = b_at_end ? 0 : it_b->first.first;
780         if (!a_at_end && (b_at_end || a_first < b_first)) {
781             if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
782                                  0, SHADER_CHECKER_OUTPUT_NOT_CONSUMED,
783                                  "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
784                 skip = true;
785             }
786             used = false;
787             it_a++;
788         } else if (!b_at_end && (a_at_end || b_first < a_first)) {
789             skip |=
790                 log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
791                         SHADER_CHECKER_INPUT_NOT_PRODUCED, "Vertex shader consumes input at location %d but it is not provided", b_first);
792             it_b++;
793         } else {
794             unsigned attrib_type = get_format_type(it_a->second->format);
795             unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
796
797             // Type checking
798             if (!(attrib_type & input_type)) {
799                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
800                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH,
801                                 "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
802                                 string_VkFormat(it_a->second->format), a_first, describe_type(vs, it_b->second.type_id).c_str());
803             }
804
805             // OK!
806             used = true;
807             it_b++;
808         }
809     }
810
811     return skip;
812 }
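// Editor's note: the loop above is a merge walk over two containers sorted by location -- an entry only on
// the attribute side yields a "not consumed" performance warning, one only on the shader side yields a
// "not provided" error, and entries present on both sides get their fundamental types compared. The same
// pattern is reused below to match fragment shader outputs against subpass color attachments.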
813
814 static bool validate_fs_outputs_against_render_pass(debug_report_data const *report_data, shader_module const *fs,
815                                                     spirv_inst_iter entrypoint, PIPELINE_STATE const *pipeline,
816                                                     uint32_t subpass_index) {
817     auto rpci = pipeline->rp_state->createInfo.ptr();
818
819     std::map<uint32_t, VkFormat> color_attachments;
820     auto subpass = rpci->pSubpasses[subpass_index];
821     for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
822         uint32_t attachment = subpass.pColorAttachments[i].attachment;
823         if (attachment == VK_ATTACHMENT_UNUSED) continue;
824         if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
825             color_attachments[i] = rpci->pAttachments[attachment].format;
826         }
827     }
828
829     bool skip = false;
830
831     // TODO: dual source blend index (spv::DecIndex, zero if not provided)
832
833     auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
834
835     auto it_a = outputs.begin();
836     auto it_b = color_attachments.begin();
837
838     // Walk attachment list and outputs together
839
840     while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
841         bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
842         bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
843
844         if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
845             skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
846                             SHADER_CHECKER_OUTPUT_NOT_CONSUMED,
847                             "Fragment shader writes to output location %d with no matching attachment", it_a->first.first);
848             it_a++;
849         } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
850             // Only complain if there are unmasked channels for this attachment. If the writemask is 0, it's acceptable for the
851             // shader to not produce a matching output.
852             if (pipeline->attachments[it_b->first].colorWriteMask != 0) {
853                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
854                                 SHADER_CHECKER_INPUT_NOT_PRODUCED, "Attachment %d not written by fragment shader", it_b->first);
855             }
856             it_b++;
857         } else {
858             unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
859             unsigned att_type = get_format_type(it_b->second);
860
861             // Type checking
862             if (!(output_type & att_type)) {
863                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
864                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH,
865                                 "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
866                                 string_VkFormat(it_b->second), describe_type(fs, it_a->second.type_id).c_str());
867             }
868
869             // OK!
870             it_a++;
871             it_b++;
872         }
873     }
874
875     return skip;
876 }
877
878 // For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
879 // important for identifying the set of shader resources actually used by an entrypoint, for example.
880 // Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
881 //  - NOT the shader input/output interfaces.
882 //
883 // TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
884 // converting parts of this to be generated from the machine-readable spec instead.
885 static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
886     std::unordered_set<uint32_t> ids;
887     std::unordered_set<uint32_t> worklist;
888     worklist.insert(entrypoint.word(2));
889
890     while (!worklist.empty()) {
891         auto id_iter = worklist.begin();
892         auto id = *id_iter;
893         worklist.erase(id_iter);
894
895         auto insn = src->get_def(id);
896         if (insn == src->end()) {
897             // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
898             // that we may not care about.
899             continue;
900         }
901
902         // Try to add to the output set
903         if (!ids.insert(id).second) {
904             continue;  // If we already saw this id, we don't want to walk it again.
905         }
906
907         switch (insn.opcode()) {
908             case spv::OpFunction:
909                 // Scan whole body of the function, enlisting anything interesting
910                 while (++insn, insn.opcode() != spv::OpFunctionEnd) {
911                     switch (insn.opcode()) {
912                         case spv::OpLoad:
913                         case spv::OpAtomicLoad:
914                         case spv::OpAtomicExchange:
915                         case spv::OpAtomicCompareExchange:
916                         case spv::OpAtomicCompareExchangeWeak:
917                         case spv::OpAtomicIIncrement:
918                         case spv::OpAtomicIDecrement:
919                         case spv::OpAtomicIAdd:
920                         case spv::OpAtomicISub:
921                         case spv::OpAtomicSMin:
922                         case spv::OpAtomicUMin:
923                         case spv::OpAtomicSMax:
924                         case spv::OpAtomicUMax:
925                         case spv::OpAtomicAnd:
926                         case spv::OpAtomicOr:
927                         case spv::OpAtomicXor:
928                             worklist.insert(insn.word(3));  // ptr
929                             break;
930                         case spv::OpStore:
931                         case spv::OpAtomicStore:
932                             worklist.insert(insn.word(1));  // ptr
933                             break;
934                         case spv::OpAccessChain:
935                         case spv::OpInBoundsAccessChain:
936                             worklist.insert(insn.word(3));  // base ptr
937                             break;
938                         case spv::OpSampledImage:
939                         case spv::OpImageSampleImplicitLod:
940                         case spv::OpImageSampleExplicitLod:
941                         case spv::OpImageSampleDrefImplicitLod:
942                         case spv::OpImageSampleDrefExplicitLod:
943                         case spv::OpImageSampleProjImplicitLod:
944                         case spv::OpImageSampleProjExplicitLod:
945                         case spv::OpImageSampleProjDrefImplicitLod:
946                         case spv::OpImageSampleProjDrefExplicitLod:
947                         case spv::OpImageFetch:
948                         case spv::OpImageGather:
949                         case spv::OpImageDrefGather:
950                         case spv::OpImageRead:
951                         case spv::OpImage:
952                         case spv::OpImageQueryFormat:
953                         case spv::OpImageQueryOrder:
954                         case spv::OpImageQuerySizeLod:
955                         case spv::OpImageQuerySize:
956                         case spv::OpImageQueryLod:
957                         case spv::OpImageQueryLevels:
958                         case spv::OpImageQuerySamples:
959                         case spv::OpImageSparseSampleImplicitLod:
960                         case spv::OpImageSparseSampleExplicitLod:
961                         case spv::OpImageSparseSampleDrefImplicitLod:
962                         case spv::OpImageSparseSampleDrefExplicitLod:
963                         case spv::OpImageSparseSampleProjImplicitLod:
964                         case spv::OpImageSparseSampleProjExplicitLod:
965                         case spv::OpImageSparseSampleProjDrefImplicitLod:
966                         case spv::OpImageSparseSampleProjDrefExplicitLod:
967                         case spv::OpImageSparseFetch:
968                         case spv::OpImageSparseGather:
969                         case spv::OpImageSparseDrefGather:
970                         case spv::OpImageTexelPointer:
971                             worklist.insert(insn.word(3));  // Image or sampled image
972                             break;
973                         case spv::OpImageWrite:
974                             worklist.insert(insn.word(1));  // Image -- different operand order to above
975                             break;
976                         case spv::OpFunctionCall:
977                             for (uint32_t i = 3; i < insn.len(); i++) {
978                                 worklist.insert(insn.word(i));  // fn itself, and all args
979                             }
980                             break;
981
982                         case spv::OpExtInst:
983                             for (uint32_t i = 5; i < insn.len(); i++) {
984                                 worklist.insert(insn.word(i));  // Operands to ext inst
985                             }
986                             break;
987                     }
988                 }
989                 break;
990         }
991     }
992
993     return ids;
994 }
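// Editor's note: this is a plain worklist traversal. It seeds the set with the entry point's function id
// (entrypoint.word(2)), scans every function body it reaches, and enqueues only operands that can name
// descriptor-backed resources -- load/store/atomic pointers, access-chain bases, image and sampled-image
// operands, called functions and their arguments, and extended-instruction operands.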
995
996 static bool validate_push_constant_block_against_pipeline(debug_report_data const *report_data,
997                                                           std::vector<VkPushConstantRange> const *push_constant_ranges,
998                                                           shader_module const *src, spirv_inst_iter type,
999                                                           VkShaderStageFlagBits stage) {
1000     bool skip = false;
1001
1002     // Strip off ptrs etc
1003     type = get_struct_type(src, type, false);
1004     assert(type != src->end());
1005
1006     // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but is a good first step.
1007     // TODO: arrays, matrices, weird sizes
1008     for (auto insn : *src) {
1009         if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1010             if (insn.word(3) == spv::DecorationOffset) {
1011                 unsigned offset = insn.word(4);
1012                 auto size = 4;  // Bytes; TODO: calculate this based on the type
1013
1014                 bool found_range = false;
1015                 for (auto const &range : *push_constant_ranges) {
1016                     if (range.offset <= offset && range.offset + range.size >= offset + size) {
1017                         found_range = true;
1018
1019                         if ((range.stageFlags & stage) == 0) {
1020                             skip |=
1021                                 log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1022                                         SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE,
1023                                         "Push constant range covering variable starting at offset %u not accessible from stage %s",
1024                                         offset, string_VkShaderStageFlagBits(stage));
1025                         }
1026
1027                         break;
1028                     }
1029                 }
1030
1031                 if (!found_range) {
1032                     skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1033                                     SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE,
1034                                     "Push constant range covering variable starting at offset %u not declared in layout", offset);
1035                 }
1036             }
1037         }
1038     }
1039
1040     return skip;
1041 }
1042
1043 static bool validate_push_constant_usage(debug_report_data const *report_data,
1044                                          std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
1045                                          std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
1046     bool skip = false;
1047
1048     for (auto id : accessible_ids) {
1049         auto def_insn = src->get_def(id);
1050         if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1051             skip |= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
1052                                                                   src->get_def(def_insn.word(1)), stage);
1053         }
1054     }
1055
1056     return skip;
1057 }
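// Example (illustrative, assuming default std430 push-constant offsets): for
//     layout(push_constant) uniform PC { mat4 mvp; float alpha; } pc;
// member 'alpha' sits at offset 64, so some VkPushConstantRange must satisfy offset <= 64 and
// offset + size >= 68 (the per-member size is approximated as 4 bytes above), and that range's stageFlags
// must include the stage being validated; otherwise one of the two errors above fires.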
1058
1059 // Validate that data for each specialization entry is fully contained within the buffer.
1060 static bool validate_specialization_offsets(debug_report_data const *report_data, VkPipelineShaderStageCreateInfo const *info) {
1061     bool skip = false;
1062
1063     VkSpecializationInfo const *spec = info->pSpecializationInfo;
1064
1065     if (spec) {
1066         for (auto i = 0u; i < spec->mapEntryCount; i++) {
1067             // TODO: This is a good place for VALIDATION_ERROR_1360060a.
1068             if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
1069                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1070                                 VALIDATION_ERROR_1360060c,
1071                                 "Specialization entry %u (for constant id %u) references memory outside provided specialization "
1072                                 "data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER " bytes provided).",
1073                                 i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
1074                                 spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize);
1075             }
1076         }
1077     }
1078
1079     return skip;
1080 }
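// Example (illustrative): with VkSpecializationInfo::dataSize == 8, a map entry of
//     { constantID = 0, offset = 6, size = 4 }
// is flagged, since it would read bytes 6..9 while only 8 bytes of specialization data were provided.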
1081
1082 static bool descriptor_type_match(shader_module const *module, uint32_t type_id, VkDescriptorType descriptor_type,
1083                                   unsigned &descriptor_count) {
1084     auto type = module->get_def(type_id);
1085     bool is_storage_buffer = false;
1086     descriptor_count = 1;
1087
1088     // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
1089     while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer || type.opcode() == spv::OpTypeRuntimeArray) {
1090         if (type.opcode() == spv::OpTypeRuntimeArray) {
1091             descriptor_count = 0;
1092             type = module->get_def(type.word(2));
1093         } else if (type.opcode() == spv::OpTypeArray) {
1094             descriptor_count *= get_constant_value(module, type.word(3));
1095             type = module->get_def(type.word(2));
1096         } else {
1097             if (type.word(2) == spv::StorageClassStorageBuffer) {
1098                 is_storage_buffer = true;
1099             }
1100             type = module->get_def(type.word(3));
1101         }
1102     }
1103
1104     switch (type.opcode()) {
1105         case spv::OpTypeStruct: {
1106             for (auto insn : *module) {
1107                 if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
1108                     if (insn.word(2) == spv::DecorationBlock) {
1109                         if (is_storage_buffer) {
1110                             return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1111                                    descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
1112                         } else {
1113                             return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1114                                    descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
1115                         }
1116                     } else if (insn.word(2) == spv::DecorationBufferBlock) {
1117                         return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1118                                descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
1119                     }
1120                 }
1121             }
1122
1123             // Invalid
1124             return false;
1125         }
1126
1127         case spv::OpTypeSampler:
1128             return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER || descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1129
1130         case spv::OpTypeSampledImage:
1131             if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
1132                 // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
1133                 // buffer descriptor doesn't really provide one. Allow this slight mismatch.
1134                 auto image_type = module->get_def(type.word(2));
1135                 auto dim = image_type.word(3);
1136                 auto sampled = image_type.word(7);
1137                 return dim == spv::DimBuffer && sampled == 1;
1138             }
1139             return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1140
1141         case spv::OpTypeImage: {
1142             // Many descriptor types backing image types -- depends on dimension and whether the image will be used with a sampler.
1143             // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
1144             auto dim = type.word(3);
1145             auto sampled = type.word(7);
1146
1147             if (dim == spv::DimSubpassData) {
1148                 return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
1149             } else if (dim == spv::DimBuffer) {
1150                 if (sampled == 1) {
1151                     return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
1152                 } else {
1153                     return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1154                 }
1155             } else if (sampled == 1) {
1156                 return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
1157                        descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
1158             } else {
1159                 return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
1160             }
1161         }
1162
1163             // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
1164         default:
1165             return false;  // Mismatch
1166     }
1167 }
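// Examples of the mapping implemented above (editor's sketch):
//     uniform sampler2D tex;                  -> OpTypeSampledImage        -> COMBINED_IMAGE_SAMPLER
//     layout(rgba8) uniform image2D img;      -> OpTypeImage, Sampled == 2 -> STORAGE_IMAGE
//     uniform UBO  { ... } ubo;               -> Block struct              -> UNIFORM_BUFFER(_DYNAMIC)
//     buffer  SSBO { ... } ssbo;              -> BufferBlock struct        -> STORAGE_BUFFER(_DYNAMIC)
// A runtime-sized descriptor array (VK_EXT_descriptor_indexing), e.g. uniform sampler2D texs[];, maps
// through OpTypeRuntimeArray and sets descriptor_count to 0, so the caller cannot check a fixed count.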
1168
1169 static bool require_feature(debug_report_data const *report_data, VkBool32 feature, char const *feature_name) {
1170     if (!feature) {
1171         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1172                     SHADER_CHECKER_FEATURE_NOT_ENABLED, "Shader requires %s but it is not enabled on the device", feature_name)) {
1173             return true;
1174         }
1175     }
1176
1177     return false;
1178 }
1179
1180 static bool require_extension(debug_report_data const *report_data, bool extension, char const *extension_name) {
1181     if (!extension) {
1182         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1183                     SHADER_CHECKER_FEATURE_NOT_ENABLED, "Shader requires extension %s but it is not enabled on the device",
1184                     extension_name)) {
1185             return true;
1186         }
1187     }
1188
1189     return false;
1190 }
1191
1192 static bool validate_shader_capabilities(layer_data *dev_data, shader_module const *src, VkShaderStageFlagBits stage,
1193                                          bool has_writable_descriptor) {
1194     bool skip = false;
1195
1196     auto report_data = GetReportData(dev_data);
1197     auto const &enabledFeatures = GetEnabledFeatures(dev_data);
1198     auto const &extensions = GetEnabledExtensions(dev_data);
1199     auto const &descriptorIndexingFeatures = GetEnabledDescriptorIndexingFeatures(dev_data);
1200
1201     struct CapabilityInfo {
1202         char const *name;
1203         VkBool32 const *feature;
1204         bool const *extension;
1205     };
1206
1207     // clang-format off
1208     static const std::unordered_multimap<uint32_t, CapabilityInfo> capabilities = {
1209         // Capabilities always supported by a Vulkan 1.0 implementation -- no
1210         // feature bits.
1211         {spv::CapabilityMatrix, {nullptr}},
1212         {spv::CapabilityShader, {nullptr}},
1213         {spv::CapabilityInputAttachment, {nullptr}},
1214         {spv::CapabilitySampled1D, {nullptr}},
1215         {spv::CapabilityImage1D, {nullptr}},
1216         {spv::CapabilitySampledBuffer, {nullptr}},
1217         {spv::CapabilityImageQuery, {nullptr}},
1218         {spv::CapabilityDerivativeControl, {nullptr}},
1219
1220         // Capabilities that are optionally supported, but require a feature to
1221         // be enabled on the device
1222         {spv::CapabilityGeometry, {"VkPhysicalDeviceFeatures::geometryShader", &enabledFeatures->geometryShader}},
1223         {spv::CapabilityTessellation, {"VkPhysicalDeviceFeatures::tessellationShader", &enabledFeatures->tessellationShader}},
1224         {spv::CapabilityFloat64, {"VkPhysicalDeviceFeatures::shaderFloat64", &enabledFeatures->shaderFloat64}},
1225         {spv::CapabilityInt64, {"VkPhysicalDeviceFeatures::shaderInt64", &enabledFeatures->shaderInt64}},
1226         {spv::CapabilityTessellationPointSize, {"VkPhysicalDeviceFeatures::shaderTessellationAndGeometryPointSize", &enabledFeatures->shaderTessellationAndGeometryPointSize}},
1227         {spv::CapabilityGeometryPointSize, {"VkPhysicalDeviceFeatures::shaderTessellationAndGeometryPointSize", &enabledFeatures->shaderTessellationAndGeometryPointSize}},
1228         {spv::CapabilityImageGatherExtended, {"VkPhysicalDeviceFeatures::shaderImageGatherExtended", &enabledFeatures->shaderImageGatherExtended}},
1229         {spv::CapabilityStorageImageMultisample, {"VkPhysicalDeviceFeatures::shaderStorageImageMultisample", &enabledFeatures->shaderStorageImageMultisample}},
1230         {spv::CapabilityUniformBufferArrayDynamicIndexing, {"VkPhysicalDeviceFeatures::shaderUniformBufferArrayDynamicIndexing", &enabledFeatures->shaderUniformBufferArrayDynamicIndexing}},
1231         {spv::CapabilitySampledImageArrayDynamicIndexing, {"VkPhysicalDeviceFeatures::shaderSampledImageArrayDynamicIndexing", &enabledFeatures->shaderSampledImageArrayDynamicIndexing}},
1232         {spv::CapabilityStorageBufferArrayDynamicIndexing, {"VkPhysicalDeviceFeatures::shaderStorageBufferArrayDynamicIndexing", &enabledFeatures->shaderStorageBufferArrayDynamicIndexing}},
1233         {spv::CapabilityStorageImageArrayDynamicIndexing, {"VkPhysicalDeviceFeatures::shaderStorageImageArrayDynamicIndexing", &enabledFeatures->shaderStorageImageArrayDynamicIndexing}},
1234         {spv::CapabilityClipDistance, {"VkPhysicalDeviceFeatures::shaderClipDistance", &enabledFeatures->shaderClipDistance}},
1235         {spv::CapabilityCullDistance, {"VkPhysicalDeviceFeatures::shaderCullDistance", &enabledFeatures->shaderCullDistance}},
1236         {spv::CapabilityImageCubeArray, {"VkPhysicalDeviceFeatures::imageCubeArray", &enabledFeatures->imageCubeArray}},
1237         {spv::CapabilitySampleRateShading, {"VkPhysicalDeviceFeatures::sampleRateShading", &enabledFeatures->sampleRateShading}},
1238         {spv::CapabilitySparseResidency, {"VkPhysicalDeviceFeatures::shaderResourceResidency", &enabledFeatures->shaderResourceResidency}},
1239         {spv::CapabilityMinLod, {"VkPhysicalDeviceFeatures::shaderResourceMinLod", &enabledFeatures->shaderResourceMinLod}},
1240         {spv::CapabilitySampledCubeArray, {"VkPhysicalDeviceFeatures::imageCubeArray", &enabledFeatures->imageCubeArray}},
1241         {spv::CapabilityImageMSArray, {"VkPhysicalDeviceFeatures::shaderStorageImageMultisample", &enabledFeatures->shaderStorageImageMultisample}},
1242         {spv::CapabilityStorageImageExtendedFormats, {"VkPhysicalDeviceFeatures::shaderStorageImageExtendedFormats", &enabledFeatures->shaderStorageImageExtendedFormats}},
1243         {spv::CapabilityInterpolationFunction, {"VkPhysicalDeviceFeatures::sampleRateShading", &enabledFeatures->sampleRateShading}},
1244         {spv::CapabilityStorageImageReadWithoutFormat, {"VkPhysicalDeviceFeatures::shaderStorageImageReadWithoutFormat", &enabledFeatures->shaderStorageImageReadWithoutFormat}},
1245         {spv::CapabilityStorageImageWriteWithoutFormat, {"VkPhysicalDeviceFeatures::shaderStorageImageWriteWithoutFormat", &enabledFeatures->shaderStorageImageWriteWithoutFormat}},
1246         {spv::CapabilityMultiViewport, {"VkPhysicalDeviceFeatures::multiViewport", &enabledFeatures->multiViewport}},
1247
1248         // XXX TODO: Descriptor indexing capability enums are not yet available in the spirv-tools we fetch.
1249 #define CapabilityShaderNonUniformEXT 5301
1250 #define CapabilityRuntimeDescriptorArrayEXT 5302
1251 #define CapabilityInputAttachmentArrayDynamicIndexingEXT 5303
1252 #define CapabilityUniformTexelBufferArrayDynamicIndexingEXT 5304
1253 #define CapabilityStorageTexelBufferArrayDynamicIndexingEXT 5305
1254 #define CapabilityUniformBufferArrayNonUniformIndexingEXT 5306
1255 #define CapabilitySampledImageArrayNonUniformIndexingEXT 5307
1256 #define CapabilityStorageBufferArrayNonUniformIndexingEXT 5308
1257 #define CapabilityStorageImageArrayNonUniformIndexingEXT 5309
1258 #define CapabilityInputAttachmentArrayNonUniformIndexingEXT 5310
1259 #define CapabilityUniformTexelBufferArrayNonUniformIndexingEXT 5311
1260 #define CapabilityStorageTexelBufferArrayNonUniformIndexingEXT 5312
1261         {CapabilityShaderNonUniformEXT, {VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME, nullptr, &extensions->vk_ext_descriptor_indexing}},
1262         {CapabilityRuntimeDescriptorArrayEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::runtimeDescriptorArray", &descriptorIndexingFeatures->runtimeDescriptorArray}},
1263         {CapabilityInputAttachmentArrayDynamicIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderInputAttachmentArrayDynamicIndexing", &descriptorIndexingFeatures->shaderInputAttachmentArrayDynamicIndexing}},
1264         {CapabilityUniformTexelBufferArrayDynamicIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderUniformTexelBufferArrayDynamicIndexing", &descriptorIndexingFeatures->shaderUniformTexelBufferArrayDynamicIndexing}},
1265         {CapabilityStorageTexelBufferArrayDynamicIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageTexelBufferArrayDynamicIndexing", &descriptorIndexingFeatures->shaderStorageTexelBufferArrayDynamicIndexing}},
1266         {CapabilityUniformBufferArrayNonUniformIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderUniformBufferArrayNonUniformIndexing", &descriptorIndexingFeatures->shaderUniformBufferArrayNonUniformIndexing}},
1267         {CapabilitySampledImageArrayNonUniformIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderSampledImageArrayNonUniformIndexing", &descriptorIndexingFeatures->shaderSampledImageArrayNonUniformIndexing}},
1268         {CapabilityStorageBufferArrayNonUniformIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageBufferArrayNonUniformIndexing", &descriptorIndexingFeatures->shaderStorageBufferArrayNonUniformIndexing}},
1269         {CapabilityStorageImageArrayNonUniformIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageImageArrayNonUniformIndexing", &descriptorIndexingFeatures->shaderStorageImageArrayNonUniformIndexing}},
1270         {CapabilityInputAttachmentArrayNonUniformIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderInputAttachmentArrayNonUniformIndexing", &descriptorIndexingFeatures->shaderInputAttachmentArrayNonUniformIndexing}},
1271         {CapabilityUniformTexelBufferArrayNonUniformIndexingEXT, {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderUniformTexelBufferArrayNonUniformIndexing", &descriptorIndexingFeatures->shaderUniformTexelBufferArrayNonUniformIndexing}},
1272         {CapabilityStorageTexelBufferArrayNonUniformIndexingEXT , {"VkPhysicalDeviceDescriptorIndexingFeaturesEXT::shaderStorageTexelBufferArrayNonUniformIndexing", &descriptorIndexingFeatures->shaderStorageTexelBufferArrayNonUniformIndexing}},
1273
1274         // Capabilities that require an extension
1275         {spv::CapabilityDrawParameters, {VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME, nullptr, &extensions->vk_khr_shader_draw_parameters}},
1276         {spv::CapabilityGeometryShaderPassthroughNV, {VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME, nullptr, &extensions->vk_nv_geometry_shader_passthrough}},
1277         {spv::CapabilitySampleMaskOverrideCoverageNV, {VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME, nullptr, &extensions->vk_nv_sample_mask_override_coverage}},
1278         {spv::CapabilityShaderViewportIndexLayerEXT, {VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME, nullptr, &extensions->vk_ext_shader_viewport_index_layer}},
1279         {spv::CapabilityShaderViewportIndexLayerNV, {VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, nullptr, &extensions->vk_nv_viewport_array2}},
1280         {spv::CapabilityShaderViewportMaskNV, {VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, nullptr, &extensions->vk_nv_viewport_array2}},
1281         {spv::CapabilitySubgroupBallotKHR, {VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME, nullptr, &extensions->vk_ext_shader_subgroup_ballot }},
1282         {spv::CapabilitySubgroupVoteKHR, {VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME, nullptr, &extensions->vk_ext_shader_subgroup_vote }},
1283     };
1284     // clang-format on
1285
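    // Note on the table above: some SPIR-V capability tokens are aliases of one another (for
    // example, ShaderViewportIndexLayerEXT and ShaderViewportIndexLayerNV appear to share a single
    // enum value), which is why a std::unordered_multimap is used. The loop below treats a
    // multi-entry key as "at least one of these features/extensions must be enabled". Rough sketch
    // of the outcome for such an aliased capability:
    //
    //   OpCapability ShaderViewportIndexLayerEXT    // also matches the ...NV table entry
    //     -> accepted if VK_EXT_shader_viewport_index_layer OR VK_NV_viewport_array2 is enabled,
    //        otherwise reported via require_extension with a "(one of) [ ... ]" message.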
1286     for (auto insn : *src) {
1287         if (insn.opcode() == spv::OpCapability) {
1288             size_t n = capabilities.count(insn.word(1));
1289             if (1 == n) {  // key occurs exactly once
1290                 auto it = capabilities.find(insn.word(1));
1291                 if (it != capabilities.end()) {
1292                     if (it->second.feature) {
1293                         skip |= require_feature(report_data, *(it->second.feature), it->second.name);
1294                     }
1295                     if (it->second.extension) {
1296                         skip |= require_extension(report_data, *(it->second.extension), it->second.name);
1297                     }
1298                 }
1299             } else if (1 < n) {  // key occurs multiple times, at least one must be enabled
1300                 bool needs_feature = false, has_feature = false;
1301                 bool needs_ext = false, has_ext = false;
1302                 std::string feature_names = "(one of) [ ";
1303                 std::string extension_names = feature_names;
1304                 auto caps = capabilities.equal_range(insn.word(1));
1305                 for (auto it = caps.first; it != caps.second; ++it) {
1306                     if (it->second.feature) {
1307                         needs_feature = true;
1308                         has_feature = has_feature || *(it->second.feature);
1309                         feature_names += it->second.name;
1310                         feature_names += " ";
1311                     }
1312                     if (it->second.extension) {
1313                         needs_ext = true;
1314                         has_ext = has_ext || *(it->second.extension);
1315                         extension_names += it->second.name;
1316                         extension_names += " ";
1317                     }
1318                 }
1319                 if (needs_feature) {
1320                     feature_names += "]";
1321                     skip |= require_feature(report_data, has_feature, feature_names.c_str());
1322                 }
1323                 if (needs_ext) {
1324                     extension_names += "]";
1325                     skip |= require_extension(report_data, has_ext, extension_names.c_str());
1326                 }
1327             }
1328         }
1329     }
1330
1331     if (has_writable_descriptor) {
1332         switch (stage) {
1333             case VK_SHADER_STAGE_COMPUTE_BIT:
1334                 // No feature requirements for writes and atomics from the
1335                 // compute stage
1336                 break;
1337             case VK_SHADER_STAGE_FRAGMENT_BIT:
1338                 skip |= require_feature(report_data, enabledFeatures->fragmentStoresAndAtomics, "fragmentStoresAndAtomics");
1339                 break;
1340             default:
1341                 skip |=
1342                     require_feature(report_data, enabledFeatures->vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics");
1343                 break;
1344         }
1345     }
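    // Example of the writable-descriptor rule above, assuming a fragment shader such as
    //
    //   layout(set=0, binding=0, rgba8) uniform writeonly image2D img;
    //   void main() { imageStore(img, ivec2(gl_FragCoord.xy), vec4(1.0)); }
    //
    // sets has_writable_descriptor: it then requires VkPhysicalDeviceFeatures::fragmentStoresAndAtomics.
    // The same store from a vertex-pipeline stage would require vertexPipelineStoresAndAtomics,
    // while a compute stage needs no additional feature.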
1346
1347     return skip;
1348 }
1349
1350 static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
1351     auto type = module->get_def(type_id);
1352
1353     while (true) {
1354         switch (type.opcode()) {
1355             case spv::OpTypeArray:
1356             case spv::OpTypeSampledImage:
1357                 type = module->get_def(type.word(2));
1358                 break;
1359             case spv::OpTypePointer:
1360                 type = module->get_def(type.word(3));
1361                 break;
1362             case spv::OpTypeImage: {
1363                 auto dim = type.word(3);
1364                 auto arrayed = type.word(5);
1365                 auto msaa = type.word(6);
1366
1367                 switch (dim) {
1368                     case spv::Dim1D:
1369                         return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
1370                     case spv::Dim2D:
1371                         return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
1372                                (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
1373                     case spv::Dim3D:
1374                         return DESCRIPTOR_REQ_VIEW_TYPE_3D;
1375                     case spv::DimCube:
1376                         return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
1377                     case spv::DimSubpassData:
1378                         return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
1379                     default:  // buffer, etc.
1380                         return 0;
1381                 }
1382             }
1383             default:
1384                 return 0;
1385         }
1386     }
1387 }
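// Illustrative mapping for descriptor_type_to_reqs above (the resulting descriptor_req bits are
// recorded in the pipeline's active_slots and, as this sketch assumes, checked against the bound
// image views elsewhere in core validation):
//
//   uniform sampler2DArray s;  // Dim2D,   Arrayed=1, MS=0 -> DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY
//   uniform sampler2DMS   ms;  // Dim2D,   Arrayed=0, MS=1 -> DESCRIPTOR_REQ_MULTI_SAMPLE  | DESCRIPTOR_REQ_VIEW_TYPE_2D
//   uniform samplerCube    c;  // DimCube, Arrayed=0       -> DESCRIPTOR_REQ_VIEW_TYPE_CUBE
//   uniform samplerBuffer  b;  // DimBuffer                -> 0 (no view-type requirement)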
1388
1389 // For the given pipelineLayout, verify that the set_layout_node at slot.first
1390 //  has the requested binding at slot.second and return a pointer to that binding
1391 static VkDescriptorSetLayoutBinding const *get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout,
1392                                                                   descriptor_slot_t slot) {
1393     if (!pipelineLayout) return nullptr;
1394
1395     if (slot.first >= pipelineLayout->set_layouts.size()) return nullptr;
1396
1397     return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
1398 }
1399
1400 static void process_execution_modes(shader_module const *src, spirv_inst_iter entrypoint, PIPELINE_STATE *pipeline) {
1401     auto entrypoint_id = entrypoint.word(1);
1402     bool is_point_mode = false;
1403
1404     for (auto insn : *src) {
1405         if (insn.opcode() == spv::OpExecutionMode && insn.word(1) == entrypoint_id) {
1406             switch (insn.word(2)) {
1407                 case spv::ExecutionModePointMode:
1408                     // In tessellation shaders, PointMode is separate and trumps the tessellation topology.
1409                     is_point_mode = true;
1410                     break;
1411
1412                 case spv::ExecutionModeOutputPoints:
1413                     pipeline->topology_at_rasterizer = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
1414                     break;
1415
1416                 case spv::ExecutionModeIsolines:
1417                 case spv::ExecutionModeOutputLineStrip:
1418                     pipeline->topology_at_rasterizer = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
1419                     break;
1420
1421                 case spv::ExecutionModeTriangles:
1422                 case spv::ExecutionModeQuads:
1423                 case spv::ExecutionModeOutputTriangleStrip:
1424                     pipeline->topology_at_rasterizer = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
1425                     break;
1426             }
1427         }
1428     }
1429
1430     if (is_point_mode) pipeline->topology_at_rasterizer = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
1431 }
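// Example of the execution-mode handling above: a tessellation evaluation shader declaring
//
//   layout(triangles, equal_spacing, cw, point_mode) in;
//
// emits both ExecutionModeTriangles and ExecutionModePointMode. Triangles alone would select
// VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, but PointMode wins, so topology_at_rasterizer ends up as
// VK_PRIMITIVE_TOPOLOGY_POINT_LIST.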
1432
1433 static bool validate_pipeline_shader_stage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage,
1434                                            PIPELINE_STATE *pipeline, shader_module const **out_module,
1435                                            spirv_inst_iter *out_entrypoint) {
1436     bool skip = false;
1437     auto module = *out_module = GetShaderModuleState(dev_data, pStage->module);
1438     auto report_data = GetReportData(dev_data);
1439
1440     if (!module->has_valid_spirv) return false;
1441
1442     // Find the entrypoint
1443     auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
1444     if (entrypoint == module->end()) {
1445         if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1446                     VALIDATION_ERROR_10600586, "No entrypoint found named `%s` for stage %s.", pStage->pName,
1447                     string_VkShaderStageFlagBits(pStage->stage))) {
1448             return true;  // no point continuing beyond here, any analysis is just going to be garbage.
1449         }
1450     }
1451
1452     // Mark accessible ids
1453     auto accessible_ids = mark_accessible_ids(module, entrypoint);
1454     process_execution_modes(module, entrypoint, pipeline);
1455
1456     // Validate descriptor set layout against what the entrypoint actually uses
1457     bool has_writable_descriptor = false;
1458     auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids, &has_writable_descriptor);
1459
1460     // Validate shader capabilities against enabled device features
1461     skip |= validate_shader_capabilities(dev_data, module, pStage->stage, has_writable_descriptor);
1462
1463     skip |= validate_specialization_offsets(report_data, pStage);
1464     skip |= validate_push_constant_usage(report_data, pipeline->pipeline_layout.push_constant_ranges.get(), module, accessible_ids,
1465                                          pStage->stage);
1466
1467     // Validate descriptor use
1468     for (auto use : descriptor_uses) {
1469         // While validating shaders capture which slots are used by the pipeline
1470         auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
1471         reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
1472
1473         // Verify given pipelineLayout has requested setLayout with requested binding
1474         const auto &binding = get_descriptor_binding(&pipeline->pipeline_layout, use.first);
1475         unsigned required_descriptor_count;
1476
1477         if (!binding) {
1478             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1479                             SHADER_CHECKER_MISSING_DESCRIPTOR,
1480                             "Shader uses descriptor slot %u.%u (used as type `%s`) but it is not declared in the pipeline layout",
1481                             use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str());
1482         } else if (~binding->stageFlags & pStage->stage) {
1483             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1484                             SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE,
1485                             "Shader uses descriptor slot %u.%u (used as type `%s`) but descriptor not accessible from stage %s",
1486                             use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
1487                             string_VkShaderStageFlagBits(pStage->stage));
1488         } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType, required_descriptor_count)) {
1489             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1490                             SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH,
1491                             "Type mismatch on descriptor slot %u.%u (used as type `%s`) but the descriptor is of type %s", use.first.first,
1492                             use.first.second, describe_type(module, use.second.type_id).c_str(),
1493                             string_VkDescriptorType(binding->descriptorType));
1494         } else if (binding->descriptorCount < required_descriptor_count) {
1495             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1496                             SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH,
1497                             "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
1498                             required_descriptor_count, use.first.first, use.first.second,
1499                             describe_type(module, use.second.type_id).c_str(), binding->descriptorCount);
1500         }
1501     }
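    // Sketch of how the checks in the loop above play out for a hypothetical shader/layout pair:
    //
    //   shader:  layout(set=0, binding=1) uniform sampler2D tex;   (used from the fragment stage)
    //   layout:  set 0, binding 1 declared as VK_DESCRIPTOR_TYPE_STORAGE_IMAGE with stageFlags = VERTEX only
    //
    // Omitting the binding entirely trips SHADER_CHECKER_MISSING_DESCRIPTOR; with the binding
    // present but stageFlags missing FRAGMENT it trips SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE;
    // and with correct stage flags the STORAGE_IMAGE/sampler2D disagreement trips
    // SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH.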
1502
1503     // Validate use of input attachments against subpass structure
1504     if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
1505         auto input_attachment_uses = collect_interface_by_input_attachment_index(module, accessible_ids);
1506
1507         auto rpci = pipeline->rp_state->createInfo.ptr();
1508         auto subpass = pipeline->graphicsPipelineCI.subpass;
1509
1510         for (auto use : input_attachment_uses) {
1511             auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
1512             auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount)
1513                              ? input_attachments[use.first].attachment
1514                              : VK_ATTACHMENT_UNUSED;
1515
1516             if (index == VK_ATTACHMENT_UNUSED) {
1517                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1518                                 SHADER_CHECKER_MISSING_INPUT_ATTACHMENT,
1519                                 "Shader consumes input attachment index %u but it is not provided in the subpass", use.first);
1520             } else if (!(get_format_type(rpci->pAttachments[index].format) & get_fundamental_type(module, use.second.type_id))) {
1521                 skip |=
1522                     log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1523                             SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH,
1524                             "Subpass input attachment %u format of %s does not match type used in shader `%s`", use.first,
1525                             string_VkFormat(rpci->pAttachments[index].format), describe_type(module, use.second.type_id).c_str());
1526             }
1527         }
1528     }
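    // Example for the input-attachment check above: a fragment shader containing
    //
    //   layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput color;
    //
    // requires pSubpasses[subpass].pInputAttachments[0] to reference a real attachment (not
    // VK_ATTACHMENT_UNUSED), and that attachment's format must be float-class to match the
    // non-integer subpassInput type; usubpassInput/isubpassInput would likewise require a
    // UINT/SINT-class format.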
1529
1530     return skip;
1531 }
1532
1533 static bool validate_interface_between_stages(debug_report_data const *report_data, shader_module const *producer,
1534                                               spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1535                                               shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1536                                               shader_stage_attributes const *consumer_stage) {
1537     bool skip = false;
1538
1539     auto outputs =
1540         collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1541     auto inputs =
1542         collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1543
1544     auto a_it = outputs.begin();
1545     auto b_it = inputs.begin();
1546
1547     // Maps sorted by key (location); walk them together to find mismatches
1548     while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1549         bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1550         bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1551         auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1552         auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1553
1554         if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1555             skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1556                             SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "%s writes to output location %u.%u which is not consumed by %s",
1557                             producer_stage->name, a_first.first, a_first.second, consumer_stage->name);
1558             a_it++;
1559         } else if (a_at_end || a_first > b_first) {
1560             skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1561                             SHADER_CHECKER_INPUT_NOT_PRODUCED, "%s consumes input location %u.%u which is not written by %s",
1562                             consumer_stage->name, b_first.first, b_first.second, producer_stage->name);
1563             b_it++;
1564         } else {
1565             // subtleties of arrayed interfaces:
1566             // - if is_patch, then the member is not arrayed, even though the interface may be.
1567             // - if is_block_member, then the extra array level of an arrayed interface is not
1568             //   expressed in the member type -- it's expressed in the block type.
1569             if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1570                              producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1571                              consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member, true)) {
1572                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
1573                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "Type mismatch on location %u.%u: '%s' vs '%s'",
1574                                 a_first.first, a_first.second, describe_type(producer, a_it->second.type_id).c_str(),
1575                                 describe_type(consumer, b_it->second.type_id).c_str());
1576             }
1577             if (a_it->second.is_patch != b_it->second.is_patch) {
1578                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1579                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH,
1580                                 "Decoration mismatch on location %u.%u: is per-%s in %s stage but per-%s in %s stage",
1581                                 a_first.first, a_first.second, a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1582                                 b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name);
1583             }
1584             if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
1585                 skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
1586                                 SHADER_CHECKER_INTERFACE_TYPE_MISMATCH,
1587                                 "Decoration mismatch on location %u.%u: %s and %s stages differ in precision", a_first.first,
1588                                 a_first.second, producer_stage->name, consumer_stage->name);
1589             }
1590             a_it++;
1591             b_it++;
1592         }
1593     }
1594
1595     return skip;
1596 }
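// Worked example for the interface walk above: a vertex shader writing
//
//   layout(location = 0) out vec3 vNormal;
//   layout(location = 1) out vec4 vColor;
//
// paired with a fragment shader reading only
//
//   layout(location = 1) in vec3 fColor;
//
// reports location 0 as SHADER_CHECKER_OUTPUT_NOT_CONSUMED (performance warning) and location 1 as
// SHADER_CHECKER_INTERFACE_TYPE_MISMATCH (vec4 written, vec3 consumed).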
1597
1598 // Validate the shaders used by the given pipeline and store the active_slots
1599 //  that are actually used by the pipeline into pPipeline->active_slots
1600 bool validate_and_capture_pipeline_shader_state(layer_data *dev_data, PIPELINE_STATE *pipeline) {
1601     auto pCreateInfo = pipeline->graphicsPipelineCI.ptr();
1602     int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
1603     int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
1604     auto report_data = GetReportData(dev_data);
1605
1606     shader_module const *shaders[5];
1607     memset(shaders, 0, sizeof(shaders));
1608     spirv_inst_iter entrypoints[5];
1609     memset(entrypoints, 0, sizeof(entrypoints));
1610     bool skip = false;
1611
1612     for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
1613         auto pStage = &pCreateInfo->pStages[i];
1614         auto stage_id = get_shader_stage_id(pStage->stage);
1615         skip |= validate_pipeline_shader_stage(dev_data, pStage, pipeline, &shaders[stage_id], &entrypoints[stage_id]);
1616     }
1617
1618     // If the shader stages are individually invalid, cross-stage validation is pointless.
1619     if (skip) return true;
1620
1621     auto vi = pCreateInfo->pVertexInputState;
1622
1623     if (vi) {
1624         skip |= validate_vi_consistency(report_data, vi);
1625     }
1626
1627     if (shaders[vertex_stage] && shaders[vertex_stage]->has_valid_spirv) {
1628         skip |= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
1629     }
1630
1631     int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
1632     int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
1633
1634     while (!shaders[producer] && producer != fragment_stage) {
1635         producer++;
1636         consumer++;
1637     }
1638
1639     for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
1640         assert(shaders[producer]);
1641         if (shaders[consumer]) {
1642             if (shaders[consumer]->has_valid_spirv && shaders[producer]->has_valid_spirv) {
1643                 skip |= validate_interface_between_stages(report_data, shaders[producer], entrypoints[producer],
1644                                                           &shader_stage_attribs[producer], shaders[consumer], entrypoints[consumer],
1645                                                           &shader_stage_attribs[consumer]);
1646             }
1647
1648             producer = consumer;
1649         }
1650     }
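    // Example of the producer/consumer walk above, assuming a pipeline with vertex, geometry, and
    // fragment shaders but no tessellation: producer starts at the vertex stage, consumer skips the
    // two empty tessellation slots, the VS<->GS interface is validated and producer advances to the
    // geometry stage, then the GS<->FS interface is validated; the fragment outputs themselves are
    // checked against the render pass just below.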
1651
1652     if (shaders[fragment_stage] && shaders[fragment_stage]->has_valid_spirv) {
1653         skip |= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage], pipeline,
1654                                                         pCreateInfo->subpass);
1655     }
1656
1657     return skip;
1658 }
1659
1660 bool validate_compute_pipeline(layer_data *dev_data, PIPELINE_STATE *pipeline) {
1661     auto pCreateInfo = pipeline->computePipelineCI.ptr();
1662
1663     shader_module const *module;
1664     spirv_inst_iter entrypoint;
1665
1666     return validate_pipeline_shader_stage(dev_data, &pCreateInfo->stage, pipeline, &module, &entrypoint);
1667 }
1668
1669 uint32_t ValidationCache::MakeShaderHash(VkShaderModuleCreateInfo const *smci) { return XXH32(smci->pCode, smci->codeSize, 0); }
1670
1671 static ValidationCache *GetValidationCacheInfo(VkShaderModuleCreateInfo const *pCreateInfo) {
1672     while ((pCreateInfo = (VkShaderModuleCreateInfo const *)pCreateInfo->pNext) != nullptr) {
1673         if (pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT)
1674             return (ValidationCache *)((VkShaderModuleValidationCacheCreateInfoEXT const *)pCreateInfo)->validationCache;
1675     }
1676
1677     return nullptr;
1678 }
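// How GetValidationCacheInfo finds a cache: the application chains the EXT struct into
// VkShaderModuleCreateInfo::pNext, roughly like this (sketch only; error handling omitted, and
// myCache / spirvWords / spirvSizeInBytes are placeholder names):
//
//   VkShaderModuleValidationCacheCreateInfoEXT vcci = {};
//   vcci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT;
//   vcci.validationCache = myCache;  // created earlier with vkCreateValidationCacheEXT
//
//   VkShaderModuleCreateInfo smci = {};
//   smci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
//   smci.pNext = &vcci;
//   smci.codeSize = spirvSizeInBytes;
//   smci.pCode = spirvWords;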
1679
1680 bool PreCallValidateCreateShaderModule(layer_data *dev_data, VkShaderModuleCreateInfo const *pCreateInfo, bool *spirv_valid) {
1681     bool skip = false;
1682     spv_result_t spv_valid = SPV_SUCCESS;
1683     auto report_data = GetReportData(dev_data);
1684
1685     if (GetDisables(dev_data)->shader_validation) {
1686         return false;
1687     }
1688
1689     auto have_glsl_shader = GetEnabledExtensions(dev_data)->vk_nv_glsl_shader;
1690
1691     if (!have_glsl_shader && (pCreateInfo->codeSize % 4)) {
1692         skip |= log_msg(
1693             report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, VALIDATION_ERROR_12a00ac0,
1694             "SPIR-V module not valid: codeSize must be a multiple of 4 but is " PRINTF_SIZE_T_SPECIFIER ".", pCreateInfo->codeSize);
1695     } else {
1696         auto cache = GetValidationCacheInfo(pCreateInfo);
1697         uint32_t hash = 0;
1698         if (cache) {
1699             hash = ValidationCache::MakeShaderHash(pCreateInfo);
1700             if (cache->Contains(hash)) return false;
1701         }
1702
1703         // Use SPIRV-Tools validator to try and catch any issues with the module itself
1704         spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
1705         spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
1706         spv_diagnostic diag = nullptr;
1707
1708         spv_valid = spvValidate(ctx, &binary, &diag);
1709         if (spv_valid != SPV_SUCCESS) {
1710             if (!have_glsl_shader || (pCreateInfo->pCode[0] == spv::MagicNumber)) {
1711                 skip |=
1712                     log_msg(report_data, spv_valid == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
1713                             VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, SHADER_CHECKER_INCONSISTENT_SPIRV,
1714                             "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
1715             }
1716         } else {
1717             if (cache) {
1718                 cache->Insert(hash);
1719             }
1720         }
1721
1722         spvDiagnosticDestroy(diag);
1723         spvContextDestroy(ctx);
1724     }
1725
1726     *spirv_valid = (spv_valid == SPV_SUCCESS);
1727     return skip;
1728 }