From 430ef40ab4af75fdb6a0637cd3871f7e88843594 Mon Sep 17 00:00:00 2001 From: Rex Xu Date: Fri, 14 Oct 2016 17:22:23 +0800 Subject: [PATCH] Implement new revision of extension GL_AMD_shader_ballot - Add support for invocation functions with "InclusiveScan" and "ExclusiveScan" modes. - Add support for invocation functions taking int64/uint64/double/float16 as inout data types. --- SPIRV/GlslangToSpv.cpp | 96 +- SPIRV/SpvBuilder.cpp | 2 +- SPIRV/SpvBuilder.h | 2 +- Test/baseResults/spv.shaderBallotAMD.comp.out | 1217 +++++++++++++++++++++++++ Test/spv.shaderBallotAMD.comp | 165 ++++ glslang/Include/intermediate.h | 12 + glslang/MachineIndependent/Initialize.cpp | 585 +++++++++++- glslang/MachineIndependent/intermOut.cpp | 15 + gtests/Spv.FromFile.cpp | 1 + 9 files changed, 2067 insertions(+), 28 deletions(-) create mode 100644 Test/baseResults/spv.shaderBallotAMD.comp.out create mode 100644 Test/spv.shaderBallotAMD.comp diff --git a/SPIRV/GlslangToSpv.cpp b/SPIRV/GlslangToSpv.cpp index 5e3dc52..af08e4b 100755 --- a/SPIRV/GlslangToSpv.cpp +++ b/SPIRV/GlslangToSpv.cpp @@ -161,7 +161,7 @@ protected: spv::Id makeSmearedConstant(spv::Id constant, int vectorSize); spv::Id createAtomicOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector& operands, glslang::TBasicType typeProxy); spv::Id createInvocationsOperation(glslang::TOperator op, spv::Id typeId, std::vector& operands, glslang::TBasicType typeProxy); - spv::Id CreateInvocationsVectorOperation(spv::Op op, spv::Id typeId, std::vector& operands); + spv::Id CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation, spv::Id typeId, std::vector& operands); spv::Id createMiscOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId, std::vector& operands, glslang::TBasicType typeProxy); spv::Id createNoArgOperation(glslang::TOperator op, spv::Decoration precision, spv::Id typeId); spv::Id getSymbolId(const glslang::TIntermSymbol* node); @@ -2015,7 +2015,6 
@@ spv::Id TGlslangToSpvTraverser::convertGlslangToSpvType(const glslang::TType& ty #ifdef AMD_EXTENSIONS case glslang::EbtFloat16: builder.addExtension(spv::E_SPV_AMD_gpu_shader_half_float); - builder.addCapability(spv::CapabilityFloat16); spvType = builder.makeFloatType(16); break; #endif @@ -3743,6 +3742,18 @@ spv::Id TGlslangToSpvTraverser::createUnaryOperation(glslang::TOperator op, spv: case glslang::EOpMinInvocationsNonUniform: case glslang::EOpMaxInvocationsNonUniform: case glslang::EOpAddInvocationsNonUniform: + case glslang::EOpMinInvocationsInclusiveScan: + case glslang::EOpMaxInvocationsInclusiveScan: + case glslang::EOpAddInvocationsInclusiveScan: + case glslang::EOpMinInvocationsInclusiveScanNonUniform: + case glslang::EOpMaxInvocationsInclusiveScanNonUniform: + case glslang::EOpAddInvocationsInclusiveScanNonUniform: + case glslang::EOpMinInvocationsExclusiveScan: + case glslang::EOpMaxInvocationsExclusiveScan: + case glslang::EOpAddInvocationsExclusiveScan: + case glslang::EOpMinInvocationsExclusiveScanNonUniform: + case glslang::EOpMaxInvocationsExclusiveScanNonUniform: + case glslang::EOpAddInvocationsExclusiveScanNonUniform: #endif { std::vector operands; @@ -4130,8 +4141,9 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op #endif spv::Op opCode = spv::OpNop; - std::vector spvGroupOperands; + spv::GroupOperation groupOperation = spv::GroupOperationMax; + if (op == glslang::EOpBallot || op == glslang::EOpReadFirstInvocation || op == glslang::EOpReadInvocation) { builder.addExtension(spv::E_SPV_KHR_shader_ballot); @@ -4141,15 +4153,47 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op #ifdef AMD_EXTENSIONS if (op == glslang::EOpMinInvocationsNonUniform || op == glslang::EOpMaxInvocationsNonUniform || - op == glslang::EOpAddInvocationsNonUniform) + op == glslang::EOpAddInvocationsNonUniform || + op == glslang::EOpMinInvocationsInclusiveScanNonUniform || + op == 
glslang::EOpMaxInvocationsInclusiveScanNonUniform || + op == glslang::EOpAddInvocationsInclusiveScanNonUniform || + op == glslang::EOpMinInvocationsExclusiveScanNonUniform || + op == glslang::EOpMaxInvocationsExclusiveScanNonUniform || + op == glslang::EOpAddInvocationsExclusiveScanNonUniform) builder.addExtension(spv::E_SPV_AMD_shader_ballot); #endif spvGroupOperands.push_back(builder.makeUintConstant(spv::ScopeSubgroup)); #ifdef AMD_EXTENSIONS - if (op == glslang::EOpMinInvocations || op == glslang::EOpMaxInvocations || op == glslang::EOpAddInvocations || - op == glslang::EOpMinInvocationsNonUniform || op == glslang::EOpMaxInvocationsNonUniform || op == glslang::EOpAddInvocationsNonUniform) - spvGroupOperands.push_back(spv::GroupOperationReduce); + switch (op) { + case glslang::EOpMinInvocations: + case glslang::EOpMaxInvocations: + case glslang::EOpAddInvocations: + case glslang::EOpMinInvocationsNonUniform: + case glslang::EOpMaxInvocationsNonUniform: + case glslang::EOpAddInvocationsNonUniform: + groupOperation = spv::GroupOperationReduce; + spvGroupOperands.push_back(groupOperation); + break; + case glslang::EOpMinInvocationsInclusiveScan: + case glslang::EOpMaxInvocationsInclusiveScan: + case glslang::EOpAddInvocationsInclusiveScan: + case glslang::EOpMinInvocationsInclusiveScanNonUniform: + case glslang::EOpMaxInvocationsInclusiveScanNonUniform: + case glslang::EOpAddInvocationsInclusiveScanNonUniform: + groupOperation = spv::GroupOperationInclusiveScan; + spvGroupOperands.push_back(groupOperation); + break; + case glslang::EOpMinInvocationsExclusiveScan: + case glslang::EOpMaxInvocationsExclusiveScan: + case glslang::EOpAddInvocationsExclusiveScan: + case glslang::EOpMinInvocationsExclusiveScanNonUniform: + case glslang::EOpMaxInvocationsExclusiveScanNonUniform: + case glslang::EOpAddInvocationsExclusiveScanNonUniform: + groupOperation = spv::GroupOperationExclusiveScan; + spvGroupOperands.push_back(groupOperation); + break; + } #endif } @@ -4175,7 +4219,7 
@@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op case glslang::EOpReadInvocation: opCode = spv::OpSubgroupReadInvocationKHR; if (builder.isVectorType(typeId)) - return CreateInvocationsVectorOperation(opCode, typeId, operands); + return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands); break; case glslang::EOpReadFirstInvocation: opCode = spv::OpSubgroupFirstInvocationKHR; @@ -4205,7 +4249,15 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op case glslang::EOpMinInvocations: case glslang::EOpMaxInvocations: case glslang::EOpAddInvocations: - if (op == glslang::EOpMinInvocations) { + case glslang::EOpMinInvocationsInclusiveScan: + case glslang::EOpMaxInvocationsInclusiveScan: + case glslang::EOpAddInvocationsInclusiveScan: + case glslang::EOpMinInvocationsExclusiveScan: + case glslang::EOpMaxInvocationsExclusiveScan: + case glslang::EOpAddInvocationsExclusiveScan: + if (op == glslang::EOpMinInvocations || + op == glslang::EOpMinInvocationsInclusiveScan || + op == glslang::EOpMinInvocationsExclusiveScan) { if (isFloat) opCode = spv::OpGroupFMin; else { @@ -4214,7 +4266,9 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op else opCode = spv::OpGroupSMin; } - } else if (op == glslang::EOpMaxInvocations) { + } else if (op == glslang::EOpMaxInvocations || + op == glslang::EOpMaxInvocationsInclusiveScan || + op == glslang::EOpMaxInvocationsExclusiveScan) { if (isFloat) opCode = spv::OpGroupFMax; else { @@ -4231,13 +4285,21 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op } if (builder.isVectorType(typeId)) - return CreateInvocationsVectorOperation(opCode, typeId, operands); + return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands); break; case glslang::EOpMinInvocationsNonUniform: case glslang::EOpMaxInvocationsNonUniform: case glslang::EOpAddInvocationsNonUniform: - if (op == 
glslang::EOpMinInvocationsNonUniform) { + case glslang::EOpMinInvocationsInclusiveScanNonUniform: + case glslang::EOpMaxInvocationsInclusiveScanNonUniform: + case glslang::EOpAddInvocationsInclusiveScanNonUniform: + case glslang::EOpMinInvocationsExclusiveScanNonUniform: + case glslang::EOpMaxInvocationsExclusiveScanNonUniform: + case glslang::EOpAddInvocationsExclusiveScanNonUniform: + if (op == glslang::EOpMinInvocationsNonUniform || + op == glslang::EOpMinInvocationsInclusiveScanNonUniform || + op == glslang::EOpMinInvocationsExclusiveScanNonUniform) { if (isFloat) opCode = spv::OpGroupFMinNonUniformAMD; else { @@ -4247,7 +4309,9 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op opCode = spv::OpGroupSMinNonUniformAMD; } } - else if (op == glslang::EOpMaxInvocationsNonUniform) { + else if (op == glslang::EOpMaxInvocationsNonUniform || + op == glslang::EOpMaxInvocationsInclusiveScanNonUniform || + op == glslang::EOpMaxInvocationsExclusiveScanNonUniform) { if (isFloat) opCode = spv::OpGroupFMaxNonUniformAMD; else { @@ -4265,7 +4329,7 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op } if (builder.isVectorType(typeId)) - return CreateInvocationsVectorOperation(opCode, typeId, operands); + return CreateInvocationsVectorOperation(opCode, groupOperation, typeId, operands); break; #endif @@ -4279,7 +4343,7 @@ spv::Id TGlslangToSpvTraverser::createInvocationsOperation(glslang::TOperator op } // Create group invocation operations on a vector -spv::Id TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv::Id typeId, std::vector& operands) +spv::Id TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv::GroupOperation groupOperation, spv::Id typeId, std::vector& operands) { #ifdef AMD_EXTENSIONS assert(op == spv::OpGroupFMin || op == spv::OpGroupUMin || op == spv::OpGroupSMin || @@ -4323,7 +4387,7 @@ spv::Id 
TGlslangToSpvTraverser::CreateInvocationsVectorOperation(spv::Op op, spv spvGroupOperands.push_back(operands[1]); } else { spvGroupOperands.push_back(builder.makeUintConstant(spv::ScopeSubgroup)); - spvGroupOperands.push_back(spv::GroupOperationReduce); + spvGroupOperands.push_back(groupOperation); spvGroupOperands.push_back(scalar); } diff --git a/SPIRV/SpvBuilder.cpp b/SPIRV/SpvBuilder.cpp index 720eac2..1b04a6c 100644 --- a/SPIRV/SpvBuilder.cpp +++ b/SPIRV/SpvBuilder.cpp @@ -2354,7 +2354,7 @@ void Builder::dump(std::vector& out) const for (auto it = extensions.cbegin(); it != extensions.cend(); ++it) { Instruction extInst(0, 0, OpExtension); - extInst.addStringOperand(*it); + extInst.addStringOperand(it->c_str()); extInst.dump(out); } diff --git a/SPIRV/SpvBuilder.h b/SPIRV/SpvBuilder.h index 331f0e0..a97b44b 100755 --- a/SPIRV/SpvBuilder.h +++ b/SPIRV/SpvBuilder.h @@ -555,7 +555,7 @@ public: SourceLanguage source; int sourceVersion; - std::set extensions; + std::set extensions; std::vector sourceExtensions; AddressingModel addressModel; MemoryModel memoryModel; diff --git a/Test/baseResults/spv.shaderBallotAMD.comp.out b/Test/baseResults/spv.shaderBallotAMD.comp.out new file mode 100644 index 0000000..bb7f8c1 --- /dev/null +++ b/Test/baseResults/spv.shaderBallotAMD.comp.out @@ -0,0 +1,1217 @@ +spv.shaderBallotAMD.comp +Warning, version 450 is not yet complete; most version-specific features are present, but some are missing. 
+ +// Module Version 10000 +// Generated by (magic number): 80001 +// Id's are bound by 1048 + + Capability Shader + Capability Float16 + Capability Float64 + Capability Int64 + Capability Groups + Extension "SPV_AMD_gpu_shader_half_float" + Extension "SPV_AMD_shader_ballot" + 1: ExtInstImport "GLSL.std.450" + MemoryModel Logical GLSL450 + EntryPoint GLCompute 4 "main" + ExecutionMode 4 LocalSize 8 8 1 + Source GLSL 450 + SourceExtension "GL_AMD_gpu_shader_half_float" + SourceExtension "GL_AMD_shader_ballot" + SourceExtension "GL_ARB_gpu_shader_int64" + Name 4 "main" + Name 18 "Buffers" + MemberName 18(Buffers) 0 "i" + MemberName 18(Buffers) 1 "uv" + MemberName 18(Buffers) 2 "fv" + MemberName 18(Buffers) 3 "dv" + MemberName 18(Buffers) 4 "i64" + MemberName 18(Buffers) 5 "u64v" + MemberName 18(Buffers) 6 "f16v" + Name 20 "" + MemberDecorate 18(Buffers) 0 Offset 0 + MemberDecorate 18(Buffers) 1 Offset 8 + MemberDecorate 18(Buffers) 2 Offset 16 + MemberDecorate 18(Buffers) 3 Offset 32 + MemberDecorate 18(Buffers) 4 Offset 64 + MemberDecorate 18(Buffers) 5 Offset 80 + MemberDecorate 18(Buffers) 6 Offset 96 + Decorate 18(Buffers) BufferBlock + Decorate 20 DescriptorSet 0 + Decorate 20 Binding 0 + Decorate 1047 BuiltIn WorkgroupSize + 2: TypeVoid + 3: TypeFunction 2 + 6: TypeInt 32 1 + 7: TypeInt 32 0 + 8: TypeVector 7(int) 2 + 9: TypeFloat 32 + 10: TypeVector 9(float) 3 + 11: TypeFloat 64 + 12: TypeVector 11(float) 4 + 13: TypeInt 64 1 + 14: TypeInt 64 0 + 15: TypeVector 14(int) 2 + 16: TypeFloat 16 + 17: TypeVector 16(float) 3 + 18(Buffers): TypeStruct 6(int) 8(ivec2) 10(fvec3) 12(fvec4) 13(int) 15(ivec2) 17(fvec3) + 19: TypePointer Uniform 18(Buffers) + 20: 19(ptr) Variable Uniform + 21: 6(int) Constant 0 + 22: TypePointer Uniform 6(int) + 25: 7(int) Constant 3 + 28: 6(int) Constant 1 + 29: TypePointer Uniform 8(ivec2) + 38: 6(int) Constant 2 + 39: TypePointer Uniform 10(fvec3) + 50: 6(int) Constant 3 + 51: TypePointer Uniform 12(fvec4) + 64: 6(int) Constant 4 + 65: 
TypePointer Uniform 13(int) + 70: 6(int) Constant 5 + 71: TypePointer Uniform 15(ivec2) + 80: 6(int) Constant 6 + 81: TypePointer Uniform 17(fvec3) + 1044: TypeVector 7(int) 3 + 1045: 7(int) Constant 8 + 1046: 7(int) Constant 1 + 1047: 1044(ivec3) ConstantComposite 1045 1045 1046 + 4(main): 2 Function None 3 + 5: Label + 23: 22(ptr) AccessChain 20 21 + 24: 6(int) Load 23 + 26: 6(int) GroupSMin 25 Reduce 24 + 27: 22(ptr) AccessChain 20 21 + Store 27 26 + 30: 29(ptr) AccessChain 20 28 + 31: 8(ivec2) Load 30 + 32: 7(int) CompositeExtract 31 0 + 33: 7(int) GroupUMin 25 Reduce 32 + 34: 7(int) CompositeExtract 31 1 + 35: 7(int) GroupUMin 25 Reduce 34 + 36: 8(ivec2) CompositeConstruct 33 35 + 37: 29(ptr) AccessChain 20 28 + Store 37 36 + 40: 39(ptr) AccessChain 20 38 + 41: 10(fvec3) Load 40 + 42: 9(float) CompositeExtract 41 0 + 43: 9(float) GroupFMin 25 Reduce 42 + 44: 9(float) CompositeExtract 41 1 + 45: 9(float) GroupFMin 25 Reduce 44 + 46: 9(float) CompositeExtract 41 2 + 47: 9(float) GroupFMin 25 Reduce 46 + 48: 10(fvec3) CompositeConstruct 43 45 47 + 49: 39(ptr) AccessChain 20 38 + Store 49 48 + 52: 51(ptr) AccessChain 20 50 + 53: 12(fvec4) Load 52 + 54: 11(float) CompositeExtract 53 0 + 55: 11(float) GroupFMin 25 Reduce 54 + 56: 11(float) CompositeExtract 53 1 + 57: 11(float) GroupFMin 25 Reduce 56 + 58: 11(float) CompositeExtract 53 2 + 59: 11(float) GroupFMin 25 Reduce 58 + 60: 11(float) CompositeExtract 53 3 + 61: 11(float) GroupFMin 25 Reduce 60 + 62: 12(fvec4) CompositeConstruct 55 57 59 61 + 63: 51(ptr) AccessChain 20 50 + Store 63 62 + 66: 65(ptr) AccessChain 20 64 + 67: 13(int) Load 66 + 68: 13(int) GroupSMin 25 Reduce 67 + 69: 65(ptr) AccessChain 20 64 + Store 69 68 + 72: 71(ptr) AccessChain 20 70 + 73: 15(ivec2) Load 72 + 74: 14(int) CompositeExtract 73 0 + 75: 14(int) GroupUMin 25 Reduce 74 + 76: 14(int) CompositeExtract 73 1 + 77: 14(int) GroupUMin 25 Reduce 76 + 78: 15(ivec2) CompositeConstruct 75 77 + 79: 71(ptr) AccessChain 20 70 + Store 79 78 + 82: 
81(ptr) AccessChain 20 80 + 83: 17(fvec3) Load 82 + 84: 16(float) CompositeExtract 83 0 + 85: 16(float) GroupFMin 25 Reduce 84 + 86: 16(float) CompositeExtract 83 1 + 87: 16(float) GroupFMin 25 Reduce 86 + 88: 16(float) CompositeExtract 83 2 + 89: 16(float) GroupFMin 25 Reduce 88 + 90: 17(fvec3) CompositeConstruct 85 87 89 + 91: 81(ptr) AccessChain 20 80 + Store 91 90 + 92: 22(ptr) AccessChain 20 21 + 93: 6(int) Load 92 + 94: 6(int) GroupSMax 25 Reduce 93 + 95: 22(ptr) AccessChain 20 21 + Store 95 94 + 96: 29(ptr) AccessChain 20 28 + 97: 8(ivec2) Load 96 + 98: 7(int) CompositeExtract 97 0 + 99: 7(int) GroupUMax 25 Reduce 98 + 100: 7(int) CompositeExtract 97 1 + 101: 7(int) GroupUMax 25 Reduce 100 + 102: 8(ivec2) CompositeConstruct 99 101 + 103: 29(ptr) AccessChain 20 28 + Store 103 102 + 104: 39(ptr) AccessChain 20 38 + 105: 10(fvec3) Load 104 + 106: 9(float) CompositeExtract 105 0 + 107: 9(float) GroupFMax 25 Reduce 106 + 108: 9(float) CompositeExtract 105 1 + 109: 9(float) GroupFMax 25 Reduce 108 + 110: 9(float) CompositeExtract 105 2 + 111: 9(float) GroupFMax 25 Reduce 110 + 112: 10(fvec3) CompositeConstruct 107 109 111 + 113: 39(ptr) AccessChain 20 38 + Store 113 112 + 114: 51(ptr) AccessChain 20 50 + 115: 12(fvec4) Load 114 + 116: 11(float) CompositeExtract 115 0 + 117: 11(float) GroupFMax 25 Reduce 116 + 118: 11(float) CompositeExtract 115 1 + 119: 11(float) GroupFMax 25 Reduce 118 + 120: 11(float) CompositeExtract 115 2 + 121: 11(float) GroupFMax 25 Reduce 120 + 122: 11(float) CompositeExtract 115 3 + 123: 11(float) GroupFMax 25 Reduce 122 + 124: 12(fvec4) CompositeConstruct 117 119 121 123 + 125: 51(ptr) AccessChain 20 50 + Store 125 124 + 126: 65(ptr) AccessChain 20 64 + 127: 13(int) Load 126 + 128: 13(int) GroupSMax 25 Reduce 127 + 129: 65(ptr) AccessChain 20 64 + Store 129 128 + 130: 71(ptr) AccessChain 20 70 + 131: 15(ivec2) Load 130 + 132: 14(int) CompositeExtract 131 0 + 133: 14(int) GroupUMax 25 Reduce 132 + 134: 14(int) CompositeExtract 131 1 + 135: 
14(int) GroupUMax 25 Reduce 134 + 136: 15(ivec2) CompositeConstruct 133 135 + 137: 71(ptr) AccessChain 20 70 + Store 137 136 + 138: 81(ptr) AccessChain 20 80 + 139: 17(fvec3) Load 138 + 140: 16(float) CompositeExtract 139 0 + 141: 16(float) GroupFMax 25 Reduce 140 + 142: 16(float) CompositeExtract 139 1 + 143: 16(float) GroupFMax 25 Reduce 142 + 144: 16(float) CompositeExtract 139 2 + 145: 16(float) GroupFMax 25 Reduce 144 + 146: 17(fvec3) CompositeConstruct 141 143 145 + 147: 81(ptr) AccessChain 20 80 + Store 147 146 + 148: 22(ptr) AccessChain 20 21 + 149: 6(int) Load 148 + 150: 6(int) GroupIAdd 25 Reduce 149 + 151: 22(ptr) AccessChain 20 21 + Store 151 150 + 152: 29(ptr) AccessChain 20 28 + 153: 8(ivec2) Load 152 + 154: 7(int) CompositeExtract 153 0 + 155: 7(int) GroupIAdd 25 Reduce 154 + 156: 7(int) CompositeExtract 153 1 + 157: 7(int) GroupIAdd 25 Reduce 156 + 158: 8(ivec2) CompositeConstruct 155 157 + 159: 29(ptr) AccessChain 20 28 + Store 159 158 + 160: 39(ptr) AccessChain 20 38 + 161: 10(fvec3) Load 160 + 162: 9(float) CompositeExtract 161 0 + 163: 9(float) GroupFAdd 25 Reduce 162 + 164: 9(float) CompositeExtract 161 1 + 165: 9(float) GroupFAdd 25 Reduce 164 + 166: 9(float) CompositeExtract 161 2 + 167: 9(float) GroupFAdd 25 Reduce 166 + 168: 10(fvec3) CompositeConstruct 163 165 167 + 169: 39(ptr) AccessChain 20 38 + Store 169 168 + 170: 51(ptr) AccessChain 20 50 + 171: 12(fvec4) Load 170 + 172: 11(float) CompositeExtract 171 0 + 173: 11(float) GroupFAdd 25 Reduce 172 + 174: 11(float) CompositeExtract 171 1 + 175: 11(float) GroupFAdd 25 Reduce 174 + 176: 11(float) CompositeExtract 171 2 + 177: 11(float) GroupFAdd 25 Reduce 176 + 178: 11(float) CompositeExtract 171 3 + 179: 11(float) GroupFAdd 25 Reduce 178 + 180: 12(fvec4) CompositeConstruct 173 175 177 179 + 181: 51(ptr) AccessChain 20 50 + Store 181 180 + 182: 65(ptr) AccessChain 20 64 + 183: 13(int) Load 182 + 184: 13(int) GroupIAdd 25 Reduce 183 + 185: 65(ptr) AccessChain 20 64 + Store 185 184 + 186: 
71(ptr) AccessChain 20 70 + 187: 15(ivec2) Load 186 + 188: 14(int) CompositeExtract 187 0 + 189: 14(int) GroupIAdd 25 Reduce 188 + 190: 14(int) CompositeExtract 187 1 + 191: 14(int) GroupIAdd 25 Reduce 190 + 192: 15(ivec2) CompositeConstruct 189 191 + 193: 71(ptr) AccessChain 20 70 + Store 193 192 + 194: 81(ptr) AccessChain 20 80 + 195: 17(fvec3) Load 194 + 196: 16(float) CompositeExtract 195 0 + 197: 16(float) GroupFAdd 25 Reduce 196 + 198: 16(float) CompositeExtract 195 1 + 199: 16(float) GroupFAdd 25 Reduce 198 + 200: 16(float) CompositeExtract 195 2 + 201: 16(float) GroupFAdd 25 Reduce 200 + 202: 17(fvec3) CompositeConstruct 197 199 201 + 203: 81(ptr) AccessChain 20 80 + Store 203 202 + 204: 22(ptr) AccessChain 20 21 + 205: 6(int) Load 204 + 206: 6(int) GroupSMinNonUniformAMD 25 Reduce 205 + 207: 22(ptr) AccessChain 20 21 + Store 207 206 + 208: 29(ptr) AccessChain 20 28 + 209: 8(ivec2) Load 208 + 210: 7(int) CompositeExtract 209 0 + 211: 7(int) GroupUMinNonUniformAMD 25 Reduce 210 + 212: 7(int) CompositeExtract 209 1 + 213: 7(int) GroupUMinNonUniformAMD 25 Reduce 212 + 214: 8(ivec2) CompositeConstruct 211 213 + 215: 29(ptr) AccessChain 20 28 + Store 215 214 + 216: 39(ptr) AccessChain 20 38 + 217: 10(fvec3) Load 216 + 218: 9(float) CompositeExtract 217 0 + 219: 9(float) GroupFMinNonUniformAMD 25 Reduce 218 + 220: 9(float) CompositeExtract 217 1 + 221: 9(float) GroupFMinNonUniformAMD 25 Reduce 220 + 222: 9(float) CompositeExtract 217 2 + 223: 9(float) GroupFMinNonUniformAMD 25 Reduce 222 + 224: 10(fvec3) CompositeConstruct 219 221 223 + 225: 39(ptr) AccessChain 20 38 + Store 225 224 + 226: 51(ptr) AccessChain 20 50 + 227: 12(fvec4) Load 226 + 228: 11(float) CompositeExtract 227 0 + 229: 11(float) GroupFMinNonUniformAMD 25 Reduce 228 + 230: 11(float) CompositeExtract 227 1 + 231: 11(float) GroupFMinNonUniformAMD 25 Reduce 230 + 232: 11(float) CompositeExtract 227 2 + 233: 11(float) GroupFMinNonUniformAMD 25 Reduce 232 + 234: 11(float) CompositeExtract 227 3 + 235: 
11(float) GroupFMinNonUniformAMD 25 Reduce 234 + 236: 12(fvec4) CompositeConstruct 229 231 233 235 + 237: 51(ptr) AccessChain 20 50 + Store 237 236 + 238: 65(ptr) AccessChain 20 64 + 239: 13(int) Load 238 + 240: 13(int) GroupSMinNonUniformAMD 25 Reduce 239 + 241: 65(ptr) AccessChain 20 64 + Store 241 240 + 242: 71(ptr) AccessChain 20 70 + 243: 15(ivec2) Load 242 + 244: 14(int) CompositeExtract 243 0 + 245: 14(int) GroupUMinNonUniformAMD 25 Reduce 244 + 246: 14(int) CompositeExtract 243 1 + 247: 14(int) GroupUMinNonUniformAMD 25 Reduce 246 + 248: 15(ivec2) CompositeConstruct 245 247 + 249: 71(ptr) AccessChain 20 70 + Store 249 248 + 250: 81(ptr) AccessChain 20 80 + 251: 17(fvec3) Load 250 + 252: 16(float) CompositeExtract 251 0 + 253: 16(float) GroupFMinNonUniformAMD 25 Reduce 252 + 254: 16(float) CompositeExtract 251 1 + 255: 16(float) GroupFMinNonUniformAMD 25 Reduce 254 + 256: 16(float) CompositeExtract 251 2 + 257: 16(float) GroupFMinNonUniformAMD 25 Reduce 256 + 258: 17(fvec3) CompositeConstruct 253 255 257 + 259: 81(ptr) AccessChain 20 80 + Store 259 258 + 260: 22(ptr) AccessChain 20 21 + 261: 6(int) Load 260 + 262: 6(int) GroupSMaxNonUniformAMD 25 Reduce 261 + 263: 22(ptr) AccessChain 20 21 + Store 263 262 + 264: 29(ptr) AccessChain 20 28 + 265: 8(ivec2) Load 264 + 266: 7(int) CompositeExtract 265 0 + 267: 7(int) GroupUMaxNonUniformAMD 25 Reduce 266 + 268: 7(int) CompositeExtract 265 1 + 269: 7(int) GroupUMaxNonUniformAMD 25 Reduce 268 + 270: 8(ivec2) CompositeConstruct 267 269 + 271: 29(ptr) AccessChain 20 28 + Store 271 270 + 272: 39(ptr) AccessChain 20 38 + 273: 10(fvec3) Load 272 + 274: 9(float) CompositeExtract 273 0 + 275: 9(float) GroupFMaxNonUniformAMD 25 Reduce 274 + 276: 9(float) CompositeExtract 273 1 + 277: 9(float) GroupFMaxNonUniformAMD 25 Reduce 276 + 278: 9(float) CompositeExtract 273 2 + 279: 9(float) GroupFMaxNonUniformAMD 25 Reduce 278 + 280: 10(fvec3) CompositeConstruct 275 277 279 + 281: 39(ptr) AccessChain 20 38 + Store 281 280 + 282: 
51(ptr) AccessChain 20 50 + 283: 12(fvec4) Load 282 + 284: 11(float) CompositeExtract 283 0 + 285: 11(float) GroupFMaxNonUniformAMD 25 Reduce 284 + 286: 11(float) CompositeExtract 283 1 + 287: 11(float) GroupFMaxNonUniformAMD 25 Reduce 286 + 288: 11(float) CompositeExtract 283 2 + 289: 11(float) GroupFMaxNonUniformAMD 25 Reduce 288 + 290: 11(float) CompositeExtract 283 3 + 291: 11(float) GroupFMaxNonUniformAMD 25 Reduce 290 + 292: 12(fvec4) CompositeConstruct 285 287 289 291 + 293: 51(ptr) AccessChain 20 50 + Store 293 292 + 294: 65(ptr) AccessChain 20 64 + 295: 13(int) Load 294 + 296: 13(int) GroupSMaxNonUniformAMD 25 Reduce 295 + 297: 65(ptr) AccessChain 20 64 + Store 297 296 + 298: 71(ptr) AccessChain 20 70 + 299: 15(ivec2) Load 298 + 300: 14(int) CompositeExtract 299 0 + 301: 14(int) GroupUMaxNonUniformAMD 25 Reduce 300 + 302: 14(int) CompositeExtract 299 1 + 303: 14(int) GroupUMaxNonUniformAMD 25 Reduce 302 + 304: 15(ivec2) CompositeConstruct 301 303 + 305: 71(ptr) AccessChain 20 70 + Store 305 304 + 306: 81(ptr) AccessChain 20 80 + 307: 17(fvec3) Load 306 + 308: 16(float) CompositeExtract 307 0 + 309: 16(float) GroupFMaxNonUniformAMD 25 Reduce 308 + 310: 16(float) CompositeExtract 307 1 + 311: 16(float) GroupFMaxNonUniformAMD 25 Reduce 310 + 312: 16(float) CompositeExtract 307 2 + 313: 16(float) GroupFMaxNonUniformAMD 25 Reduce 312 + 314: 17(fvec3) CompositeConstruct 309 311 313 + 315: 81(ptr) AccessChain 20 80 + Store 315 314 + 316: 22(ptr) AccessChain 20 21 + 317: 6(int) Load 316 + 318: 6(int) GroupIAddNonUniformAMD 25 Reduce 317 + 319: 22(ptr) AccessChain 20 21 + Store 319 318 + 320: 29(ptr) AccessChain 20 28 + 321: 8(ivec2) Load 320 + 322: 7(int) CompositeExtract 321 0 + 323: 7(int) GroupIAddNonUniformAMD 25 Reduce 322 + 324: 7(int) CompositeExtract 321 1 + 325: 7(int) GroupIAddNonUniformAMD 25 Reduce 324 + 326: 8(ivec2) CompositeConstruct 323 325 + 327: 29(ptr) AccessChain 20 28 + Store 327 326 + 328: 39(ptr) AccessChain 20 38 + 329: 10(fvec3) Load 328 + 
330: 9(float) CompositeExtract 329 0 + 331: 9(float) GroupFAddNonUniformAMD 25 Reduce 330 + 332: 9(float) CompositeExtract 329 1 + 333: 9(float) GroupFAddNonUniformAMD 25 Reduce 332 + 334: 9(float) CompositeExtract 329 2 + 335: 9(float) GroupFAddNonUniformAMD 25 Reduce 334 + 336: 10(fvec3) CompositeConstruct 331 333 335 + 337: 39(ptr) AccessChain 20 38 + Store 337 336 + 338: 51(ptr) AccessChain 20 50 + 339: 12(fvec4) Load 338 + 340: 11(float) CompositeExtract 339 0 + 341: 11(float) GroupFAddNonUniformAMD 25 Reduce 340 + 342: 11(float) CompositeExtract 339 1 + 343: 11(float) GroupFAddNonUniformAMD 25 Reduce 342 + 344: 11(float) CompositeExtract 339 2 + 345: 11(float) GroupFAddNonUniformAMD 25 Reduce 344 + 346: 11(float) CompositeExtract 339 3 + 347: 11(float) GroupFAddNonUniformAMD 25 Reduce 346 + 348: 12(fvec4) CompositeConstruct 341 343 345 347 + 349: 51(ptr) AccessChain 20 50 + Store 349 348 + 350: 65(ptr) AccessChain 20 64 + 351: 13(int) Load 350 + 352: 13(int) GroupIAddNonUniformAMD 25 Reduce 351 + 353: 65(ptr) AccessChain 20 64 + Store 353 352 + 354: 71(ptr) AccessChain 20 70 + 355: 15(ivec2) Load 354 + 356: 14(int) CompositeExtract 355 0 + 357: 14(int) GroupIAddNonUniformAMD 25 Reduce 356 + 358: 14(int) CompositeExtract 355 1 + 359: 14(int) GroupIAddNonUniformAMD 25 Reduce 358 + 360: 15(ivec2) CompositeConstruct 357 359 + 361: 71(ptr) AccessChain 20 70 + Store 361 360 + 362: 81(ptr) AccessChain 20 80 + 363: 17(fvec3) Load 362 + 364: 16(float) CompositeExtract 363 0 + 365: 16(float) GroupFAddNonUniformAMD 25 Reduce 364 + 366: 16(float) CompositeExtract 363 1 + 367: 16(float) GroupFAddNonUniformAMD 25 Reduce 366 + 368: 16(float) CompositeExtract 363 2 + 369: 16(float) GroupFAddNonUniformAMD 25 Reduce 368 + 370: 17(fvec3) CompositeConstruct 365 367 369 + 371: 81(ptr) AccessChain 20 80 + Store 371 370 + 372: 22(ptr) AccessChain 20 21 + 373: 6(int) Load 372 + 374: 6(int) GroupSMin 25 InclusiveScan 373 + 375: 22(ptr) AccessChain 20 21 + Store 375 374 + 376: 29(ptr) 
AccessChain 20 28 + 377: 8(ivec2) Load 376 + 378: 7(int) CompositeExtract 377 0 + 379: 7(int) GroupUMin 25 InclusiveScan 378 + 380: 7(int) CompositeExtract 377 1 + 381: 7(int) GroupUMin 25 InclusiveScan 380 + 382: 8(ivec2) CompositeConstruct 379 381 + 383: 29(ptr) AccessChain 20 28 + Store 383 382 + 384: 39(ptr) AccessChain 20 38 + 385: 10(fvec3) Load 384 + 386: 9(float) CompositeExtract 385 0 + 387: 9(float) GroupFMin 25 InclusiveScan 386 + 388: 9(float) CompositeExtract 385 1 + 389: 9(float) GroupFMin 25 InclusiveScan 388 + 390: 9(float) CompositeExtract 385 2 + 391: 9(float) GroupFMin 25 InclusiveScan 390 + 392: 10(fvec3) CompositeConstruct 387 389 391 + 393: 39(ptr) AccessChain 20 38 + Store 393 392 + 394: 51(ptr) AccessChain 20 50 + 395: 12(fvec4) Load 394 + 396: 11(float) CompositeExtract 395 0 + 397: 11(float) GroupFMin 25 InclusiveScan 396 + 398: 11(float) CompositeExtract 395 1 + 399: 11(float) GroupFMin 25 InclusiveScan 398 + 400: 11(float) CompositeExtract 395 2 + 401: 11(float) GroupFMin 25 InclusiveScan 400 + 402: 11(float) CompositeExtract 395 3 + 403: 11(float) GroupFMin 25 InclusiveScan 402 + 404: 12(fvec4) CompositeConstruct 397 399 401 403 + 405: 51(ptr) AccessChain 20 50 + Store 405 404 + 406: 65(ptr) AccessChain 20 64 + 407: 13(int) Load 406 + 408: 13(int) GroupSMin 25 InclusiveScan 407 + 409: 65(ptr) AccessChain 20 64 + Store 409 408 + 410: 71(ptr) AccessChain 20 70 + 411: 15(ivec2) Load 410 + 412: 14(int) CompositeExtract 411 0 + 413: 14(int) GroupUMin 25 InclusiveScan 412 + 414: 14(int) CompositeExtract 411 1 + 415: 14(int) GroupUMin 25 InclusiveScan 414 + 416: 15(ivec2) CompositeConstruct 413 415 + 417: 71(ptr) AccessChain 20 70 + Store 417 416 + 418: 81(ptr) AccessChain 20 80 + 419: 17(fvec3) Load 418 + 420: 16(float) CompositeExtract 419 0 + 421: 16(float) GroupFMin 25 InclusiveScan 420 + 422: 16(float) CompositeExtract 419 1 + 423: 16(float) GroupFMin 25 InclusiveScan 422 + 424: 16(float) CompositeExtract 419 2 + 425: 16(float) GroupFMin 
25 InclusiveScan 424 + 426: 17(fvec3) CompositeConstruct 421 423 425 + 427: 81(ptr) AccessChain 20 80 + Store 427 426 + 428: 22(ptr) AccessChain 20 21 + 429: 6(int) Load 428 + 430: 6(int) GroupSMax 25 InclusiveScan 429 + 431: 22(ptr) AccessChain 20 21 + Store 431 430 + 432: 29(ptr) AccessChain 20 28 + 433: 8(ivec2) Load 432 + 434: 7(int) CompositeExtract 433 0 + 435: 7(int) GroupUMax 25 InclusiveScan 434 + 436: 7(int) CompositeExtract 433 1 + 437: 7(int) GroupUMax 25 InclusiveScan 436 + 438: 8(ivec2) CompositeConstruct 435 437 + 439: 29(ptr) AccessChain 20 28 + Store 439 438 + 440: 39(ptr) AccessChain 20 38 + 441: 10(fvec3) Load 440 + 442: 9(float) CompositeExtract 441 0 + 443: 9(float) GroupFMax 25 InclusiveScan 442 + 444: 9(float) CompositeExtract 441 1 + 445: 9(float) GroupFMax 25 InclusiveScan 444 + 446: 9(float) CompositeExtract 441 2 + 447: 9(float) GroupFMax 25 InclusiveScan 446 + 448: 10(fvec3) CompositeConstruct 443 445 447 + 449: 39(ptr) AccessChain 20 38 + Store 449 448 + 450: 51(ptr) AccessChain 20 50 + 451: 12(fvec4) Load 450 + 452: 11(float) CompositeExtract 451 0 + 453: 11(float) GroupFMax 25 InclusiveScan 452 + 454: 11(float) CompositeExtract 451 1 + 455: 11(float) GroupFMax 25 InclusiveScan 454 + 456: 11(float) CompositeExtract 451 2 + 457: 11(float) GroupFMax 25 InclusiveScan 456 + 458: 11(float) CompositeExtract 451 3 + 459: 11(float) GroupFMax 25 InclusiveScan 458 + 460: 12(fvec4) CompositeConstruct 453 455 457 459 + 461: 51(ptr) AccessChain 20 50 + Store 461 460 + 462: 65(ptr) AccessChain 20 64 + 463: 13(int) Load 462 + 464: 13(int) GroupSMax 25 InclusiveScan 463 + 465: 65(ptr) AccessChain 20 64 + Store 465 464 + 466: 71(ptr) AccessChain 20 70 + 467: 15(ivec2) Load 466 + 468: 14(int) CompositeExtract 467 0 + 469: 14(int) GroupUMax 25 InclusiveScan 468 + 470: 14(int) CompositeExtract 467 1 + 471: 14(int) GroupUMax 25 InclusiveScan 470 + 472: 15(ivec2) CompositeConstruct 469 471 + 473: 71(ptr) AccessChain 20 70 + Store 473 472 + 474: 81(ptr) 
AccessChain 20 80 + 475: 17(fvec3) Load 474 + 476: 16(float) CompositeExtract 475 0 + 477: 16(float) GroupFMax 25 InclusiveScan 476 + 478: 16(float) CompositeExtract 475 1 + 479: 16(float) GroupFMax 25 InclusiveScan 478 + 480: 16(float) CompositeExtract 475 2 + 481: 16(float) GroupFMax 25 InclusiveScan 480 + 482: 17(fvec3) CompositeConstruct 477 479 481 + 483: 81(ptr) AccessChain 20 80 + Store 483 482 + 484: 22(ptr) AccessChain 20 21 + 485: 6(int) Load 484 + 486: 6(int) GroupIAdd 25 InclusiveScan 485 + 487: 22(ptr) AccessChain 20 21 + Store 487 486 + 488: 29(ptr) AccessChain 20 28 + 489: 8(ivec2) Load 488 + 490: 7(int) CompositeExtract 489 0 + 491: 7(int) GroupIAdd 25 InclusiveScan 490 + 492: 7(int) CompositeExtract 489 1 + 493: 7(int) GroupIAdd 25 InclusiveScan 492 + 494: 8(ivec2) CompositeConstruct 491 493 + 495: 29(ptr) AccessChain 20 28 + Store 495 494 + 496: 39(ptr) AccessChain 20 38 + 497: 10(fvec3) Load 496 + 498: 9(float) CompositeExtract 497 0 + 499: 9(float) GroupFAdd 25 InclusiveScan 498 + 500: 9(float) CompositeExtract 497 1 + 501: 9(float) GroupFAdd 25 InclusiveScan 500 + 502: 9(float) CompositeExtract 497 2 + 503: 9(float) GroupFAdd 25 InclusiveScan 502 + 504: 10(fvec3) CompositeConstruct 499 501 503 + 505: 39(ptr) AccessChain 20 38 + Store 505 504 + 506: 51(ptr) AccessChain 20 50 + 507: 12(fvec4) Load 506 + 508: 11(float) CompositeExtract 507 0 + 509: 11(float) GroupFAdd 25 InclusiveScan 508 + 510: 11(float) CompositeExtract 507 1 + 511: 11(float) GroupFAdd 25 InclusiveScan 510 + 512: 11(float) CompositeExtract 507 2 + 513: 11(float) GroupFAdd 25 InclusiveScan 512 + 514: 11(float) CompositeExtract 507 3 + 515: 11(float) GroupFAdd 25 InclusiveScan 514 + 516: 12(fvec4) CompositeConstruct 509 511 513 515 + 517: 51(ptr) AccessChain 20 50 + Store 517 516 + 518: 65(ptr) AccessChain 20 64 + 519: 13(int) Load 518 + 520: 13(int) GroupIAdd 25 InclusiveScan 519 + 521: 65(ptr) AccessChain 20 64 + Store 521 520 + 522: 71(ptr) AccessChain 20 70 + 523: 15(ivec2) 
Load 522 + 524: 14(int) CompositeExtract 523 0 + 525: 14(int) GroupIAdd 25 InclusiveScan 524 + 526: 14(int) CompositeExtract 523 1 + 527: 14(int) GroupIAdd 25 InclusiveScan 526 + 528: 15(ivec2) CompositeConstruct 525 527 + 529: 71(ptr) AccessChain 20 70 + Store 529 528 + 530: 81(ptr) AccessChain 20 80 + 531: 17(fvec3) Load 530 + 532: 16(float) CompositeExtract 531 0 + 533: 16(float) GroupFAdd 25 InclusiveScan 532 + 534: 16(float) CompositeExtract 531 1 + 535: 16(float) GroupFAdd 25 InclusiveScan 534 + 536: 16(float) CompositeExtract 531 2 + 537: 16(float) GroupFAdd 25 InclusiveScan 536 + 538: 17(fvec3) CompositeConstruct 533 535 537 + 539: 81(ptr) AccessChain 20 80 + Store 539 538 + 540: 22(ptr) AccessChain 20 21 + 541: 6(int) Load 540 + 542: 6(int) GroupSMin 25 ExclusiveScan 541 + 543: 22(ptr) AccessChain 20 21 + Store 543 542 + 544: 29(ptr) AccessChain 20 28 + 545: 8(ivec2) Load 544 + 546: 7(int) CompositeExtract 545 0 + 547: 7(int) GroupUMin 25 ExclusiveScan 546 + 548: 7(int) CompositeExtract 545 1 + 549: 7(int) GroupUMin 25 ExclusiveScan 548 + 550: 8(ivec2) CompositeConstruct 547 549 + 551: 29(ptr) AccessChain 20 28 + Store 551 550 + 552: 39(ptr) AccessChain 20 38 + 553: 10(fvec3) Load 552 + 554: 9(float) CompositeExtract 553 0 + 555: 9(float) GroupFMin 25 ExclusiveScan 554 + 556: 9(float) CompositeExtract 553 1 + 557: 9(float) GroupFMin 25 ExclusiveScan 556 + 558: 9(float) CompositeExtract 553 2 + 559: 9(float) GroupFMin 25 ExclusiveScan 558 + 560: 10(fvec3) CompositeConstruct 555 557 559 + 561: 39(ptr) AccessChain 20 38 + Store 561 560 + 562: 51(ptr) AccessChain 20 50 + 563: 12(fvec4) Load 562 + 564: 11(float) CompositeExtract 563 0 + 565: 11(float) GroupFMin 25 ExclusiveScan 564 + 566: 11(float) CompositeExtract 563 1 + 567: 11(float) GroupFMin 25 ExclusiveScan 566 + 568: 11(float) CompositeExtract 563 2 + 569: 11(float) GroupFMin 25 ExclusiveScan 568 + 570: 11(float) CompositeExtract 563 3 + 571: 11(float) GroupFMin 25 ExclusiveScan 570 + 572: 12(fvec4) 
CompositeConstruct 565 567 569 571 + 573: 51(ptr) AccessChain 20 50 + Store 573 572 + 574: 65(ptr) AccessChain 20 64 + 575: 13(int) Load 574 + 576: 13(int) GroupSMin 25 ExclusiveScan 575 + 577: 65(ptr) AccessChain 20 64 + Store 577 576 + 578: 71(ptr) AccessChain 20 70 + 579: 15(ivec2) Load 578 + 580: 14(int) CompositeExtract 579 0 + 581: 14(int) GroupUMin 25 ExclusiveScan 580 + 582: 14(int) CompositeExtract 579 1 + 583: 14(int) GroupUMin 25 ExclusiveScan 582 + 584: 15(ivec2) CompositeConstruct 581 583 + 585: 71(ptr) AccessChain 20 70 + Store 585 584 + 586: 81(ptr) AccessChain 20 80 + 587: 17(fvec3) Load 586 + 588: 16(float) CompositeExtract 587 0 + 589: 16(float) GroupFMin 25 ExclusiveScan 588 + 590: 16(float) CompositeExtract 587 1 + 591: 16(float) GroupFMin 25 ExclusiveScan 590 + 592: 16(float) CompositeExtract 587 2 + 593: 16(float) GroupFMin 25 ExclusiveScan 592 + 594: 17(fvec3) CompositeConstruct 589 591 593 + 595: 81(ptr) AccessChain 20 80 + Store 595 594 + 596: 22(ptr) AccessChain 20 21 + 597: 6(int) Load 596 + 598: 6(int) GroupSMax 25 ExclusiveScan 597 + 599: 22(ptr) AccessChain 20 21 + Store 599 598 + 600: 29(ptr) AccessChain 20 28 + 601: 8(ivec2) Load 600 + 602: 7(int) CompositeExtract 601 0 + 603: 7(int) GroupUMax 25 ExclusiveScan 602 + 604: 7(int) CompositeExtract 601 1 + 605: 7(int) GroupUMax 25 ExclusiveScan 604 + 606: 8(ivec2) CompositeConstruct 603 605 + 607: 29(ptr) AccessChain 20 28 + Store 607 606 + 608: 39(ptr) AccessChain 20 38 + 609: 10(fvec3) Load 608 + 610: 9(float) CompositeExtract 609 0 + 611: 9(float) GroupFMax 25 ExclusiveScan 610 + 612: 9(float) CompositeExtract 609 1 + 613: 9(float) GroupFMax 25 ExclusiveScan 612 + 614: 9(float) CompositeExtract 609 2 + 615: 9(float) GroupFMax 25 ExclusiveScan 614 + 616: 10(fvec3) CompositeConstruct 611 613 615 + 617: 39(ptr) AccessChain 20 38 + Store 617 616 + 618: 51(ptr) AccessChain 20 50 + 619: 12(fvec4) Load 618 + 620: 11(float) CompositeExtract 619 0 + 621: 11(float) GroupFMax 25 ExclusiveScan 
620 + 622: 11(float) CompositeExtract 619 1 + 623: 11(float) GroupFMax 25 ExclusiveScan 622 + 624: 11(float) CompositeExtract 619 2 + 625: 11(float) GroupFMax 25 ExclusiveScan 624 + 626: 11(float) CompositeExtract 619 3 + 627: 11(float) GroupFMax 25 ExclusiveScan 626 + 628: 12(fvec4) CompositeConstruct 621 623 625 627 + 629: 51(ptr) AccessChain 20 50 + Store 629 628 + 630: 65(ptr) AccessChain 20 64 + 631: 13(int) Load 630 + 632: 13(int) GroupSMax 25 ExclusiveScan 631 + 633: 65(ptr) AccessChain 20 64 + Store 633 632 + 634: 71(ptr) AccessChain 20 70 + 635: 15(ivec2) Load 634 + 636: 14(int) CompositeExtract 635 0 + 637: 14(int) GroupUMax 25 ExclusiveScan 636 + 638: 14(int) CompositeExtract 635 1 + 639: 14(int) GroupUMax 25 ExclusiveScan 638 + 640: 15(ivec2) CompositeConstruct 637 639 + 641: 71(ptr) AccessChain 20 70 + Store 641 640 + 642: 81(ptr) AccessChain 20 80 + 643: 17(fvec3) Load 642 + 644: 16(float) CompositeExtract 643 0 + 645: 16(float) GroupFMax 25 ExclusiveScan 644 + 646: 16(float) CompositeExtract 643 1 + 647: 16(float) GroupFMax 25 ExclusiveScan 646 + 648: 16(float) CompositeExtract 643 2 + 649: 16(float) GroupFMax 25 ExclusiveScan 648 + 650: 17(fvec3) CompositeConstruct 645 647 649 + 651: 81(ptr) AccessChain 20 80 + Store 651 650 + 652: 22(ptr) AccessChain 20 21 + 653: 6(int) Load 652 + 654: 6(int) GroupIAdd 25 ExclusiveScan 653 + 655: 22(ptr) AccessChain 20 21 + Store 655 654 + 656: 29(ptr) AccessChain 20 28 + 657: 8(ivec2) Load 656 + 658: 7(int) CompositeExtract 657 0 + 659: 7(int) GroupIAdd 25 ExclusiveScan 658 + 660: 7(int) CompositeExtract 657 1 + 661: 7(int) GroupIAdd 25 ExclusiveScan 660 + 662: 8(ivec2) CompositeConstruct 659 661 + 663: 29(ptr) AccessChain 20 28 + Store 663 662 + 664: 39(ptr) AccessChain 20 38 + 665: 10(fvec3) Load 664 + 666: 9(float) CompositeExtract 665 0 + 667: 9(float) GroupFAdd 25 ExclusiveScan 666 + 668: 9(float) CompositeExtract 665 1 + 669: 9(float) GroupFAdd 25 ExclusiveScan 668 + 670: 9(float) CompositeExtract 665 2 + 
671: 9(float) GroupFAdd 25 ExclusiveScan 670 + 672: 10(fvec3) CompositeConstruct 667 669 671 + 673: 39(ptr) AccessChain 20 38 + Store 673 672 + 674: 51(ptr) AccessChain 20 50 + 675: 12(fvec4) Load 674 + 676: 11(float) CompositeExtract 675 0 + 677: 11(float) GroupFAdd 25 ExclusiveScan 676 + 678: 11(float) CompositeExtract 675 1 + 679: 11(float) GroupFAdd 25 ExclusiveScan 678 + 680: 11(float) CompositeExtract 675 2 + 681: 11(float) GroupFAdd 25 ExclusiveScan 680 + 682: 11(float) CompositeExtract 675 3 + 683: 11(float) GroupFAdd 25 ExclusiveScan 682 + 684: 12(fvec4) CompositeConstruct 677 679 681 683 + 685: 51(ptr) AccessChain 20 50 + Store 685 684 + 686: 65(ptr) AccessChain 20 64 + 687: 13(int) Load 686 + 688: 13(int) GroupIAdd 25 ExclusiveScan 687 + 689: 65(ptr) AccessChain 20 64 + Store 689 688 + 690: 71(ptr) AccessChain 20 70 + 691: 15(ivec2) Load 690 + 692: 14(int) CompositeExtract 691 0 + 693: 14(int) GroupIAdd 25 ExclusiveScan 692 + 694: 14(int) CompositeExtract 691 1 + 695: 14(int) GroupIAdd 25 ExclusiveScan 694 + 696: 15(ivec2) CompositeConstruct 693 695 + 697: 71(ptr) AccessChain 20 70 + Store 697 696 + 698: 81(ptr) AccessChain 20 80 + 699: 17(fvec3) Load 698 + 700: 16(float) CompositeExtract 699 0 + 701: 16(float) GroupFAdd 25 ExclusiveScan 700 + 702: 16(float) CompositeExtract 699 1 + 703: 16(float) GroupFAdd 25 ExclusiveScan 702 + 704: 16(float) CompositeExtract 699 2 + 705: 16(float) GroupFAdd 25 ExclusiveScan 704 + 706: 17(fvec3) CompositeConstruct 701 703 705 + 707: 81(ptr) AccessChain 20 80 + Store 707 706 + 708: 22(ptr) AccessChain 20 21 + 709: 6(int) Load 708 + 710: 6(int) GroupSMinNonUniformAMD 25 InclusiveScan 709 + 711: 22(ptr) AccessChain 20 21 + Store 711 710 + 712: 29(ptr) AccessChain 20 28 + 713: 8(ivec2) Load 712 + 714: 7(int) CompositeExtract 713 0 + 715: 7(int) GroupUMinNonUniformAMD 25 InclusiveScan 714 + 716: 7(int) CompositeExtract 713 1 + 717: 7(int) GroupUMinNonUniformAMD 25 InclusiveScan 716 + 718: 8(ivec2) CompositeConstruct 715 717 
+ 719: 29(ptr) AccessChain 20 28 + Store 719 718 + 720: 39(ptr) AccessChain 20 38 + 721: 10(fvec3) Load 720 + 722: 9(float) CompositeExtract 721 0 + 723: 9(float) GroupFMinNonUniformAMD 25 InclusiveScan 722 + 724: 9(float) CompositeExtract 721 1 + 725: 9(float) GroupFMinNonUniformAMD 25 InclusiveScan 724 + 726: 9(float) CompositeExtract 721 2 + 727: 9(float) GroupFMinNonUniformAMD 25 InclusiveScan 726 + 728: 10(fvec3) CompositeConstruct 723 725 727 + 729: 39(ptr) AccessChain 20 38 + Store 729 728 + 730: 51(ptr) AccessChain 20 50 + 731: 12(fvec4) Load 730 + 732: 11(float) CompositeExtract 731 0 + 733: 11(float) GroupFMinNonUniformAMD 25 InclusiveScan 732 + 734: 11(float) CompositeExtract 731 1 + 735: 11(float) GroupFMinNonUniformAMD 25 InclusiveScan 734 + 736: 11(float) CompositeExtract 731 2 + 737: 11(float) GroupFMinNonUniformAMD 25 InclusiveScan 736 + 738: 11(float) CompositeExtract 731 3 + 739: 11(float) GroupFMinNonUniformAMD 25 InclusiveScan 738 + 740: 12(fvec4) CompositeConstruct 733 735 737 739 + 741: 51(ptr) AccessChain 20 50 + Store 741 740 + 742: 65(ptr) AccessChain 20 64 + 743: 13(int) Load 742 + 744: 13(int) GroupSMinNonUniformAMD 25 InclusiveScan 743 + 745: 65(ptr) AccessChain 20 64 + Store 745 744 + 746: 71(ptr) AccessChain 20 70 + 747: 15(ivec2) Load 746 + 748: 14(int) CompositeExtract 747 0 + 749: 14(int) GroupUMinNonUniformAMD 25 InclusiveScan 748 + 750: 14(int) CompositeExtract 747 1 + 751: 14(int) GroupUMinNonUniformAMD 25 InclusiveScan 750 + 752: 15(ivec2) CompositeConstruct 749 751 + 753: 71(ptr) AccessChain 20 70 + Store 753 752 + 754: 81(ptr) AccessChain 20 80 + 755: 17(fvec3) Load 754 + 756: 16(float) CompositeExtract 755 0 + 757: 16(float) GroupFMinNonUniformAMD 25 InclusiveScan 756 + 758: 16(float) CompositeExtract 755 1 + 759: 16(float) GroupFMinNonUniformAMD 25 InclusiveScan 758 + 760: 16(float) CompositeExtract 755 2 + 761: 16(float) GroupFMinNonUniformAMD 25 InclusiveScan 760 + 762: 17(fvec3) CompositeConstruct 757 759 761 + 763: 
81(ptr) AccessChain 20 80 + Store 763 762 + 764: 22(ptr) AccessChain 20 21 + 765: 6(int) Load 764 + 766: 6(int) GroupSMaxNonUniformAMD 25 InclusiveScan 765 + 767: 22(ptr) AccessChain 20 21 + Store 767 766 + 768: 29(ptr) AccessChain 20 28 + 769: 8(ivec2) Load 768 + 770: 7(int) CompositeExtract 769 0 + 771: 7(int) GroupUMaxNonUniformAMD 25 InclusiveScan 770 + 772: 7(int) CompositeExtract 769 1 + 773: 7(int) GroupUMaxNonUniformAMD 25 InclusiveScan 772 + 774: 8(ivec2) CompositeConstruct 771 773 + 775: 29(ptr) AccessChain 20 28 + Store 775 774 + 776: 39(ptr) AccessChain 20 38 + 777: 10(fvec3) Load 776 + 778: 9(float) CompositeExtract 777 0 + 779: 9(float) GroupFMaxNonUniformAMD 25 InclusiveScan 778 + 780: 9(float) CompositeExtract 777 1 + 781: 9(float) GroupFMaxNonUniformAMD 25 InclusiveScan 780 + 782: 9(float) CompositeExtract 777 2 + 783: 9(float) GroupFMaxNonUniformAMD 25 InclusiveScan 782 + 784: 10(fvec3) CompositeConstruct 779 781 783 + 785: 39(ptr) AccessChain 20 38 + Store 785 784 + 786: 51(ptr) AccessChain 20 50 + 787: 12(fvec4) Load 786 + 788: 11(float) CompositeExtract 787 0 + 789: 11(float) GroupFMaxNonUniformAMD 25 InclusiveScan 788 + 790: 11(float) CompositeExtract 787 1 + 791: 11(float) GroupFMaxNonUniformAMD 25 InclusiveScan 790 + 792: 11(float) CompositeExtract 787 2 + 793: 11(float) GroupFMaxNonUniformAMD 25 InclusiveScan 792 + 794: 11(float) CompositeExtract 787 3 + 795: 11(float) GroupFMaxNonUniformAMD 25 InclusiveScan 794 + 796: 12(fvec4) CompositeConstruct 789 791 793 795 + 797: 51(ptr) AccessChain 20 50 + Store 797 796 + 798: 65(ptr) AccessChain 20 64 + 799: 13(int) Load 798 + 800: 13(int) GroupSMaxNonUniformAMD 25 InclusiveScan 799 + 801: 65(ptr) AccessChain 20 64 + Store 801 800 + 802: 71(ptr) AccessChain 20 70 + 803: 15(ivec2) Load 802 + 804: 14(int) CompositeExtract 803 0 + 805: 14(int) GroupUMaxNonUniformAMD 25 InclusiveScan 804 + 806: 14(int) CompositeExtract 803 1 + 807: 14(int) GroupUMaxNonUniformAMD 25 InclusiveScan 806 + 808: 15(ivec2) 
CompositeConstruct 805 807 + 809: 71(ptr) AccessChain 20 70 + Store 809 808 + 810: 81(ptr) AccessChain 20 80 + 811: 17(fvec3) Load 810 + 812: 16(float) CompositeExtract 811 0 + 813: 16(float) GroupFMaxNonUniformAMD 25 InclusiveScan 812 + 814: 16(float) CompositeExtract 811 1 + 815: 16(float) GroupFMaxNonUniformAMD 25 InclusiveScan 814 + 816: 16(float) CompositeExtract 811 2 + 817: 16(float) GroupFMaxNonUniformAMD 25 InclusiveScan 816 + 818: 17(fvec3) CompositeConstruct 813 815 817 + 819: 81(ptr) AccessChain 20 80 + Store 819 818 + 820: 22(ptr) AccessChain 20 21 + 821: 6(int) Load 820 + 822: 6(int) GroupIAddNonUniformAMD 25 InclusiveScan 821 + 823: 22(ptr) AccessChain 20 21 + Store 823 822 + 824: 29(ptr) AccessChain 20 28 + 825: 8(ivec2) Load 824 + 826: 7(int) CompositeExtract 825 0 + 827: 7(int) GroupIAddNonUniformAMD 25 InclusiveScan 826 + 828: 7(int) CompositeExtract 825 1 + 829: 7(int) GroupIAddNonUniformAMD 25 InclusiveScan 828 + 830: 8(ivec2) CompositeConstruct 827 829 + 831: 29(ptr) AccessChain 20 28 + Store 831 830 + 832: 39(ptr) AccessChain 20 38 + 833: 10(fvec3) Load 832 + 834: 9(float) CompositeExtract 833 0 + 835: 9(float) GroupFAddNonUniformAMD 25 InclusiveScan 834 + 836: 9(float) CompositeExtract 833 1 + 837: 9(float) GroupFAddNonUniformAMD 25 InclusiveScan 836 + 838: 9(float) CompositeExtract 833 2 + 839: 9(float) GroupFAddNonUniformAMD 25 InclusiveScan 838 + 840: 10(fvec3) CompositeConstruct 835 837 839 + 841: 39(ptr) AccessChain 20 38 + Store 841 840 + 842: 51(ptr) AccessChain 20 50 + 843: 12(fvec4) Load 842 + 844: 11(float) CompositeExtract 843 0 + 845: 11(float) GroupFAddNonUniformAMD 25 InclusiveScan 844 + 846: 11(float) CompositeExtract 843 1 + 847: 11(float) GroupFAddNonUniformAMD 25 InclusiveScan 846 + 848: 11(float) CompositeExtract 843 2 + 849: 11(float) GroupFAddNonUniformAMD 25 InclusiveScan 848 + 850: 11(float) CompositeExtract 843 3 + 851: 11(float) GroupFAddNonUniformAMD 25 InclusiveScan 850 + 852: 12(fvec4) CompositeConstruct 845 847 
849 851 + 853: 51(ptr) AccessChain 20 50 + Store 853 852 + 854: 65(ptr) AccessChain 20 64 + 855: 13(int) Load 854 + 856: 13(int) GroupIAddNonUniformAMD 25 InclusiveScan 855 + 857: 65(ptr) AccessChain 20 64 + Store 857 856 + 858: 71(ptr) AccessChain 20 70 + 859: 15(ivec2) Load 858 + 860: 14(int) CompositeExtract 859 0 + 861: 14(int) GroupIAddNonUniformAMD 25 InclusiveScan 860 + 862: 14(int) CompositeExtract 859 1 + 863: 14(int) GroupIAddNonUniformAMD 25 InclusiveScan 862 + 864: 15(ivec2) CompositeConstruct 861 863 + 865: 71(ptr) AccessChain 20 70 + Store 865 864 + 866: 81(ptr) AccessChain 20 80 + 867: 17(fvec3) Load 866 + 868: 16(float) CompositeExtract 867 0 + 869: 16(float) GroupFAddNonUniformAMD 25 InclusiveScan 868 + 870: 16(float) CompositeExtract 867 1 + 871: 16(float) GroupFAddNonUniformAMD 25 InclusiveScan 870 + 872: 16(float) CompositeExtract 867 2 + 873: 16(float) GroupFAddNonUniformAMD 25 InclusiveScan 872 + 874: 17(fvec3) CompositeConstruct 869 871 873 + 875: 81(ptr) AccessChain 20 80 + Store 875 874 + 876: 22(ptr) AccessChain 20 21 + 877: 6(int) Load 876 + 878: 6(int) GroupSMinNonUniformAMD 25 ExclusiveScan 877 + 879: 22(ptr) AccessChain 20 21 + Store 879 878 + 880: 29(ptr) AccessChain 20 28 + 881: 8(ivec2) Load 880 + 882: 7(int) CompositeExtract 881 0 + 883: 7(int) GroupUMinNonUniformAMD 25 ExclusiveScan 882 + 884: 7(int) CompositeExtract 881 1 + 885: 7(int) GroupUMinNonUniformAMD 25 ExclusiveScan 884 + 886: 8(ivec2) CompositeConstruct 883 885 + 887: 29(ptr) AccessChain 20 28 + Store 887 886 + 888: 39(ptr) AccessChain 20 38 + 889: 10(fvec3) Load 888 + 890: 9(float) CompositeExtract 889 0 + 891: 9(float) GroupFMinNonUniformAMD 25 ExclusiveScan 890 + 892: 9(float) CompositeExtract 889 1 + 893: 9(float) GroupFMinNonUniformAMD 25 ExclusiveScan 892 + 894: 9(float) CompositeExtract 889 2 + 895: 9(float) GroupFMinNonUniformAMD 25 ExclusiveScan 894 + 896: 10(fvec3) CompositeConstruct 891 893 895 + 897: 39(ptr) AccessChain 20 38 + Store 897 896 + 898: 51(ptr) 
AccessChain 20 50 + 899: 12(fvec4) Load 898 + 900: 11(float) CompositeExtract 899 0 + 901: 11(float) GroupFMinNonUniformAMD 25 ExclusiveScan 900 + 902: 11(float) CompositeExtract 899 1 + 903: 11(float) GroupFMinNonUniformAMD 25 ExclusiveScan 902 + 904: 11(float) CompositeExtract 899 2 + 905: 11(float) GroupFMinNonUniformAMD 25 ExclusiveScan 904 + 906: 11(float) CompositeExtract 899 3 + 907: 11(float) GroupFMinNonUniformAMD 25 ExclusiveScan 906 + 908: 12(fvec4) CompositeConstruct 901 903 905 907 + 909: 51(ptr) AccessChain 20 50 + Store 909 908 + 910: 65(ptr) AccessChain 20 64 + 911: 13(int) Load 910 + 912: 13(int) GroupSMinNonUniformAMD 25 ExclusiveScan 911 + 913: 65(ptr) AccessChain 20 64 + Store 913 912 + 914: 71(ptr) AccessChain 20 70 + 915: 15(ivec2) Load 914 + 916: 14(int) CompositeExtract 915 0 + 917: 14(int) GroupUMinNonUniformAMD 25 ExclusiveScan 916 + 918: 14(int) CompositeExtract 915 1 + 919: 14(int) GroupUMinNonUniformAMD 25 ExclusiveScan 918 + 920: 15(ivec2) CompositeConstruct 917 919 + 921: 71(ptr) AccessChain 20 70 + Store 921 920 + 922: 81(ptr) AccessChain 20 80 + 923: 17(fvec3) Load 922 + 924: 16(float) CompositeExtract 923 0 + 925: 16(float) GroupFMinNonUniformAMD 25 ExclusiveScan 924 + 926: 16(float) CompositeExtract 923 1 + 927: 16(float) GroupFMinNonUniformAMD 25 ExclusiveScan 926 + 928: 16(float) CompositeExtract 923 2 + 929: 16(float) GroupFMinNonUniformAMD 25 ExclusiveScan 928 + 930: 17(fvec3) CompositeConstruct 925 927 929 + 931: 81(ptr) AccessChain 20 80 + Store 931 930 + 932: 22(ptr) AccessChain 20 21 + 933: 6(int) Load 932 + 934: 6(int) GroupSMaxNonUniformAMD 25 ExclusiveScan 933 + 935: 22(ptr) AccessChain 20 21 + Store 935 934 + 936: 29(ptr) AccessChain 20 28 + 937: 8(ivec2) Load 936 + 938: 7(int) CompositeExtract 937 0 + 939: 7(int) GroupUMaxNonUniformAMD 25 ExclusiveScan 938 + 940: 7(int) CompositeExtract 937 1 + 941: 7(int) GroupUMaxNonUniformAMD 25 ExclusiveScan 940 + 942: 8(ivec2) CompositeConstruct 939 941 + 943: 29(ptr) AccessChain 
20 28 + Store 943 942 + 944: 39(ptr) AccessChain 20 38 + 945: 10(fvec3) Load 944 + 946: 9(float) CompositeExtract 945 0 + 947: 9(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 946 + 948: 9(float) CompositeExtract 945 1 + 949: 9(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 948 + 950: 9(float) CompositeExtract 945 2 + 951: 9(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 950 + 952: 10(fvec3) CompositeConstruct 947 949 951 + 953: 39(ptr) AccessChain 20 38 + Store 953 952 + 954: 51(ptr) AccessChain 20 50 + 955: 12(fvec4) Load 954 + 956: 11(float) CompositeExtract 955 0 + 957: 11(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 956 + 958: 11(float) CompositeExtract 955 1 + 959: 11(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 958 + 960: 11(float) CompositeExtract 955 2 + 961: 11(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 960 + 962: 11(float) CompositeExtract 955 3 + 963: 11(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 962 + 964: 12(fvec4) CompositeConstruct 957 959 961 963 + 965: 51(ptr) AccessChain 20 50 + Store 965 964 + 966: 65(ptr) AccessChain 20 64 + 967: 13(int) Load 966 + 968: 13(int) GroupSMaxNonUniformAMD 25 ExclusiveScan 967 + 969: 65(ptr) AccessChain 20 64 + Store 969 968 + 970: 71(ptr) AccessChain 20 70 + 971: 15(ivec2) Load 970 + 972: 14(int) CompositeExtract 971 0 + 973: 14(int) GroupUMaxNonUniformAMD 25 ExclusiveScan 972 + 974: 14(int) CompositeExtract 971 1 + 975: 14(int) GroupUMaxNonUniformAMD 25 ExclusiveScan 974 + 976: 15(ivec2) CompositeConstruct 973 975 + 977: 71(ptr) AccessChain 20 70 + Store 977 976 + 978: 81(ptr) AccessChain 20 80 + 979: 17(fvec3) Load 978 + 980: 16(float) CompositeExtract 979 0 + 981: 16(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 980 + 982: 16(float) CompositeExtract 979 1 + 983: 16(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 982 + 984: 16(float) CompositeExtract 979 2 + 985: 16(float) GroupFMaxNonUniformAMD 25 ExclusiveScan 984 + 986: 17(fvec3) CompositeConstruct 981 983 985 + 987: 81(ptr) AccessChain 20 80 + 
Store 987 986 + 988: 22(ptr) AccessChain 20 21 + 989: 6(int) Load 988 + 990: 6(int) GroupIAddNonUniformAMD 25 ExclusiveScan 989 + 991: 22(ptr) AccessChain 20 21 + Store 991 990 + 992: 29(ptr) AccessChain 20 28 + 993: 8(ivec2) Load 992 + 994: 7(int) CompositeExtract 993 0 + 995: 7(int) GroupIAddNonUniformAMD 25 ExclusiveScan 994 + 996: 7(int) CompositeExtract 993 1 + 997: 7(int) GroupIAddNonUniformAMD 25 ExclusiveScan 996 + 998: 8(ivec2) CompositeConstruct 995 997 + 999: 29(ptr) AccessChain 20 28 + Store 999 998 + 1000: 39(ptr) AccessChain 20 38 + 1001: 10(fvec3) Load 1000 + 1002: 9(float) CompositeExtract 1001 0 + 1003: 9(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1002 + 1004: 9(float) CompositeExtract 1001 1 + 1005: 9(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1004 + 1006: 9(float) CompositeExtract 1001 2 + 1007: 9(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1006 + 1008: 10(fvec3) CompositeConstruct 1003 1005 1007 + 1009: 39(ptr) AccessChain 20 38 + Store 1009 1008 + 1010: 51(ptr) AccessChain 20 50 + 1011: 12(fvec4) Load 1010 + 1012: 11(float) CompositeExtract 1011 0 + 1013: 11(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1012 + 1014: 11(float) CompositeExtract 1011 1 + 1015: 11(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1014 + 1016: 11(float) CompositeExtract 1011 2 + 1017: 11(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1016 + 1018: 11(float) CompositeExtract 1011 3 + 1019: 11(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1018 + 1020: 12(fvec4) CompositeConstruct 1013 1015 1017 1019 + 1021: 51(ptr) AccessChain 20 50 + Store 1021 1020 + 1022: 65(ptr) AccessChain 20 64 + 1023: 13(int) Load 1022 + 1024: 13(int) GroupIAddNonUniformAMD 25 ExclusiveScan 1023 + 1025: 65(ptr) AccessChain 20 64 + Store 1025 1024 + 1026: 71(ptr) AccessChain 20 70 + 1027: 15(ivec2) Load 1026 + 1028: 14(int) CompositeExtract 1027 0 + 1029: 14(int) GroupIAddNonUniformAMD 25 ExclusiveScan 1028 + 1030: 14(int) CompositeExtract 1027 1 + 1031: 14(int) GroupIAddNonUniformAMD 
25 ExclusiveScan 1030 + 1032: 15(ivec2) CompositeConstruct 1029 1031 + 1033: 71(ptr) AccessChain 20 70 + Store 1033 1032 + 1034: 81(ptr) AccessChain 20 80 + 1035: 17(fvec3) Load 1034 + 1036: 16(float) CompositeExtract 1035 0 + 1037: 16(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1036 + 1038: 16(float) CompositeExtract 1035 1 + 1039: 16(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1038 + 1040: 16(float) CompositeExtract 1035 2 + 1041: 16(float) GroupFAddNonUniformAMD 25 ExclusiveScan 1040 + 1042: 17(fvec3) CompositeConstruct 1037 1039 1041 + 1043: 81(ptr) AccessChain 20 80 + Store 1043 1042 + Return + FunctionEnd diff --git a/Test/spv.shaderBallotAMD.comp b/Test/spv.shaderBallotAMD.comp new file mode 100644 index 0000000..d6d370a --- /dev/null +++ b/Test/spv.shaderBallotAMD.comp @@ -0,0 +1,165 @@ +#version 450 + +#extension GL_ARB_gpu_shader_int64: enable +#extension GL_AMD_gpu_shader_half_float: enable +#extension GL_AMD_shader_ballot: enable + +layout (local_size_x = 8, local_size_y = 8, local_size_z = 1) in; + +layout(binding = 0) buffer Buffers +{ + int i; + uvec2 uv; + vec3 fv; + dvec4 dv; + int64_t i64; + u64vec2 u64v; + f16vec3 f16v; +}; + +void main() +{ + i = minInvocationsAMD(i); + uv = minInvocationsAMD(uv); + fv = minInvocationsAMD(fv); + dv = minInvocationsAMD(dv); + i64 = minInvocationsAMD(i64); + u64v = minInvocationsAMD(u64v); + f16v = minInvocationsAMD(f16v); + + i = maxInvocationsAMD(i); + uv = maxInvocationsAMD(uv); + fv = maxInvocationsAMD(fv); + dv = maxInvocationsAMD(dv); + i64 = maxInvocationsAMD(i64); + u64v = maxInvocationsAMD(u64v); + f16v = maxInvocationsAMD(f16v); + + i = addInvocationsAMD(i); + uv = addInvocationsAMD(uv); + fv = addInvocationsAMD(fv); + dv = addInvocationsAMD(dv); + i64 = addInvocationsAMD(i64); + u64v = addInvocationsAMD(u64v); + f16v = addInvocationsAMD(f16v); + + i = minInvocationsNonUniformAMD(i); + uv = minInvocationsNonUniformAMD(uv); + fv = minInvocationsNonUniformAMD(fv); + dv = 
minInvocationsNonUniformAMD(dv); + i64 = minInvocationsNonUniformAMD(i64); + u64v = minInvocationsNonUniformAMD(u64v); + f16v = minInvocationsNonUniformAMD(f16v); + + i = maxInvocationsNonUniformAMD(i); + uv = maxInvocationsNonUniformAMD(uv); + fv = maxInvocationsNonUniformAMD(fv); + dv = maxInvocationsNonUniformAMD(dv); + i64 = maxInvocationsNonUniformAMD(i64); + u64v = maxInvocationsNonUniformAMD(u64v); + f16v = maxInvocationsNonUniformAMD(f16v); + + i = addInvocationsNonUniformAMD(i); + uv = addInvocationsNonUniformAMD(uv); + fv = addInvocationsNonUniformAMD(fv); + dv = addInvocationsNonUniformAMD(dv); + i64 = addInvocationsNonUniformAMD(i64); + u64v = addInvocationsNonUniformAMD(u64v); + f16v = addInvocationsNonUniformAMD(f16v); + + i = minInvocationsInclusiveScanAMD(i); + uv = minInvocationsInclusiveScanAMD(uv); + fv = minInvocationsInclusiveScanAMD(fv); + dv = minInvocationsInclusiveScanAMD(dv); + i64 = minInvocationsInclusiveScanAMD(i64); + u64v = minInvocationsInclusiveScanAMD(u64v); + f16v = minInvocationsInclusiveScanAMD(f16v); + + i = maxInvocationsInclusiveScanAMD(i); + uv = maxInvocationsInclusiveScanAMD(uv); + fv = maxInvocationsInclusiveScanAMD(fv); + dv = maxInvocationsInclusiveScanAMD(dv); + i64 = maxInvocationsInclusiveScanAMD(i64); + u64v = maxInvocationsInclusiveScanAMD(u64v); + f16v = maxInvocationsInclusiveScanAMD(f16v); + + i = addInvocationsInclusiveScanAMD(i); + uv = addInvocationsInclusiveScanAMD(uv); + fv = addInvocationsInclusiveScanAMD(fv); + dv = addInvocationsInclusiveScanAMD(dv); + i64 = addInvocationsInclusiveScanAMD(i64); + u64v = addInvocationsInclusiveScanAMD(u64v); + f16v = addInvocationsInclusiveScanAMD(f16v); + + i = minInvocationsExclusiveScanAMD(i); + uv = minInvocationsExclusiveScanAMD(uv); + fv = minInvocationsExclusiveScanAMD(fv); + dv = minInvocationsExclusiveScanAMD(dv); + i64 = minInvocationsExclusiveScanAMD(i64); + u64v = minInvocationsExclusiveScanAMD(u64v); + f16v = minInvocationsExclusiveScanAMD(f16v); + + i = 
maxInvocationsExclusiveScanAMD(i); + uv = maxInvocationsExclusiveScanAMD(uv); + fv = maxInvocationsExclusiveScanAMD(fv); + dv = maxInvocationsExclusiveScanAMD(dv); + i64 = maxInvocationsExclusiveScanAMD(i64); + u64v = maxInvocationsExclusiveScanAMD(u64v); + f16v = maxInvocationsExclusiveScanAMD(f16v); + + i = addInvocationsExclusiveScanAMD(i); + uv = addInvocationsExclusiveScanAMD(uv); + fv = addInvocationsExclusiveScanAMD(fv); + dv = addInvocationsExclusiveScanAMD(dv); + i64 = addInvocationsExclusiveScanAMD(i64); + u64v = addInvocationsExclusiveScanAMD(u64v); + f16v = addInvocationsExclusiveScanAMD(f16v); + + i = minInvocationsInclusiveScanNonUniformAMD(i); + uv = minInvocationsInclusiveScanNonUniformAMD(uv); + fv = minInvocationsInclusiveScanNonUniformAMD(fv); + dv = minInvocationsInclusiveScanNonUniformAMD(dv); + i64 = minInvocationsInclusiveScanNonUniformAMD(i64); + u64v = minInvocationsInclusiveScanNonUniformAMD(u64v); + f16v = minInvocationsInclusiveScanNonUniformAMD(f16v); + + i = maxInvocationsInclusiveScanNonUniformAMD(i); + uv = maxInvocationsInclusiveScanNonUniformAMD(uv); + fv = maxInvocationsInclusiveScanNonUniformAMD(fv); + dv = maxInvocationsInclusiveScanNonUniformAMD(dv); + i64 = maxInvocationsInclusiveScanNonUniformAMD(i64); + u64v = maxInvocationsInclusiveScanNonUniformAMD(u64v); + f16v = maxInvocationsInclusiveScanNonUniformAMD(f16v); + + i = addInvocationsInclusiveScanNonUniformAMD(i); + uv = addInvocationsInclusiveScanNonUniformAMD(uv); + fv = addInvocationsInclusiveScanNonUniformAMD(fv); + dv = addInvocationsInclusiveScanNonUniformAMD(dv); + i64 = addInvocationsInclusiveScanNonUniformAMD(i64); + u64v = addInvocationsInclusiveScanNonUniformAMD(u64v); + f16v = addInvocationsInclusiveScanNonUniformAMD(f16v); + + i = minInvocationsExclusiveScanNonUniformAMD(i); + uv = minInvocationsExclusiveScanNonUniformAMD(uv); + fv = minInvocationsExclusiveScanNonUniformAMD(fv); + dv = minInvocationsExclusiveScanNonUniformAMD(dv); + i64 = 
minInvocationsExclusiveScanNonUniformAMD(i64); + u64v = minInvocationsExclusiveScanNonUniformAMD(u64v); + f16v = minInvocationsExclusiveScanNonUniformAMD(f16v); + + i = maxInvocationsExclusiveScanNonUniformAMD(i); + uv = maxInvocationsExclusiveScanNonUniformAMD(uv); + fv = maxInvocationsExclusiveScanNonUniformAMD(fv); + dv = maxInvocationsExclusiveScanNonUniformAMD(dv); + i64 = maxInvocationsExclusiveScanNonUniformAMD(i64); + u64v = maxInvocationsExclusiveScanNonUniformAMD(u64v); + f16v = maxInvocationsExclusiveScanNonUniformAMD(f16v); + + i = addInvocationsExclusiveScanNonUniformAMD(i); + uv = addInvocationsExclusiveScanNonUniformAMD(uv); + fv = addInvocationsExclusiveScanNonUniformAMD(fv); + dv = addInvocationsExclusiveScanNonUniformAMD(dv); + i64 = addInvocationsExclusiveScanNonUniformAMD(i64); + u64v = addInvocationsExclusiveScanNonUniformAMD(u64v); + f16v = addInvocationsExclusiveScanNonUniformAMD(f16v); +} diff --git a/glslang/Include/intermediate.h b/glslang/Include/intermediate.h index 600b4b6..dc87ba9 100644 --- a/glslang/Include/intermediate.h +++ b/glslang/Include/intermediate.h @@ -335,6 +335,18 @@ enum TOperator { EOpMinInvocationsNonUniform, EOpMaxInvocationsNonUniform, EOpAddInvocationsNonUniform, + EOpMinInvocationsInclusiveScan, + EOpMaxInvocationsInclusiveScan, + EOpAddInvocationsInclusiveScan, + EOpMinInvocationsInclusiveScanNonUniform, + EOpMaxInvocationsInclusiveScanNonUniform, + EOpAddInvocationsInclusiveScanNonUniform, + EOpMinInvocationsExclusiveScan, + EOpMaxInvocationsExclusiveScan, + EOpAddInvocationsExclusiveScan, + EOpMinInvocationsExclusiveScanNonUniform, + EOpMaxInvocationsExclusiveScanNonUniform, + EOpAddInvocationsExclusiveScanNonUniform, EOpSwizzleInvocations, EOpSwizzleInvocationsMasked, EOpWriteInvocation, diff --git a/glslang/MachineIndependent/Initialize.cpp b/glslang/MachineIndependent/Initialize.cpp index 2ed2381..6ea7b26 100644 --- a/glslang/MachineIndependent/Initialize.cpp +++ b/glslang/MachineIndependent/Initialize.cpp @@ 
-1587,6 +1587,96 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "uvec3 minInvocationsAMD(uvec3);" "uvec4 minInvocationsAMD(uvec4);" + "double minInvocationsAMD(double);" + "dvec2 minInvocationsAMD(dvec2);" + "dvec3 minInvocationsAMD(dvec3);" + "dvec4 minInvocationsAMD(dvec4);" + + "int64_t minInvocationsAMD(int64_t);" + "i64vec2 minInvocationsAMD(i64vec2);" + "i64vec3 minInvocationsAMD(i64vec3);" + "i64vec4 minInvocationsAMD(i64vec4);" + + "uint64_t minInvocationsAMD(uint64_t);" + "u64vec2 minInvocationsAMD(u64vec2);" + "u64vec3 minInvocationsAMD(u64vec3);" + "u64vec4 minInvocationsAMD(u64vec4);" + + "float16_t minInvocationsAMD(float16_t);" + "f16vec2 minInvocationsAMD(f16vec2);" + "f16vec3 minInvocationsAMD(f16vec3);" + "f16vec4 minInvocationsAMD(f16vec4);" + + "float minInvocationsInclusiveScanAMD(float);" + "vec2 minInvocationsInclusiveScanAMD(vec2);" + "vec3 minInvocationsInclusiveScanAMD(vec3);" + "vec4 minInvocationsInclusiveScanAMD(vec4);" + + "int minInvocationsInclusiveScanAMD(int);" + "ivec2 minInvocationsInclusiveScanAMD(ivec2);" + "ivec3 minInvocationsInclusiveScanAMD(ivec3);" + "ivec4 minInvocationsInclusiveScanAMD(ivec4);" + + "uint minInvocationsInclusiveScanAMD(uint);" + "uvec2 minInvocationsInclusiveScanAMD(uvec2);" + "uvec3 minInvocationsInclusiveScanAMD(uvec3);" + "uvec4 minInvocationsInclusiveScanAMD(uvec4);" + + "double minInvocationsInclusiveScanAMD(double);" + "dvec2 minInvocationsInclusiveScanAMD(dvec2);" + "dvec3 minInvocationsInclusiveScanAMD(dvec3);" + "dvec4 minInvocationsInclusiveScanAMD(dvec4);" + + "int64_t minInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 minInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 minInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 minInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t minInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 minInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 minInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 
minInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t minInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 minInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 minInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 minInvocationsInclusiveScanAMD(f16vec4);" + + "float minInvocationsExclusiveScanAMD(float);" + "vec2 minInvocationsExclusiveScanAMD(vec2);" + "vec3 minInvocationsExclusiveScanAMD(vec3);" + "vec4 minInvocationsExclusiveScanAMD(vec4);" + + "int minInvocationsExclusiveScanAMD(int);" + "ivec2 minInvocationsExclusiveScanAMD(ivec2);" + "ivec3 minInvocationsExclusiveScanAMD(ivec3);" + "ivec4 minInvocationsExclusiveScanAMD(ivec4);" + + "uint minInvocationsExclusiveScanAMD(uint);" + "uvec2 minInvocationsExclusiveScanAMD(uvec2);" + "uvec3 minInvocationsExclusiveScanAMD(uvec3);" + "uvec4 minInvocationsExclusiveScanAMD(uvec4);" + + "double minInvocationsExclusiveScanAMD(double);" + "dvec2 minInvocationsExclusiveScanAMD(dvec2);" + "dvec3 minInvocationsExclusiveScanAMD(dvec3);" + "dvec4 minInvocationsExclusiveScanAMD(dvec4);" + + "int64_t minInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 minInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 minInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 minInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t minInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 minInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 minInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 minInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t minInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 minInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 minInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 minInvocationsExclusiveScanAMD(f16vec4);" + "float maxInvocationsAMD(float);" "vec2 maxInvocationsAMD(vec2);" "vec3 maxInvocationsAMD(vec3);" @@ -1602,6 +1692,96 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "uvec3 maxInvocationsAMD(uvec3);" "uvec4 maxInvocationsAMD(uvec4);" + "double maxInvocationsAMD(double);" 
+ "dvec2 maxInvocationsAMD(dvec2);" + "dvec3 maxInvocationsAMD(dvec3);" + "dvec4 maxInvocationsAMD(dvec4);" + + "int64_t maxInvocationsAMD(int64_t);" + "i64vec2 maxInvocationsAMD(i64vec2);" + "i64vec3 maxInvocationsAMD(i64vec3);" + "i64vec4 maxInvocationsAMD(i64vec4);" + + "uint64_t maxInvocationsAMD(uint64_t);" + "u64vec2 maxInvocationsAMD(u64vec2);" + "u64vec3 maxInvocationsAMD(u64vec3);" + "u64vec4 maxInvocationsAMD(u64vec4);" + + "float16_t maxInvocationsAMD(float16_t);" + "f16vec2 maxInvocationsAMD(f16vec2);" + "f16vec3 maxInvocationsAMD(f16vec3);" + "f16vec4 maxInvocationsAMD(f16vec4);" + + "float maxInvocationsInclusiveScanAMD(float);" + "vec2 maxInvocationsInclusiveScanAMD(vec2);" + "vec3 maxInvocationsInclusiveScanAMD(vec3);" + "vec4 maxInvocationsInclusiveScanAMD(vec4);" + + "int maxInvocationsInclusiveScanAMD(int);" + "ivec2 maxInvocationsInclusiveScanAMD(ivec2);" + "ivec3 maxInvocationsInclusiveScanAMD(ivec3);" + "ivec4 maxInvocationsInclusiveScanAMD(ivec4);" + + "uint maxInvocationsInclusiveScanAMD(uint);" + "uvec2 maxInvocationsInclusiveScanAMD(uvec2);" + "uvec3 maxInvocationsInclusiveScanAMD(uvec3);" + "uvec4 maxInvocationsInclusiveScanAMD(uvec4);" + + "double maxInvocationsInclusiveScanAMD(double);" + "dvec2 maxInvocationsInclusiveScanAMD(dvec2);" + "dvec3 maxInvocationsInclusiveScanAMD(dvec3);" + "dvec4 maxInvocationsInclusiveScanAMD(dvec4);" + + "int64_t maxInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 maxInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 maxInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 maxInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t maxInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 maxInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 maxInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 maxInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t maxInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 maxInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 maxInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 
maxInvocationsInclusiveScanAMD(f16vec4);" + + "float maxInvocationsExclusiveScanAMD(float);" + "vec2 maxInvocationsExclusiveScanAMD(vec2);" + "vec3 maxInvocationsExclusiveScanAMD(vec3);" + "vec4 maxInvocationsExclusiveScanAMD(vec4);" + + "int maxInvocationsExclusiveScanAMD(int);" + "ivec2 maxInvocationsExclusiveScanAMD(ivec2);" + "ivec3 maxInvocationsExclusiveScanAMD(ivec3);" + "ivec4 maxInvocationsExclusiveScanAMD(ivec4);" + + "uint maxInvocationsExclusiveScanAMD(uint);" + "uvec2 maxInvocationsExclusiveScanAMD(uvec2);" + "uvec3 maxInvocationsExclusiveScanAMD(uvec3);" + "uvec4 maxInvocationsExclusiveScanAMD(uvec4);" + + "double maxInvocationsExclusiveScanAMD(double);" + "dvec2 maxInvocationsExclusiveScanAMD(dvec2);" + "dvec3 maxInvocationsExclusiveScanAMD(dvec3);" + "dvec4 maxInvocationsExclusiveScanAMD(dvec4);" + + "int64_t maxInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 maxInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 maxInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 maxInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t maxInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 maxInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 maxInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 maxInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t maxInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 maxInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 maxInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 maxInvocationsExclusiveScanAMD(f16vec4);" + "float addInvocationsAMD(float);" "vec2 addInvocationsAMD(vec2);" "vec3 addInvocationsAMD(vec3);" @@ -1617,6 +1797,96 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "uvec3 addInvocationsAMD(uvec3);" "uvec4 addInvocationsAMD(uvec4);" + "double addInvocationsAMD(double);" + "dvec2 addInvocationsAMD(dvec2);" + "dvec3 addInvocationsAMD(dvec3);" + "dvec4 addInvocationsAMD(dvec4);" + + "int64_t addInvocationsAMD(int64_t);" + "i64vec2 addInvocationsAMD(i64vec2);" + "i64vec3 
addInvocationsAMD(i64vec3);" + "i64vec4 addInvocationsAMD(i64vec4);" + + "uint64_t addInvocationsAMD(uint64_t);" + "u64vec2 addInvocationsAMD(u64vec2);" + "u64vec3 addInvocationsAMD(u64vec3);" + "u64vec4 addInvocationsAMD(u64vec4);" + + "float16_t addInvocationsAMD(float16_t);" + "f16vec2 addInvocationsAMD(f16vec2);" + "f16vec3 addInvocationsAMD(f16vec3);" + "f16vec4 addInvocationsAMD(f16vec4);" + + "float addInvocationsInclusiveScanAMD(float);" + "vec2 addInvocationsInclusiveScanAMD(vec2);" + "vec3 addInvocationsInclusiveScanAMD(vec3);" + "vec4 addInvocationsInclusiveScanAMD(vec4);" + + "int addInvocationsInclusiveScanAMD(int);" + "ivec2 addInvocationsInclusiveScanAMD(ivec2);" + "ivec3 addInvocationsInclusiveScanAMD(ivec3);" + "ivec4 addInvocationsInclusiveScanAMD(ivec4);" + + "uint addInvocationsInclusiveScanAMD(uint);" + "uvec2 addInvocationsInclusiveScanAMD(uvec2);" + "uvec3 addInvocationsInclusiveScanAMD(uvec3);" + "uvec4 addInvocationsInclusiveScanAMD(uvec4);" + + "double addInvocationsInclusiveScanAMD(double);" + "dvec2 addInvocationsInclusiveScanAMD(dvec2);" + "dvec3 addInvocationsInclusiveScanAMD(dvec3);" + "dvec4 addInvocationsInclusiveScanAMD(dvec4);" + + "int64_t addInvocationsInclusiveScanAMD(int64_t);" + "i64vec2 addInvocationsInclusiveScanAMD(i64vec2);" + "i64vec3 addInvocationsInclusiveScanAMD(i64vec3);" + "i64vec4 addInvocationsInclusiveScanAMD(i64vec4);" + + "uint64_t addInvocationsInclusiveScanAMD(uint64_t);" + "u64vec2 addInvocationsInclusiveScanAMD(u64vec2);" + "u64vec3 addInvocationsInclusiveScanAMD(u64vec3);" + "u64vec4 addInvocationsInclusiveScanAMD(u64vec4);" + + "float16_t addInvocationsInclusiveScanAMD(float16_t);" + "f16vec2 addInvocationsInclusiveScanAMD(f16vec2);" + "f16vec3 addInvocationsInclusiveScanAMD(f16vec3);" + "f16vec4 addInvocationsInclusiveScanAMD(f16vec4);" + + "float addInvocationsExclusiveScanAMD(float);" + "vec2 addInvocationsExclusiveScanAMD(vec2);" + "vec3 addInvocationsExclusiveScanAMD(vec3);" + "vec4 
addInvocationsExclusiveScanAMD(vec4);" + + "int addInvocationsExclusiveScanAMD(int);" + "ivec2 addInvocationsExclusiveScanAMD(ivec2);" + "ivec3 addInvocationsExclusiveScanAMD(ivec3);" + "ivec4 addInvocationsExclusiveScanAMD(ivec4);" + + "uint addInvocationsExclusiveScanAMD(uint);" + "uvec2 addInvocationsExclusiveScanAMD(uvec2);" + "uvec3 addInvocationsExclusiveScanAMD(uvec3);" + "uvec4 addInvocationsExclusiveScanAMD(uvec4);" + + "double addInvocationsExclusiveScanAMD(double);" + "dvec2 addInvocationsExclusiveScanAMD(dvec2);" + "dvec3 addInvocationsExclusiveScanAMD(dvec3);" + "dvec4 addInvocationsExclusiveScanAMD(dvec4);" + + "int64_t addInvocationsExclusiveScanAMD(int64_t);" + "i64vec2 addInvocationsExclusiveScanAMD(i64vec2);" + "i64vec3 addInvocationsExclusiveScanAMD(i64vec3);" + "i64vec4 addInvocationsExclusiveScanAMD(i64vec4);" + + "uint64_t addInvocationsExclusiveScanAMD(uint64_t);" + "u64vec2 addInvocationsExclusiveScanAMD(u64vec2);" + "u64vec3 addInvocationsExclusiveScanAMD(u64vec3);" + "u64vec4 addInvocationsExclusiveScanAMD(u64vec4);" + + "float16_t addInvocationsExclusiveScanAMD(float16_t);" + "f16vec2 addInvocationsExclusiveScanAMD(f16vec2);" + "f16vec3 addInvocationsExclusiveScanAMD(f16vec3);" + "f16vec4 addInvocationsExclusiveScanAMD(f16vec4);" + "float minInvocationsNonUniformAMD(float);" "vec2 minInvocationsNonUniformAMD(vec2);" "vec3 minInvocationsNonUniformAMD(vec3);" @@ -1632,6 +1902,96 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "uvec3 minInvocationsNonUniformAMD(uvec3);" "uvec4 minInvocationsNonUniformAMD(uvec4);" + "double minInvocationsNonUniformAMD(double);" + "dvec2 minInvocationsNonUniformAMD(dvec2);" + "dvec3 minInvocationsNonUniformAMD(dvec3);" + "dvec4 minInvocationsNonUniformAMD(dvec4);" + + "int64_t minInvocationsNonUniformAMD(int64_t);" + "i64vec2 minInvocationsNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsNonUniformAMD(i64vec4);" + + 
"uint64_t minInvocationsNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsNonUniformAMD(u64vec4);" + + "float16_t minInvocationsNonUniformAMD(float16_t);" + "f16vec2 minInvocationsNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsNonUniformAMD(f16vec4);" + + "float minInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 minInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 minInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 minInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int minInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 minInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 minInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 minInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint minInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 minInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 minInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 minInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double minInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 minInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 minInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 minInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t minInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 minInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t minInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 minInvocationsInclusiveScanNonUniformAMD(f16vec2);" + 
"f16vec3 minInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "float minInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 minInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 minInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 minInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int minInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 minInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 minInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 minInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint minInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 minInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 minInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 minInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double minInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 minInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 minInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 minInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t minInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 minInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 minInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 minInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t minInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 minInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 minInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 minInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t minInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 minInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 minInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 minInvocationsExclusiveScanNonUniformAMD(f16vec4);" + "float maxInvocationsNonUniformAMD(float);" "vec2 maxInvocationsNonUniformAMD(vec2);" "vec3 maxInvocationsNonUniformAMD(vec3);" @@ -1647,6 +2007,96 @@ void 
TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "uvec3 maxInvocationsNonUniformAMD(uvec3);" "uvec4 maxInvocationsNonUniformAMD(uvec4);" + "double maxInvocationsNonUniformAMD(double);" + "dvec2 maxInvocationsNonUniformAMD(dvec2);" + "dvec3 maxInvocationsNonUniformAMD(dvec3);" + "dvec4 maxInvocationsNonUniformAMD(dvec4);" + + "int64_t maxInvocationsNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsNonUniformAMD(f16vec4);" + + "float maxInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 maxInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 maxInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 maxInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int maxInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 maxInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 maxInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 maxInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint maxInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 maxInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 maxInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 maxInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double maxInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 maxInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 maxInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 maxInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t maxInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 
maxInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 maxInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "float maxInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 maxInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 maxInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 maxInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int maxInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 maxInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 maxInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 maxInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint maxInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 maxInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 maxInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 maxInvocationsExclusiveScanNonUniformAMD(uvec4);" + + "double maxInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 maxInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 maxInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 maxInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t maxInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 maxInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 maxInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 maxInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t maxInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 
maxInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 maxInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 maxInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t maxInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 maxInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 maxInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 maxInvocationsExclusiveScanNonUniformAMD(f16vec4);" + "float addInvocationsNonUniformAMD(float);" "vec2 addInvocationsNonUniformAMD(vec2);" "vec3 addInvocationsNonUniformAMD(vec3);" @@ -1662,6 +2112,96 @@ void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvV "uvec3 addInvocationsNonUniformAMD(uvec3);" "uvec4 addInvocationsNonUniformAMD(uvec4);" + "double addInvocationsNonUniformAMD(double);" + "dvec2 addInvocationsNonUniformAMD(dvec2);" + "dvec3 addInvocationsNonUniformAMD(dvec3);" + "dvec4 addInvocationsNonUniformAMD(dvec4);" + + "int64_t addInvocationsNonUniformAMD(int64_t);" + "i64vec2 addInvocationsNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsNonUniformAMD(u64vec4);" + + "float16_t addInvocationsNonUniformAMD(float16_t);" + "f16vec2 addInvocationsNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsNonUniformAMD(f16vec4);" + + "float addInvocationsInclusiveScanNonUniformAMD(float);" + "vec2 addInvocationsInclusiveScanNonUniformAMD(vec2);" + "vec3 addInvocationsInclusiveScanNonUniformAMD(vec3);" + "vec4 addInvocationsInclusiveScanNonUniformAMD(vec4);" + + "int addInvocationsInclusiveScanNonUniformAMD(int);" + "ivec2 addInvocationsInclusiveScanNonUniformAMD(ivec2);" + "ivec3 addInvocationsInclusiveScanNonUniformAMD(ivec3);" + "ivec4 
addInvocationsInclusiveScanNonUniformAMD(ivec4);" + + "uint addInvocationsInclusiveScanNonUniformAMD(uint);" + "uvec2 addInvocationsInclusiveScanNonUniformAMD(uvec2);" + "uvec3 addInvocationsInclusiveScanNonUniformAMD(uvec3);" + "uvec4 addInvocationsInclusiveScanNonUniformAMD(uvec4);" + + "double addInvocationsInclusiveScanNonUniformAMD(double);" + "dvec2 addInvocationsInclusiveScanNonUniformAMD(dvec2);" + "dvec3 addInvocationsInclusiveScanNonUniformAMD(dvec3);" + "dvec4 addInvocationsInclusiveScanNonUniformAMD(dvec4);" + + "int64_t addInvocationsInclusiveScanNonUniformAMD(int64_t);" + "i64vec2 addInvocationsInclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsInclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsInclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsInclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsInclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsInclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsInclusiveScanNonUniformAMD(u64vec4);" + + "float16_t addInvocationsInclusiveScanNonUniformAMD(float16_t);" + "f16vec2 addInvocationsInclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsInclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsInclusiveScanNonUniformAMD(f16vec4);" + + "float addInvocationsExclusiveScanNonUniformAMD(float);" + "vec2 addInvocationsExclusiveScanNonUniformAMD(vec2);" + "vec3 addInvocationsExclusiveScanNonUniformAMD(vec3);" + "vec4 addInvocationsExclusiveScanNonUniformAMD(vec4);" + + "int addInvocationsExclusiveScanNonUniformAMD(int);" + "ivec2 addInvocationsExclusiveScanNonUniformAMD(ivec2);" + "ivec3 addInvocationsExclusiveScanNonUniformAMD(ivec3);" + "ivec4 addInvocationsExclusiveScanNonUniformAMD(ivec4);" + + "uint addInvocationsExclusiveScanNonUniformAMD(uint);" + "uvec2 addInvocationsExclusiveScanNonUniformAMD(uvec2);" + "uvec3 addInvocationsExclusiveScanNonUniformAMD(uvec3);" + "uvec4 addInvocationsExclusiveScanNonUniformAMD(uvec4);" + + 
"double addInvocationsExclusiveScanNonUniformAMD(double);" + "dvec2 addInvocationsExclusiveScanNonUniformAMD(dvec2);" + "dvec3 addInvocationsExclusiveScanNonUniformAMD(dvec3);" + "dvec4 addInvocationsExclusiveScanNonUniformAMD(dvec4);" + + "int64_t addInvocationsExclusiveScanNonUniformAMD(int64_t);" + "i64vec2 addInvocationsExclusiveScanNonUniformAMD(i64vec2);" + "i64vec3 addInvocationsExclusiveScanNonUniformAMD(i64vec3);" + "i64vec4 addInvocationsExclusiveScanNonUniformAMD(i64vec4);" + + "uint64_t addInvocationsExclusiveScanNonUniformAMD(uint64_t);" + "u64vec2 addInvocationsExclusiveScanNonUniformAMD(u64vec2);" + "u64vec3 addInvocationsExclusiveScanNonUniformAMD(u64vec3);" + "u64vec4 addInvocationsExclusiveScanNonUniformAMD(u64vec4);" + + "float16_t addInvocationsExclusiveScanNonUniformAMD(float16_t);" + "f16vec2 addInvocationsExclusiveScanNonUniformAMD(f16vec2);" + "f16vec3 addInvocationsExclusiveScanNonUniformAMD(f16vec3);" + "f16vec4 addInvocationsExclusiveScanNonUniformAMD(f16vec4);" + "float swizzleInvocationsAMD(float, uvec4);" "vec2 swizzleInvocationsAMD(vec2, uvec4);" "vec3 swizzleInvocationsAMD(vec3, uvec4);" @@ -4324,6 +4864,19 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion symbolTable.setFunctionExtensions("swizzleInvocationsWithPatternAMD", 1, &E_GL_AMD_shader_ballot); symbolTable.setFunctionExtensions("writeInvocationAMD", 1, &E_GL_AMD_shader_ballot); symbolTable.setFunctionExtensions("mbcntAMD", 1, &E_GL_AMD_shader_ballot); + + symbolTable.setFunctionExtensions("minInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsInclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsInclusiveScanNonUniformAMD", 1, 
&E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsInclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsExclusiveScanAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("minInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("maxInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); + symbolTable.setFunctionExtensions("addInvocationsExclusiveScanNonUniformAMD", 1, &E_GL_AMD_shader_ballot); } if (profile != EEsProfile) { @@ -4986,16 +5539,28 @@ void TBuiltIns::identifyBuiltIns(int version, EProfile profile, const SpvVersion symbolTable.relateToOperator("allInvocationsEqualARB", EOpAllInvocationsEqual); #ifdef AMD_EXTENSIONS - symbolTable.relateToOperator("minInvocationsAMD", EOpMinInvocations); - symbolTable.relateToOperator("maxInvocationsAMD", EOpMaxInvocations); - symbolTable.relateToOperator("addInvocationsAMD", EOpAddInvocations); - symbolTable.relateToOperator("minInvocationsNonUniformAMD", EOpMinInvocationsNonUniform); - symbolTable.relateToOperator("maxInvocationsNonUniformAMD", EOpMaxInvocationsNonUniform); - symbolTable.relateToOperator("addInvocationsNonUniformAMD", EOpAddInvocationsNonUniform); - symbolTable.relateToOperator("swizzleInvocationsAMD", EOpSwizzleInvocations); - symbolTable.relateToOperator("swizzleInvocationsMaskedAMD", EOpSwizzleInvocationsMasked); - symbolTable.relateToOperator("writeInvocationAMD", EOpWriteInvocation); - symbolTable.relateToOperator("mbcntAMD", EOpMbcnt); + symbolTable.relateToOperator("minInvocationsAMD", EOpMinInvocations); + symbolTable.relateToOperator("maxInvocationsAMD", EOpMaxInvocations); + symbolTable.relateToOperator("addInvocationsAMD", 
EOpAddInvocations); + symbolTable.relateToOperator("minInvocationsNonUniformAMD", EOpMinInvocationsNonUniform); + symbolTable.relateToOperator("maxInvocationsNonUniformAMD", EOpMaxInvocationsNonUniform); + symbolTable.relateToOperator("addInvocationsNonUniformAMD", EOpAddInvocationsNonUniform); + symbolTable.relateToOperator("minInvocationsInclusiveScanAMD", EOpMinInvocationsInclusiveScan); + symbolTable.relateToOperator("maxInvocationsInclusiveScanAMD", EOpMaxInvocationsInclusiveScan); + symbolTable.relateToOperator("addInvocationsInclusiveScanAMD", EOpAddInvocationsInclusiveScan); + symbolTable.relateToOperator("minInvocationsInclusiveScanNonUniformAMD", EOpMinInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("maxInvocationsInclusiveScanNonUniformAMD", EOpMaxInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("addInvocationsInclusiveScanNonUniformAMD", EOpAddInvocationsInclusiveScanNonUniform); + symbolTable.relateToOperator("minInvocationsExclusiveScanAMD", EOpMinInvocationsExclusiveScan); + symbolTable.relateToOperator("maxInvocationsExclusiveScanAMD", EOpMaxInvocationsExclusiveScan); + symbolTable.relateToOperator("addInvocationsExclusiveScanAMD", EOpAddInvocationsExclusiveScan); + symbolTable.relateToOperator("minInvocationsExclusiveScanNonUniformAMD", EOpMinInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("maxInvocationsExclusiveScanNonUniformAMD", EOpMaxInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("addInvocationsExclusiveScanNonUniformAMD", EOpAddInvocationsExclusiveScanNonUniform); + symbolTable.relateToOperator("swizzleInvocationsAMD", EOpSwizzleInvocations); + symbolTable.relateToOperator("swizzleInvocationsMaskedAMD", EOpSwizzleInvocationsMasked); + symbolTable.relateToOperator("writeInvocationAMD", EOpWriteInvocation); + symbolTable.relateToOperator("mbcntAMD", EOpMbcnt); symbolTable.relateToOperator("min3", EOpMin3); symbolTable.relateToOperator("max3", EOpMax3); diff --git 
a/glslang/MachineIndependent/intermOut.cpp b/glslang/MachineIndependent/intermOut.cpp index a035954..b8a6969 100644 --- a/glslang/MachineIndependent/intermOut.cpp +++ b/glslang/MachineIndependent/intermOut.cpp @@ -377,6 +377,21 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node) case EOpMinInvocationsNonUniform: out.debug << "minInvocationsNonUniform"; break; case EOpMaxInvocationsNonUniform: out.debug << "maxInvocationsNonUniform"; break; case EOpAddInvocationsNonUniform: out.debug << "addInvocationsNonUniform"; break; + + case EOpMinInvocationsInclusiveScan: out.debug << "minInvocationsInclusiveScan"; break; + case EOpMaxInvocationsInclusiveScan: out.debug << "maxInvocationsInclusiveScan"; break; + case EOpAddInvocationsInclusiveScan: out.debug << "addInvocationsInclusiveScan"; break; + case EOpMinInvocationsInclusiveScanNonUniform: out.debug << "minInvocationsInclusiveScanNonUniform"; break; + case EOpMaxInvocationsInclusiveScanNonUniform: out.debug << "maxInvocationsInclusiveScanNonUniform"; break; + case EOpAddInvocationsInclusiveScanNonUniform: out.debug << "addInvocationsInclusiveScanNonUniform"; break; + + case EOpMinInvocationsExclusiveScan: out.debug << "minInvocationsExclusiveScan"; break; + case EOpMaxInvocationsExclusiveScan: out.debug << "maxInvocationsExclusiveScan"; break; + case EOpAddInvocationsExclusiveScan: out.debug << "addInvocationsExclusiveScan"; break; + case EOpMinInvocationsExclusiveScanNonUniform: out.debug << "minInvocationsExclusiveScanNonUniform"; break; + case EOpMaxInvocationsExclusiveScanNonUniform: out.debug << "maxInvocationsExclusiveScanNonUniform"; break; + case EOpAddInvocationsExclusiveScanNonUniform: out.debug << "addInvocationsExclusiveScanNonUniform"; break; + case EOpMbcnt: out.debug << "mbcnt"; break; case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break; diff --git a/gtests/Spv.FromFile.cpp b/gtests/Spv.FromFile.cpp index c946ef7..f317cdb 100644 --- a/gtests/Spv.FromFile.cpp +++ 
b/gtests/Spv.FromFile.cpp @@ -358,6 +358,7 @@ INSTANTIATE_TEST_CASE_P( Glsl, CompileVulkanToSpirvTestAMD, ::testing::ValuesIn(std::vector({ "spv.float16.frag", + "spv.shaderBallotAMD.comp" })), FileNameAsCustomTestSuffix ); -- 2.7.4