Merge "Try harder to defeat GLSL compiler dead-code optimizations"
[platform/upstream/VK-GL-CTS.git] / external / vulkancts / modules / vulkan / spirv_assembly / vktSpvAsmComputeShaderCase.cpp
1 /*-------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Test Case Skeleton Based on Compute Shaders
22  *//*--------------------------------------------------------------------*/
23
24 #include "vktSpvAsmComputeShaderCase.hpp"
25
26 #include "deSharedPtr.hpp"
27 #include "deSTLUtil.hpp"
28
29 #include "vkBuilderUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkPlatform.hpp"
32 #include "vkRefUtil.hpp"
33 #include "vkQueryUtil.hpp"
34 #include "vkTypeUtil.hpp"
35
36 namespace
37 {
38
39 using namespace vk;
40 using std::vector;
41
42 typedef vkt::SpirVAssembly::AllocationMp                        AllocationMp;
43 typedef vkt::SpirVAssembly::AllocationSp                        AllocationSp;
44
45 typedef Unique<VkBuffer>                                                        BufferHandleUp;
46 typedef de::SharedPtr<BufferHandleUp>                           BufferHandleSp;
47
48 /*--------------------------------------------------------------------*//*!
49  * \brief Create storage buffer, allocate and bind memory for the buffer
50  *
51  * The memory is created as host visible and passed back as a vk::Allocation
52  * instance via outMemory.
53  *//*--------------------------------------------------------------------*/
54 Move<VkBuffer> createBufferAndBindMemory (const DeviceInterface& vkdi, const VkDevice& device, VkDescriptorType dtype, Allocator& allocator, size_t numBytes, AllocationMp* outMemory)
55 {
56         VkBufferUsageFlags                      usageBit                = (VkBufferUsageFlags)0;
57
58         switch (dtype)
59         {
60                 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: usageBit = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT; break;
61                 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: usageBit = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; break;
62                 default:                                                                DE_ASSERT(false);
63         }
64
65         const VkBufferCreateInfo bufferCreateInfo       =
66         {
67                 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,   // sType
68                 DE_NULL,                                                                // pNext
69                 0u,                                                                             // flags
70                 numBytes,                                                               // size
71                 usageBit,                                                               // usage
72                 VK_SHARING_MODE_EXCLUSIVE,                              // sharingMode
73                 0u,                                                                             // queueFamilyCount
74                 DE_NULL,                                                                // pQueueFamilyIndices
75         };
76
77         Move<VkBuffer>                          buffer                  (createBuffer(vkdi, device, &bufferCreateInfo));
78         const VkMemoryRequirements      requirements    = getBufferMemoryRequirements(vkdi, device, *buffer);
79         AllocationMp                            bufferMemory    = allocator.allocate(requirements, MemoryRequirement::HostVisible);
80
81         VK_CHECK(vkdi.bindBufferMemory(device, *buffer, bufferMemory->getMemory(), bufferMemory->getOffset()));
82         *outMemory = bufferMemory;
83
84         return buffer;
85 }
86
87 void setMemory (const DeviceInterface& vkdi, const VkDevice& device, Allocation* destAlloc, size_t numBytes, const void* data)
88 {
89         void* const hostPtr = destAlloc->getHostPtr();
90
91         deMemcpy((deUint8*)hostPtr, data, numBytes);
92         flushMappedMemoryRange(vkdi, device, destAlloc->getMemory(), destAlloc->getOffset(), numBytes);
93 }
94
95 void fillMemoryWithValue (const DeviceInterface& vkdi, const VkDevice& device, Allocation* destAlloc, size_t numBytes, deUint8 value)
96 {
97         void* const hostPtr = destAlloc->getHostPtr();
98
99         deMemset((deUint8*)hostPtr, value, numBytes);
100         flushMappedMemoryRange(vkdi, device, destAlloc->getMemory(), destAlloc->getOffset(), numBytes);
101 }
102
103 /*--------------------------------------------------------------------*//*!
104  * \brief Create a descriptor set layout with the given descriptor types
105  *
106  * All descriptors are created for compute pipeline.
107  *//*--------------------------------------------------------------------*/
108 Move<VkDescriptorSetLayout> createDescriptorSetLayout (const DeviceInterface& vkdi, const VkDevice& device, const vector<VkDescriptorType>& dtypes)
109 {
110         DescriptorSetLayoutBuilder builder;
111
112         for (size_t bindingNdx = 0; bindingNdx < dtypes.size(); ++bindingNdx)
113                 builder.addSingleBinding(dtypes[bindingNdx], VK_SHADER_STAGE_COMPUTE_BIT);
114
115         return builder.build(vkdi, device);
116 }
117
118 /*--------------------------------------------------------------------*//*!
119  * \brief Create a pipeline layout with one descriptor set
120  *//*--------------------------------------------------------------------*/
121 Move<VkPipelineLayout> createPipelineLayout (const DeviceInterface& vkdi, const VkDevice& device, VkDescriptorSetLayout descriptorSetLayout, const vkt::SpirVAssembly::BufferSp& pushConstants)
122 {
123         VkPipelineLayoutCreateInfo              createInfo      =
124         {
125                 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,  // sType
126                 DE_NULL,                                                                                // pNext
127                 (VkPipelineLayoutCreateFlags)0,
128                 1u,                                                                                             // descriptorSetCount
129                 &descriptorSetLayout,                                                   // pSetLayouts
130                 0u,                                                                                             // pushConstantRangeCount
131                 DE_NULL,                                                                                // pPushConstantRanges
132         };
133
134         VkPushConstantRange                             range           =
135         {
136                 VK_SHADER_STAGE_COMPUTE_BIT,                                    // stageFlags
137                 0,                                                                                              // offset
138                 0,                                                                                              // size
139         };
140
141         if (pushConstants != DE_NULL)
142         {
143                 range.size                                                      = static_cast<deUint32>(pushConstants->getNumBytes());
144                 createInfo.pushConstantRangeCount       = 1;
145                 createInfo.pPushConstantRanges          = &range;
146         }
147
148         return createPipelineLayout(vkdi, device, &createInfo);
149 }
150
151 /*--------------------------------------------------------------------*//*!
152  * \brief Create a one-time descriptor pool for one descriptor set that
153  * support the given descriptor types.
154  *//*--------------------------------------------------------------------*/
155 inline Move<VkDescriptorPool> createDescriptorPool (const DeviceInterface& vkdi, const VkDevice& device, const vector<VkDescriptorType>& dtypes)
156 {
157         DescriptorPoolBuilder builder;
158
159         for (size_t typeNdx = 0; typeNdx < dtypes.size(); ++typeNdx)
160                 builder.addType(dtypes[typeNdx], 1);
161
162         return builder.build(vkdi, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, /* maxSets = */ 1);
163 }
164
165 /*--------------------------------------------------------------------*//*!
166  * \brief Create a descriptor set
167  *
168  * The descriptor set's layout contains the given descriptor types,
169  * sequentially binded to binding points starting from 0.
170  *//*--------------------------------------------------------------------*/
171 Move<VkDescriptorSet> createDescriptorSet (const DeviceInterface& vkdi, const VkDevice& device, VkDescriptorPool pool, VkDescriptorSetLayout layout, const vector<VkDescriptorType>& dtypes, const vector<VkDescriptorBufferInfo>& descriptorInfos)
172 {
173         DE_ASSERT(dtypes.size() == descriptorInfos.size());
174
175         const VkDescriptorSetAllocateInfo       allocInfo       =
176         {
177                 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
178                 DE_NULL,
179                 pool,
180                 1u,
181                 &layout
182         };
183
184         Move<VkDescriptorSet>                           descriptorSet   = allocateDescriptorSet(vkdi, device, &allocInfo);
185         DescriptorSetUpdateBuilder                      builder;
186
187         for (deUint32 descriptorNdx = 0; descriptorNdx < dtypes.size(); ++descriptorNdx)
188                 builder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(descriptorNdx), dtypes[descriptorNdx], &descriptorInfos[descriptorNdx]);
189         builder.update(vkdi, device);
190
191         return descriptorSet;
192 }
193
194 /*--------------------------------------------------------------------*//*!
195  * \brief Create a compute pipeline based on the given shader
196  *//*--------------------------------------------------------------------*/
197 Move<VkPipeline> createComputePipeline (const DeviceInterface& vkdi, const VkDevice& device, VkPipelineLayout pipelineLayout, VkShaderModule shader, const char* entryPoint, const vector<deUint32>& specConstants)
198 {
199         const deUint32                                                  numSpecConstants                                = (deUint32)specConstants.size();
200         vector<VkSpecializationMapEntry>                entries;
201         VkSpecializationInfo                                    specInfo;
202
203         if (numSpecConstants != 0)
204         {
205                 entries.resize(numSpecConstants);
206
207                 for (deUint32 ndx = 0; ndx < numSpecConstants; ++ndx)
208                 {
209                         entries[ndx].constantID = ndx;
210                         entries[ndx].offset             = ndx * (deUint32)sizeof(deUint32);
211                         entries[ndx].size               = sizeof(deUint32);
212                 }
213
214                 specInfo.mapEntryCount          = numSpecConstants;
215                 specInfo.pMapEntries            = &entries[0];
216                 specInfo.dataSize                       = numSpecConstants * sizeof(deUint32);
217                 specInfo.pData                          = specConstants.data();
218         }
219
220         const VkPipelineShaderStageCreateInfo   pipelineShaderStageCreateInfo   =
221         {
222                 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,    // sType
223                 DE_NULL,                                                                                                // pNext
224                 (VkPipelineShaderStageCreateFlags)0,                                    // flags
225                 VK_SHADER_STAGE_COMPUTE_BIT,                                                    // stage
226                 shader,                                                                                                 // module
227                 entryPoint,                                                                                             // pName
228                 (numSpecConstants == 0) ? DE_NULL : &specInfo,                  // pSpecializationInfo
229         };
230         const VkComputePipelineCreateInfo               pipelineCreateInfo                              =
231         {
232                 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,                 // sType
233                 DE_NULL,                                                                                                // pNext
234                 (VkPipelineCreateFlags)0,
235                 pipelineShaderStageCreateInfo,                                                  // cs
236                 pipelineLayout,                                                                                 // layout
237                 (VkPipeline)0,                                                                                  // basePipelineHandle
238                 0u,                                                                                                             // basePipelineIndex
239         };
240
241         return createComputePipeline(vkdi, device, (VkPipelineCache)0u, &pipelineCreateInfo);
242 }
243
/*--------------------------------------------------------------------*//*!
 * \brief Create a command pool
 *
 * The created command pool is designated for use on the queue type
 * represented by the given queueFamilyIndex. No pool creation flags are
 * requested (0u), so command buffers cannot be individually reset.
 *//*--------------------------------------------------------------------*/
Move<VkCommandPool> createCommandPool (const DeviceInterface& vkdi, VkDevice device, deUint32 queueFamilyIndex)
{
	return createCommandPool(vkdi, device, 0u, queueFamilyIndex);
}
254
255 } // anonymous
256
257 namespace vkt
258 {
259 namespace SpirVAssembly
260 {
261
/*--------------------------------------------------------------------*//*!
 * \brief Test instance for compute pipeline
 *
 * The compute shader is specified in the format of SPIR-V assembly, which
 * is allowed to access MAX_NUM_INPUT_BUFFERS input storage buffers and
 * MAX_NUM_OUTPUT_BUFFERS output storage buffers maximally. The shader
 * source and input/output data are given in a ComputeShaderSpec object.
 *
 * This instance runs the given compute shader by feeding the data from input
 * buffers and compares the data in the output buffers with the expected.
 *//*--------------------------------------------------------------------*/
class SpvAsmComputeShaderInstance : public TestInstance
{
public:
												SpvAsmComputeShaderInstance	(Context& ctx, const ComputeShaderSpec& spec, const ComputeTestFeatures features);
	tcu::TestStatus								iterate						(void);

private:
	// Shader assembly, inputs, expected outputs and verification hooks.
	// Reference only: the spec must outlive this instance.
	const ComputeShaderSpec&					m_shaderSpec;
	// Device features (e.g. shaderInt16/shaderInt64) the test requires.
	const ComputeTestFeatures					m_features;
};
283
// ComputeShaderTestCase implementations

// Stores a reference to the shader spec (caller keeps it alive) and the
// required-feature flags; all real work happens in the created instance.
SpvAsmComputeShaderCase::SpvAsmComputeShaderCase (tcu::TestContext& testCtx, const char* name, const char* description, const ComputeShaderSpec& spec, const ComputeTestFeatures features)
	: TestCase		(testCtx, name, description)
	, m_shaderSpec	(spec)
	, m_features	(features)
{
}
292
// Registers the SPIR-V assembly source under the name "compute" so the
// framework assembles it into a binary retrievable in iterate().
void SpvAsmComputeShaderCase::initPrograms (SourceCollections& programCollection) const
{
	programCollection.spirvAsmSources.add("compute") << m_shaderSpec.assembly.c_str();
}
297
// Factory method: the returned instance is owned (and deleted) by the caller.
TestInstance* SpvAsmComputeShaderCase::createInstance (Context& ctx) const
{
	return new SpvAsmComputeShaderInstance(ctx, m_shaderSpec, m_features);
}
302
// ComputeShaderTestInstance implementations

// Holds a reference to the shader spec (must outlive this instance) and the
// feature flags checked at the start of iterate().
SpvAsmComputeShaderInstance::SpvAsmComputeShaderInstance (Context& ctx, const ComputeShaderSpec& spec, const ComputeTestFeatures features)
	: TestInstance		(ctx)
	, m_shaderSpec		(spec)
	, m_features		(features)
{
}
311
/*--------------------------------------------------------------------*//*!
 * \brief Execute the compute shader once and verify its output buffers
 *
 * Checks required extensions/features, creates a fresh device, uploads the
 * input buffers, dispatches the shader, waits for completion and compares
 * the output buffers either via the spec's verifyIO callback or by direct
 * byte comparison against the expected outputs.
 *//*--------------------------------------------------------------------*/
tcu::TestStatus SpvAsmComputeShaderInstance::iterate (void)
{
	const VkPhysicalDeviceFeatures&		features			= m_context.getDeviceFeatures();
	const vector<std::string>&			extensions			= m_context.getDeviceExtensions();

	// Throw NotSupported early if any device extension required by the spec
	// is absent.
	for (deUint32 extNdx = 0; extNdx < m_shaderSpec.extensions.size(); ++extNdx)
	{
		const std::string&				ext					= m_shaderSpec.extensions[extNdx];

		if (!de::contains(extensions.begin(), extensions.end(), ext))
		{
			TCU_THROW(NotSupportedError, (std::string("Device extension not supported: ") + ext).c_str());
		}
	}

	// Int16/Int64 shader capability checks requested via m_features.
	if ((m_features == COMPUTE_TEST_USES_INT16 || m_features == COMPUTE_TEST_USES_INT16_INT64) && !features.shaderInt16)
	{
		TCU_THROW(NotSupportedError, "shaderInt16 feature is not supported");
	}

	if ((m_features == COMPUTE_TEST_USES_INT64 || m_features == COMPUTE_TEST_USES_INT16_INT64) && !features.shaderInt64)
	{
		TCU_THROW(NotSupportedError, "shaderInt64 feature is not supported");
	}

	{
		const InstanceInterface&			vki					= m_context.getInstanceInterface();
		const VkPhysicalDevice				physicalDevice		= m_context.getPhysicalDevice();

		// 16bit storage features
		{
			if (!is16BitStorageFeaturesSupported(vki, physicalDevice, m_context.getInstanceExtensions(), m_shaderSpec.requestedVulkanFeatures.ext16BitStorage))
				TCU_THROW(NotSupportedError, "Requested 16bit storage features not supported");
		}

		// VariablePointers features
		{
			if (!isVariablePointersFeaturesSupported(vki, physicalDevice, m_context.getInstanceExtensions(), m_shaderSpec.requestedVulkanFeatures.extVariablePointers))
				TCU_THROW(NotSupportedError, "Request Variable Pointer feature not supported");
		}
	}

	// defer device and resource creation until after feature checks
	const Unique<VkDevice>				vkDevice			(createDeviceWithExtensions(m_context, m_context.getUniversalQueueFamilyIndex(), m_context.getDeviceExtensions(), m_shaderSpec.extensions));
	const VkDevice&						device				= *vkDevice;
	const DeviceDriver					vkDeviceInterface	(m_context.getInstanceInterface(), device);
	const DeviceInterface&				vkdi				= vkDeviceInterface;
	const de::UniquePtr<vk::Allocator>	vkAllocator			(createAllocator(m_context.getInstanceInterface(), m_context.getPhysicalDevice(), vkDeviceInterface, device));
	Allocator&							allocator			= *vkAllocator;
	const VkQueue						queue				(getDeviceQueue(vkDeviceInterface, device, m_context.getUniversalQueueFamilyIndex(), 0));

	vector<AllocationSp>				inputAllocs;
	vector<AllocationSp>				outputAllocs;
	vector<BufferHandleSp>				inputBuffers;
	vector<BufferHandleSp>				outputBuffers;
	vector<VkDescriptorBufferInfo>		descriptorInfos;
	vector<VkDescriptorType>			descriptorTypes;

	DE_ASSERT(!m_shaderSpec.outputs.empty());

	// Create buffer object, allocate storage, and create view for all input/output buffers.

	for (deUint32 inputNdx = 0; inputNdx < m_shaderSpec.inputs.size(); ++inputNdx)
	{
		// Per-input descriptor type override; defaults to storage buffer.
		if (m_shaderSpec.inputTypes.count(inputNdx) != 0)
			descriptorTypes.push_back(m_shaderSpec.inputTypes.at(inputNdx));
		else
			descriptorTypes.push_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);

		AllocationMp		alloc;
		const BufferSp&		input		= m_shaderSpec.inputs[inputNdx];
		const size_t		numBytes	= input->getNumBytes();
		BufferHandleUp*		buffer		= new BufferHandleUp(createBufferAndBindMemory(vkdi, device, descriptorTypes.back(), allocator, numBytes, &alloc));

		// Upload the input data before recording the command buffer.
		setMemory(vkdi, device, &*alloc, numBytes, input->data());
		descriptorInfos.push_back(vk::makeDescriptorBufferInfo(**buffer, 0u, numBytes));
		inputBuffers.push_back(BufferHandleSp(buffer));
		inputAllocs.push_back(de::SharedPtr<Allocation>(alloc.release()));
	}

	for (deUint32 outputNdx = 0; outputNdx < m_shaderSpec.outputs.size(); ++outputNdx)
	{
		descriptorTypes.push_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);

		AllocationMp		alloc;
		const BufferSp&		output		= m_shaderSpec.outputs[outputNdx];
		const size_t		numBytes	= output->getNumBytes();
		BufferHandleUp*		buffer		= new BufferHandleUp(createBufferAndBindMemory(vkdi, device, descriptorTypes.back(), allocator, numBytes, &alloc));

		// Poison output memory with 0xff so untouched bytes are detectable.
		fillMemoryWithValue(vkdi, device, &*alloc, numBytes, 0xff);
		descriptorInfos.push_back(vk::makeDescriptorBufferInfo(**buffer, 0u, numBytes));
		outputBuffers.push_back(BufferHandleSp(buffer));
		outputAllocs.push_back(de::SharedPtr<Allocation>(alloc.release()));
	}

	// Create layouts and descriptor set.

	Unique<VkDescriptorSetLayout>		descriptorSetLayout	(createDescriptorSetLayout(vkdi, device, descriptorTypes));
	Unique<VkPipelineLayout>			pipelineLayout		(createPipelineLayout(vkdi, device, *descriptorSetLayout, m_shaderSpec.pushConstants));
	Unique<VkDescriptorPool>			descriptorPool		(createDescriptorPool(vkdi, device, descriptorTypes));
	Unique<VkDescriptorSet>				descriptorSet		(createDescriptorSet(vkdi, device, *descriptorPool, *descriptorSetLayout, descriptorTypes, descriptorInfos));

	// Create compute shader and pipeline.

	const ProgramBinary&				binary				= m_context.getBinaryCollection().get("compute");
	Unique<VkShaderModule>				module				(createShaderModule(vkdi, device, binary, (VkShaderModuleCreateFlags)0u));

	Unique<VkPipeline>					computePipeline		(createComputePipeline(vkdi, device, *pipelineLayout, *module, m_shaderSpec.entryPoint.c_str(), m_shaderSpec.specConstants));

	// Create command buffer and record commands

	const Unique<VkCommandPool>			cmdPool				(createCommandPool(vkdi, device, m_context.getUniversalQueueFamilyIndex()));
	Unique<VkCommandBuffer>				cmdBuffer			(allocateCommandBuffer(vkdi, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	const VkCommandBufferBeginInfo		cmdBufferBeginInfo	=
	{
		VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,	// sType
		DE_NULL,										// pNext
		VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
		(const VkCommandBufferInheritanceInfo*)DE_NULL,
	};

	const tcu::IVec3&					numWorkGroups		= m_shaderSpec.numWorkGroups;

	// Record: bind pipeline + descriptors, optionally push constants, dispatch.
	VK_CHECK(vkdi.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
	vkdi.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
	vkdi.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0, 1, &descriptorSet.get(), 0, DE_NULL);
	if (m_shaderSpec.pushConstants != DE_NULL)
	{
		const deUint32	size	= static_cast<deUint32>(m_shaderSpec.pushConstants->getNumBytes());
		const void*		data	= m_shaderSpec.pushConstants->data();

		vkdi.cmdPushConstants(*cmdBuffer, *pipelineLayout, VK_SHADER_STAGE_COMPUTE_BIT, /* offset = */ 0, /* size = */ size, data);
	}
	vkdi.cmdDispatch(*cmdBuffer, numWorkGroups.x(), numWorkGroups.y(), numWorkGroups.z());
	VK_CHECK(vkdi.endCommandBuffer(*cmdBuffer));

	// Create fence and run.

	const Unique<VkFence>				cmdCompleteFence	(createFence(vkdi, device));
	const deUint64						infiniteTimeout		= ~(deUint64)0u;
	const VkSubmitInfo					submitInfo			=
	{
		VK_STRUCTURE_TYPE_SUBMIT_INFO,
		DE_NULL,
		0u,
		(const VkSemaphore*)DE_NULL,
		(const VkPipelineStageFlags*)DE_NULL,
		1u,
		&cmdBuffer.get(),
		0u,
		(const VkSemaphore*)DE_NULL,
	};

	VK_CHECK(vkdi.queueSubmit(queue, 1, &submitInfo, *cmdCompleteFence));
	VK_CHECK(vkdi.waitForFences(device, 1, &cmdCompleteFence.get(), 0u, infiniteTimeout)); // \note: timeout is failure

	// Check output: either hand everything to the spec's custom verifier, or
	// fall back to a plain byte comparison per output buffer.
	if (m_shaderSpec.verifyIO)
	{
		if (!(*m_shaderSpec.verifyIO)(m_shaderSpec.inputs, outputAllocs, m_shaderSpec.outputs, m_context.getTestContext().getLog()))
			return tcu::TestStatus(m_shaderSpec.failResult, m_shaderSpec.failMessage);
	}
	else
	{
		for (size_t outputNdx = 0; outputNdx < m_shaderSpec.outputs.size(); ++outputNdx)
		{
			const BufferSp& expectedOutput = m_shaderSpec.outputs[outputNdx];
			if (deMemCmp(expectedOutput->data(), outputAllocs[outputNdx]->getHostPtr(), expectedOutput->getNumBytes()))
				return tcu::TestStatus(m_shaderSpec.failResult, m_shaderSpec.failMessage);
		}
	}

	return tcu::TestStatus::pass("Output match with expected");
}
487
488 } // SpirVAssembly
489 } // vkt