external/vulkancts/modules/vulkan/shaderexecutor/vktAtomicOperationTests.cpp
/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Atomic operations (OpAtomic*) tests.
 *//*--------------------------------------------------------------------*/

#include "vktAtomicOperationTests.hpp"
#include "vktShaderExecutor.hpp"

#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vktTestGroupUtil.hpp"

#include "tcuTestLog.hpp"
#include "tcuStringTemplate.hpp"
#include "tcuResultCollector.hpp"

#include "deStringUtil.hpp"
#include "deSharedPtr.hpp"
#include "deRandom.hpp"
#include "deArrayUtil.hpp"

#include <map>
#include <sstream>
#include <string>
#include <vector>

namespace vkt
{
namespace shaderexecutor
{

namespace
{

using de::UniquePtr;
using de::MovePtr;
using std::vector;

using namespace vk;

// Buffer helper
class Buffer
{
public:
                                Buffer          (Context& context, VkBufferUsageFlags usage, size_t size);

    VkBuffer                    getBuffer       (void) const { return *m_buffer; }
    void*                       getHostPtr      (void) const { return m_allocation->getHostPtr(); }
    void                        flush           (void);
    void                        invalidate      (void);

private:
    const DeviceInterface&      m_vkd;
    const VkDevice              m_device;
    const Unique<VkBuffer>      m_buffer;
    const UniquePtr<Allocation> m_allocation;
};

typedef de::SharedPtr<Buffer> BufferSp;

Move<VkBuffer> createBuffer (const DeviceInterface& vkd, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usageFlags)
{
    const VkBufferCreateInfo createInfo =
    {
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        DE_NULL,
        (VkBufferCreateFlags)0,
        size,
        usageFlags,
        VK_SHARING_MODE_EXCLUSIVE,
        0u,
        DE_NULL
    };
    return createBuffer(vkd, device, &createInfo);
}

MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkBuffer buffer)
{
    MovePtr<Allocation> alloc (allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));

    VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));

    return alloc;
}

Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
    : m_vkd         (context.getDeviceInterface())
    , m_device      (context.getDevice())
    , m_buffer      (createBuffer(context.getDeviceInterface(),
                                  context.getDevice(),
                                  (VkDeviceSize)size,
                                  usage))
    , m_allocation  (allocateAndBindMemory(context.getDeviceInterface(),
                                           context.getDevice(),
                                           context.getDefaultAllocator(),
                                           *m_buffer))
{
}

void Buffer::flush (void)
{
    flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}

void Buffer::invalidate (void)
{
    invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
}

// Tests

enum AtomicOperation
{
    ATOMIC_OP_EXCHANGE = 0,
    ATOMIC_OP_COMP_SWAP,
    ATOMIC_OP_ADD,
    ATOMIC_OP_MIN,
    ATOMIC_OP_MAX,
    ATOMIC_OP_AND,
    ATOMIC_OP_OR,
    ATOMIC_OP_XOR,

    ATOMIC_OP_LAST
};

std::string atomicOp2Str (AtomicOperation op)
{
    static const char* const s_names[] =
    {
        "atomicExchange",
        "atomicCompSwap",
        "atomicAdd",
        "atomicMin",
        "atomicMax",
        "atomicAnd",
        "atomicOr",
        "atomicXor"
    };
    return de::getSizedArrayElement<ATOMIC_OP_LAST>(s_names, op);
}

enum
{
    NUM_ELEMENTS = 32
};

class AtomicOperationCaseInstance : public TestInstance
{
public:
                                AtomicOperationCaseInstance     (Context&           context,
                                                                 const ShaderSpec&  shaderSpec,
                                                                 glu::ShaderType    shaderType,
                                                                 bool               sign,
                                                                 AtomicOperation    atomicOp);
    virtual                     ~AtomicOperationCaseInstance    (void);

    virtual tcu::TestStatus     iterate                         (void);

private:
    const ShaderSpec&           m_shaderSpec;
    glu::ShaderType             m_shaderType;
    bool                        m_sign;
    AtomicOperation             m_atomicOp;

    struct BufferInterface
    {
        // Use half the number of elements for inout to cause overlap between atomic operations.
        // Each inout element at index i has two atomic operations applied to it, using the inputs
        // at indices i and i + NUM_ELEMENTS / 2.
        deInt32     index;
        deUint32    inout[NUM_ELEMENTS / 2];
        deUint32    input[NUM_ELEMENTS];
        deUint32    compare[NUM_ELEMENTS];
        deUint32    output[NUM_ELEMENTS];
    };

    template<typename T>
    struct Expected
    {
        T m_inout;
        T m_output[2];

        Expected (T inout, T output0, T output1)
            : m_inout(inout)
        {
            m_output[0] = output0;
            m_output[1] = output1;
        }

        bool compare (deUint32 inout, deUint32 output0, deUint32 output1)
        {
            return (deMemCmp((const void*)&m_inout, (const void*)&inout, sizeof(inout)) == 0
                    && deMemCmp((const void*)&m_output[0], (const void*)&output0, sizeof(output0)) == 0
                    && deMemCmp((const void*)&m_output[1], (const void*)&output1, sizeof(output1)) == 0);
        }
    };

    template<typename T> void   checkOperation  (const BufferInterface& original,
                                                 const BufferInterface& result,
                                                 tcu::ResultCollector&  resultCollector);
};

AtomicOperationCaseInstance::AtomicOperationCaseInstance (Context&          context,
                                                          const ShaderSpec& shaderSpec,
                                                          glu::ShaderType   shaderType,
                                                          bool              sign,
                                                          AtomicOperation   atomicOp)
    : TestInstance  (context)
    , m_shaderSpec  (shaderSpec)
    , m_shaderType  (shaderType)
    , m_sign        (sign)
    , m_atomicOp    (atomicOp)
{
}

AtomicOperationCaseInstance::~AtomicOperationCaseInstance (void)
{
}

// Use a template to handle both signed and unsigned cases; SPIR-V has separate
// opcodes for e.g. the signed and unsigned atomic min/max operations.
template<typename T>
void AtomicOperationCaseInstance::checkOperation (const BufferInterface& original,
                                                  const BufferInterface& result,
                                                  tcu::ResultCollector&  resultCollector)
{
    // originalInout = original inout value
    // input0        = input at index i
    // input1        = input at index i + NUM_ELEMENTS / 2
    //
    // An atomic operation returns the memory contents before the operation,
    // and this return value is stored as output. Two operations are executed
    // for each InOut value (one using input0 and one using input1).
    //
    // Because two operations overlap on each InOut element, the resulting
    // InOut value and the outputs of the operations have two valid result
    // candidates, depending on the execution order. Verification passes
    // if the results match one of these options.
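    //
    // As a purely illustrative example (hypothetical values) for atomicAdd with
    // originalInout = 1, input0 = 2 and input1 = 3:
    //   order A (input0 first): output0 = 1, output1 = 1 + 2 = 3, final inout = 6
    //   order B (input1 first): output0 = 1 + 3 = 4, output1 = 1, final inout = 6
    // which corresponds to the two Expected entries pushed for ATOMIC_OP_ADD below.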

    for (int elementNdx = 0; elementNdx < NUM_ELEMENTS / 2; elementNdx++)
    {
        // Needed when reinterpreting the data as signed values.
        const T originalInout   = *reinterpret_cast<const T*>(&original.inout[elementNdx]);
        const T input0          = *reinterpret_cast<const T*>(&original.input[elementNdx]);
        const T input1          = *reinterpret_cast<const T*>(&original.input[elementNdx + NUM_ELEMENTS / 2]);

        // Expected results are collected into this vector.
        vector<Expected<T> > exp;

        switch (m_atomicOp)
        {
            case ATOMIC_OP_ADD:
            {
                exp.push_back(Expected<T>(originalInout + input0 + input1, originalInout, originalInout + input0));
                exp.push_back(Expected<T>(originalInout + input0 + input1, originalInout + input1, originalInout));
            }
            break;

            case ATOMIC_OP_AND:
            {
                exp.push_back(Expected<T>(originalInout & input0 & input1, originalInout, originalInout & input0));
                exp.push_back(Expected<T>(originalInout & input0 & input1, originalInout & input1, originalInout));
            }
            break;

            case ATOMIC_OP_OR:
            {
                exp.push_back(Expected<T>(originalInout | input0 | input1, originalInout, originalInout | input0));
                exp.push_back(Expected<T>(originalInout | input0 | input1, originalInout | input1, originalInout));
            }
            break;

            case ATOMIC_OP_XOR:
            {
                exp.push_back(Expected<T>(originalInout ^ input0 ^ input1, originalInout, originalInout ^ input0));
                exp.push_back(Expected<T>(originalInout ^ input0 ^ input1, originalInout ^ input1, originalInout));
            }
            break;

            case ATOMIC_OP_MIN:
            {
                exp.push_back(Expected<T>(de::min(de::min(originalInout, input0), input1), originalInout, de::min(originalInout, input0)));
                exp.push_back(Expected<T>(de::min(de::min(originalInout, input0), input1), de::min(originalInout, input1), originalInout));
            }
            break;

            case ATOMIC_OP_MAX:
            {
                exp.push_back(Expected<T>(de::max(de::max(originalInout, input0), input1), originalInout, de::max(originalInout, input0)));
                exp.push_back(Expected<T>(de::max(de::max(originalInout, input0), input1), de::max(originalInout, input1), originalInout));
            }
            break;

            case ATOMIC_OP_EXCHANGE:
            {
                exp.push_back(Expected<T>(input1, originalInout, input0));
                exp.push_back(Expected<T>(input0, input1, originalInout));
            }
            break;

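            // For compSwap the compare values are set up in iterate() so that, for even
            // elements, only the operation using input0 has a compare value equal to the
            // original inout value, and for odd elements only the operation using input1
            // does; hence the expected outcomes differ by index parity.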
            case ATOMIC_OP_COMP_SWAP:
            {
                if (elementNdx % 2 == 0)
                {
                    exp.push_back(Expected<T>(input0, originalInout, input0));
                    exp.push_back(Expected<T>(input0, originalInout, originalInout));
                }
                else
                {
                    exp.push_back(Expected<T>(input1, input1, originalInout));
                    exp.push_back(Expected<T>(input1, originalInout, originalInout));
                }
            }
            break;

            default:
                DE_FATAL("Unexpected atomic operation.");
                break;
        }

        const deUint32 resIo        = result.inout[elementNdx];
        const deUint32 resOutput0   = result.output[elementNdx];
        const deUint32 resOutput1   = result.output[elementNdx + NUM_ELEMENTS / 2];

        if (!exp[0].compare(resIo, resOutput0, resOutput1) && !exp[1].compare(resIo, resOutput0, resOutput1))
        {
            std::ostringstream errorMessage;
            errorMessage    << "ERROR: Result value check failed at index " << elementNdx
                            << ". Expected one of the two outcomes: InOut = " << tcu::toHex(exp[0].m_inout)
                            << ", Output0 = " << tcu::toHex(exp[0].m_output[0]) << ", Output1 = "
                            << tcu::toHex(exp[0].m_output[1]) << ", or InOut = " << tcu::toHex(exp[1].m_inout)
                            << ", Output0 = " << tcu::toHex(exp[1].m_output[0]) << ", Output1 = "
                            << tcu::toHex(exp[1].m_output[1]) << ". Got: InOut = " << tcu::toHex(resIo)
                            << ", Output0 = " << tcu::toHex(resOutput0) << ", Output1 = "
                            << tcu::toHex(resOutput1) << ". Using Input0 = " << tcu::toHex(original.input[elementNdx])
                            << " and Input1 = " << tcu::toHex(original.input[elementNdx + NUM_ELEMENTS / 2]) << ".";

            resultCollector.fail(errorMessage.str());
        }
    }
}

tcu::TestStatus AtomicOperationCaseInstance::iterate (void)
{
    // Check that stores and atomic operations are supported in the selected shader stage.
    switch (m_shaderType)
    {
        case glu::SHADERTYPE_VERTEX:
        case glu::SHADERTYPE_TESSELLATION_CONTROL:
        case glu::SHADERTYPE_TESSELLATION_EVALUATION:
        case glu::SHADERTYPE_GEOMETRY:
            if (!m_context.getDeviceFeatures().vertexPipelineStoresAndAtomics)
                TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in vertex, tessellation, and geometry shaders.");
            break;
        case glu::SHADERTYPE_FRAGMENT:
            if (!m_context.getDeviceFeatures().fragmentStoresAndAtomics)
                TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in the fragment shader.");
            break;
        case glu::SHADERTYPE_COMPUTE:
            break;
        default:
            DE_FATAL("Unsupported shader type");
    }

    tcu::TestLog&           log     = m_context.getTestContext().getLog();
    const DeviceInterface&  vkd     = m_context.getDeviceInterface();
    const VkDevice          device  = m_context.getDevice();
    de::Random              rnd     (0x62a15e34);
    Buffer                  buffer  (m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(BufferInterface));
    BufferInterface*        ptr     = (BufferInterface*)buffer.getHostPtr();

    for (int i = 0; i < NUM_ELEMENTS / 2; i++)
    {
        ptr->inout[i] = rnd.getUint32();
        // For even indices the first-half compare value matches the inout value and the
        // second-half one does not; for odd indices it is the other way around. This makes
        // only one of the two overlapping compSwap operations perform the swap.
        ptr->compare[i] = ptr->inout[i] + (i % 2);
        ptr->compare[i + NUM_ELEMENTS / 2] = ptr->inout[i] + 1 - (i % 2);
    }
    for (int i = 0; i < NUM_ELEMENTS; i++)
    {
        ptr->input[i] = rnd.getUint32();
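        // Pre-fill outputs with a known filler value so that missing writes can be detected.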
        ptr->output[i] = 0xcdcdcdcd;
    }
    ptr->index = 0;

    // Take a copy to be used when calculating expected values.
    BufferInterface original = *ptr;

    buffer.flush();

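    // Create the descriptor set layout, pool and set used to expose the storage buffer to the shader.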
    Move<VkDescriptorSetLayout> extraResourcesLayout;
    Move<VkDescriptorPool>      extraResourcesSetPool;
    Move<VkDescriptorSet>       extraResourcesSet;

    const VkDescriptorSetLayoutBinding bindings[] =
    {
        { 0u, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, DE_NULL }
    };

    const VkDescriptorSetLayoutCreateInfo layoutInfo =
    {
        VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
        DE_NULL,
        (VkDescriptorSetLayoutCreateFlags)0u,
        DE_LENGTH_OF_ARRAY(bindings),
        bindings
    };

    extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);

    const VkDescriptorPoolSize poolSizes[] =
    {
        { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u }
    };
    const VkDescriptorPoolCreateInfo poolInfo =
    {
        VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
        DE_NULL,
        (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
        1u,     // maxSets
        DE_LENGTH_OF_ARRAY(poolSizes),
        poolSizes
    };

    extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);

    const VkDescriptorSetAllocateInfo allocInfo =
    {
        VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
        DE_NULL,
        *extraResourcesSetPool,
        1u,
        &extraResourcesLayout.get()
    };

    extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);

    VkDescriptorBufferInfo bufferInfo;
    bufferInfo.buffer   = buffer.getBuffer();
    bufferInfo.offset   = 0u;
    bufferInfo.range    = VK_WHOLE_SIZE;

    const VkWriteDescriptorSet descriptorWrite =
    {
        VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
        DE_NULL,
        *extraResourcesSet,
        0u,     // dstBinding
        0u,     // dstArrayElement
        1u,
        VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
        (const VkDescriptorImageInfo*)DE_NULL,
        &bufferInfo,
        (const VkBufferView*)DE_NULL
    };

    vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);

    // Storage for output varying data.
    std::vector<deUint32>   outputs     (NUM_ELEMENTS);
    std::vector<void*>      outputPtr   (NUM_ELEMENTS);

    for (size_t i = 0; i < NUM_ELEMENTS; i++)
    {
        outputs[i] = 0xcdcdcdcd;
        outputPtr[i] = &outputs[i];
    }

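    // Build the shader executor for the tested shader stage and run one invocation per element.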
    UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
    executor->execute(NUM_ELEMENTS, DE_NULL, &outputPtr[0], *extraResourcesSet);
    buffer.invalidate();

    tcu::ResultCollector resultCollector(log);

    // Check the results of the atomic operation.
    if (m_sign)
        checkOperation<deInt32>(original, *ptr, resultCollector);
    else
        checkOperation<deUint32>(original, *ptr, resultCollector);

    return tcu::TestStatus(resultCollector.getResult(), resultCollector.getMessage());
}

class AtomicOperationCase : public TestCase
{
public:
                            AtomicOperationCase     (tcu::TestContext&  testCtx,
                                                     const char*        name,
                                                     const char*        description,
                                                     glu::ShaderType    type,
                                                     bool               sign,
                                                     AtomicOperation    atomicOp);
    virtual                 ~AtomicOperationCase    (void);

    virtual TestInstance*   createInstance          (Context& ctx) const;
    virtual void            initPrograms            (vk::SourceCollections& programCollection) const
    {
        generateSources(m_shaderType, m_shaderSpec, programCollection);
    }

private:
    void                    createShaderSpec        (void);

    ShaderSpec              m_shaderSpec;
    const glu::ShaderType   m_shaderType;
    const bool              m_sign;
    const AtomicOperation   m_atomicOp;
};

AtomicOperationCase::AtomicOperationCase (tcu::TestContext& testCtx,
                                          const char*       name,
                                          const char*       description,
                                          glu::ShaderType   shaderType,
                                          bool              sign,
                                          AtomicOperation   atomicOp)
    : TestCase      (testCtx, name, description)
    , m_shaderType  (shaderType)
    , m_sign        (sign)
    , m_atomicOp    (atomicOp)
{
    createShaderSpec();
    init();
}

AtomicOperationCase::~AtomicOperationCase (void)
{
}

TestInstance* AtomicOperationCase::createInstance (Context& ctx) const
{
    return new AtomicOperationCaseInstance(ctx, m_shaderSpec, m_shaderType, m_sign, m_atomicOp);
}

void AtomicOperationCase::createShaderSpec (void)
{
    const tcu::StringTemplate shaderTemplateGlobal(
        "layout (set = ${SETIDX}, binding = 0, std430) buffer AtomicBuffer\n"
        "{\n"
        "    highp int index;\n"
        "    highp ${DATATYPE} inoutValues[${N}/2];\n"
        "    highp ${DATATYPE} inputValues[${N}];\n"
        "    highp ${DATATYPE} compareValues[${N}];\n"
        "    highp ${DATATYPE} outputValues[${N}];\n"
        "} buf;\n");

    std::map<std::string, std::string> specializations;
    specializations["DATATYPE"]     = m_sign ? "int" : "uint";
    specializations["ATOMICOP"]     = atomicOp2Str(m_atomicOp);
    specializations["SETIDX"]       = de::toString((int)EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX);
    specializations["N"]            = de::toString((int)NUM_ELEMENTS);
    specializations["COMPARE_ARG"]  = m_atomicOp == ATOMIC_OP_COMP_SWAP ? "buf.compareValues[idx], " : "";

    const tcu::StringTemplate shaderTemplateSrc(
        "int idx = atomicAdd(buf.index, 1);\n"
        "buf.outputValues[idx] = ${ATOMICOP}(buf.inoutValues[idx % (${N}/2)], ${COMPARE_ARG}buf.inputValues[idx]);\n");
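
    // For illustration only: with signed data, ATOMIC_OP_ADD and assuming the
    // extra-resources descriptor set index is 0, the specialized source roughly
    // expands to
    //
    //   layout (set = 0, binding = 0, std430) buffer AtomicBuffer { ... } buf;
    //   int idx = atomicAdd(buf.index, 1);
    //   buf.outputValues[idx] = atomicAdd(buf.inoutValues[idx % (32/2)], buf.inputValues[idx]);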

    m_shaderSpec.outputs.push_back(Symbol("outData", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
    m_shaderSpec.globalDeclarations = shaderTemplateGlobal.specialize(specializations);
    m_shaderSpec.source             = shaderTemplateSrc.specialize(specializations);
}

void addAtomicOperationTests (tcu::TestCaseGroup* atomicOperationTestsGroup)
{
    tcu::TestContext& testCtx = atomicOperationTestsGroup->getTestContext();

    static const struct
    {
        glu::ShaderType type;
        const char*     name;
    } shaderTypes[] =
    {
        { glu::SHADERTYPE_VERTEX,                   "vertex"    },
        { glu::SHADERTYPE_FRAGMENT,                 "fragment"  },
        { glu::SHADERTYPE_GEOMETRY,                 "geometry"  },
        { glu::SHADERTYPE_TESSELLATION_CONTROL,     "tess_ctrl" },
        { glu::SHADERTYPE_TESSELLATION_EVALUATION,  "tess_eval" },
        { glu::SHADERTYPE_COMPUTE,                  "compute"   }
    };

    static const struct
    {
        bool            value;
        const char*     name;
        const char*     description;
    } dataSign[] =
    {
        { true,     "signed",   "Tests using signed data (int)"     },
        { false,    "unsigned", "Tests using unsigned data (uint)"  }
    };

    static const struct
    {
        AtomicOperation value;
        const char*     name;
    } atomicOp[] =
    {
        { ATOMIC_OP_EXCHANGE,   "exchange"  },
        { ATOMIC_OP_COMP_SWAP,  "comp_swap" },
        { ATOMIC_OP_ADD,        "add"       },
        { ATOMIC_OP_MIN,        "min"       },
        { ATOMIC_OP_MAX,        "max"       },
        { ATOMIC_OP_AND,        "and"       },
        { ATOMIC_OP_OR,         "or"        },
        { ATOMIC_OP_XOR,        "xor"       }
    };

    for (int opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(atomicOp); opNdx++)
    {
        for (int signNdx = 0; signNdx < DE_LENGTH_OF_ARRAY(dataSign); signNdx++)
        {
            for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
            {
                const std::string description   = std::string("Tests atomic operation ") + atomicOp2Str(atomicOp[opNdx].value) + std::string(".");
                const std::string name          = std::string(atomicOp[opNdx].name) + "_" + std::string(dataSign[signNdx].name) + "_" + std::string(shaderTypes[shaderTypeNdx].name);
                atomicOperationTestsGroup->addChild(new AtomicOperationCase(testCtx, name.c_str(), description.c_str(), shaderTypes[shaderTypeNdx].type, dataSign[signNdx].value, atomicOp[opNdx].value));
            }
        }
    }
}

} // anonymous

tcu::TestCaseGroup* createAtomicOperationTests (tcu::TestContext& testCtx)
{
    return createTestGroup(testCtx, "atomic_operations", "Atomic Operation Tests", addAtomicOperationTests);
}

} // shaderexecutor
} // vkt