Fix missing dependency on sparse binds
[platform/upstream/VK-GL-CTS.git] external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBufferSparseResidency.cpp
/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file  vktSparseResourcesBufferSparseResidency.cpp
 * \brief Sparse partially resident buffers tests
 *//*--------------------------------------------------------------------*/

#include "vktSparseResourcesBufferSparseResidency.hpp"
#include "vktSparseResourcesTestsUtil.hpp"
#include "vktSparseResourcesBase.hpp"
#include "vktTestCaseUtil.hpp"

#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"

#include "deStringUtil.hpp"
#include "deUniquePtr.hpp"

#include <string>
#include <vector>

using namespace vk;

namespace vkt
{
namespace sparse
{
namespace
{

enum ShaderParameters
{
    SIZE_OF_UINT_IN_SHADER = 4u,
};

class BufferSparseResidencyCase : public TestCase
{
public:
                    BufferSparseResidencyCase  (tcu::TestContext&       testCtx,
                                                const std::string&      name,
                                                const std::string&      description,
                                                const deUint32          bufferSize,
                                                const glu::GLSLVersion  glslVersion,
                                                const bool              useDeviceGroups);

    void            initPrograms               (SourceCollections&      sourceCollections) const;
    TestInstance*   createInstance             (Context&                context) const;

private:
    const deUint32          m_bufferSize;
    const glu::GLSLVersion  m_glslVersion;
    const bool              m_useDeviceGroups;
};

BufferSparseResidencyCase::BufferSparseResidencyCase (tcu::TestContext&       testCtx,
                                                      const std::string&      name,
                                                      const std::string&      description,
                                                      const deUint32          bufferSize,
                                                      const glu::GLSLVersion  glslVersion,
                                                      const bool              useDeviceGroups)
    : TestCase          (testCtx, name, description)
    , m_bufferSize      (bufferSize)
    , m_glslVersion     (glslVersion)
    , m_useDeviceGroups (useDeviceGroups)
{
}

void BufferSparseResidencyCase::initPrograms (SourceCollections& sourceCollections) const
{
    const char* const   versionDecl     = glu::getGLSLVersionDeclaration(m_glslVersion);
    const deUint32      iterationsCount = m_bufferSize / SIZE_OF_UINT_IN_SHADER;

    std::ostringstream src;

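    // Build a compute shader that, in a single invocation, copies the entire input SSBO
    // (binding 0) into the sparse output SSBO (binding 1), one uint per loop iteration.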
    src << versionDecl << "\n"
        << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
        << "layout(set = 0, binding = 0, std430) readonly buffer Input\n"
        << "{\n"
        << "    uint data[];\n"
        << "} sb_in;\n"
        << "\n"
        << "layout(set = 0, binding = 1, std430) writeonly buffer Output\n"
        << "{\n"
        << "    uint result[];\n"
        << "} sb_out;\n"
        << "\n"
        << "void main (void)\n"
        << "{\n"
        << "    for(int i=0; i<" << iterationsCount << "; ++i)\n"
        << "    {\n"
        << "        sb_out.result[i] = sb_in.data[i];\n"
        << "    }\n"
        << "}\n";

    sourceCollections.glslSources.add("comp") << glu::ComputeSource(src.str());
}

class BufferSparseResidencyInstance : public SparseResourcesBaseInstance
{
public:
                    BufferSparseResidencyInstance  (Context&        context,
                                                    const deUint32  bufferSize,
                                                    const bool      useDeviceGroups);

    tcu::TestStatus iterate                        (void);

private:
    const deUint32  m_bufferSize;
};

BufferSparseResidencyInstance::BufferSparseResidencyInstance (Context&        context,
                                                              const deUint32  bufferSize,
                                                              const bool      useDeviceGroups)
    : SparseResourcesBaseInstance (context, useDeviceGroups)
    , m_bufferSize                (bufferSize)
{
}

tcu::TestStatus BufferSparseResidencyInstance::iterate (void)
{
    const InstanceInterface& instance = m_context.getInstanceInterface();
    {
        // Create logical device supporting both sparse and compute operations
        QueueRequirementsVec queueRequirements;
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
        queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u));

        createDeviceSupportingQueues(queueRequirements);
    }
    const VkPhysicalDevice           physicalDevice           = getPhysicalDevice();
    const VkPhysicalDeviceProperties physicalDeviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);

    if (!getPhysicalDeviceFeatures(instance, physicalDevice).sparseResidencyBuffer)
        TCU_THROW(NotSupportedError, "Sparse partially resident buffers not supported");

    const DeviceInterface& deviceInterface = getDeviceInterface();
    const Queue&           sparseQueue     = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0);
    const Queue&           computeQueue    = getQueue(VK_QUEUE_COMPUTE_BIT, 0);

    // Go through all physical devices
    for (deUint32 physDevID = 0; physDevID < m_numPhysicalDevices; physDevID++)
    {
        const deUint32 firstDeviceID  = physDevID;
        const deUint32 secondDeviceID = (firstDeviceID + 1) % m_numPhysicalDevices;

        VkBufferCreateInfo bufferCreateInfo =
        {
            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,   // VkStructureType      sType;
            DE_NULL,                                // const void*          pNext;
            VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
            VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT,  // VkBufferCreateFlags  flags;
            m_bufferSize,                           // VkDeviceSize         size;
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
            VK_BUFFER_USAGE_TRANSFER_SRC_BIT,       // VkBufferUsageFlags   usage;
            VK_SHARING_MODE_EXCLUSIVE,              // VkSharingMode        sharingMode;
            0u,                                     // deUint32             queueFamilyIndexCount;
            DE_NULL                                 // const deUint32*      pQueueFamilyIndices;
        };

        const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex };

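        // If sparse binding and compute use different queue families, the buffer must be
        // shared concurrently between both families.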
        if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex)
        {
            bufferCreateInfo.sharingMode           = VK_SHARING_MODE_CONCURRENT;
            bufferCreateInfo.queueFamilyIndexCount = 2u;
            bufferCreateInfo.pQueueFamilyIndices   = queueFamilyIndices;
        }

        // Create sparse buffer
        const Unique<VkBuffer> sparseBuffer(createBuffer(deviceInterface, getDevice(), &bufferCreateInfo));

        // Create sparse buffer memory bind semaphore
        const Unique<VkSemaphore> bufferMemoryBindSemaphore(createSemaphore(deviceInterface, getDevice()));

        const VkMemoryRequirements bufferMemRequirements = getBufferMemoryRequirements(deviceInterface, getDevice(), *sparseBuffer);

        if (bufferMemRequirements.size > physicalDeviceProperties.limits.sparseAddressSpaceSize)
            TCU_THROW(NotSupportedError, "Required memory size for sparse resources exceeds device limits");

        DE_ASSERT((bufferMemRequirements.size % bufferMemRequirements.alignment) == 0);

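        // Each alignment-sized region of the sparse buffer counts as one binding slot.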
        const deUint32              numSparseSlots = static_cast<deUint32>(bufferMemRequirements.size / bufferMemRequirements.alignment);
        std::vector<DeviceMemorySp> deviceMemUniquePtrVec;

        {
            std::vector<VkSparseMemoryBind> sparseMemoryBinds;
            const deUint32                  memoryType = findMatchingMemoryType(instance, getPhysicalDevice(secondDeviceID), bufferMemRequirements, MemoryRequirement::Any);

            if (memoryType == NO_MATCH_FOUND)
                return tcu::TestStatus::fail("No matching memory type found");

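            // When testing device groups, the resource lives on one physical device while its memory
            // is allocated on another, so the peer link must support COPY_SRC and GENERIC_DST access.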
            if (firstDeviceID != secondDeviceID)
            {
                VkPeerMemoryFeatureFlags peerMemoryFeatureFlags = (VkPeerMemoryFeatureFlags)0;
                const deUint32           heapIndex              = getHeapIndexForMemoryType(instance, getPhysicalDevice(secondDeviceID), memoryType);
                deviceInterface.getDeviceGroupPeerMemoryFeatures(getDevice(), heapIndex, firstDeviceID, secondDeviceID, &peerMemoryFeatureFlags);

                if (((peerMemoryFeatureFlags & VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT)    == 0) ||
                    ((peerMemoryFeatureFlags & VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT) == 0))
                {
                    TCU_THROW(NotSupportedError, "Peer memory does not support COPY_SRC and GENERIC_DST");
                }
            }

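            // Bind device memory to every second slot only, leaving the odd-numbered slots unbound,
            // so the buffer ends up only partially resident.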
            for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; sparseBindNdx += 2)
            {
                const VkSparseMemoryBind sparseMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(), bufferMemRequirements.alignment, memoryType, bufferMemRequirements.alignment * sparseBindNdx);

                deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(sparseMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));

                sparseMemoryBinds.push_back(sparseMemoryBind);
            }

            const VkSparseBufferMemoryBindInfo sparseBufferBindInfo = makeSparseBufferMemoryBindInfo(*sparseBuffer, static_cast<deUint32>(sparseMemoryBinds.size()), &sparseMemoryBinds[0]);

            const VkDeviceGroupBindSparseInfo devGroupBindSparseInfo =
            {
                VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO,    // VkStructureType  sType;
                DE_NULL,                                            // const void*      pNext;
                firstDeviceID,                                      // deUint32         resourceDeviceIndex;
                secondDeviceID,                                     // deUint32         memoryDeviceIndex;
            };
            const VkBindSparseInfo bindSparseInfo =
            {
                VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,                         // VkStructureType                            sType;
                usingDeviceGroups() ? &devGroupBindSparseInfo : DE_NULL,    // const void*                                pNext;
                0u,                                                         // deUint32                                   waitSemaphoreCount;
                DE_NULL,                                                    // const VkSemaphore*                         pWaitSemaphores;
                1u,                                                         // deUint32                                   bufferBindCount;
                &sparseBufferBindInfo,                                      // const VkSparseBufferMemoryBindInfo*        pBufferBinds;
                0u,                                                         // deUint32                                   imageOpaqueBindCount;
                DE_NULL,                                                    // const VkSparseImageOpaqueMemoryBindInfo*   pImageOpaqueBinds;
                0u,                                                         // deUint32                                   imageBindCount;
                DE_NULL,                                                    // const VkSparseImageMemoryBindInfo*         pImageBinds;
                1u,                                                         // deUint32                                   signalSemaphoreCount;
                &bufferMemoryBindSemaphore.get()                            // const VkSemaphore*                         pSignalSemaphores;
            };

            VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL));
        }

        // Create input buffer
        const VkBufferCreateInfo        inputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
        const Unique<VkBuffer>          inputBuffer           (createBuffer(deviceInterface, getDevice(), &inputBufferCreateInfo));
        const de::UniquePtr<Allocation> inputBufferAlloc      (bindBuffer(deviceInterface, getDevice(), getAllocator(), *inputBuffer, MemoryRequirement::HostVisible));

        std::vector<deUint8> referenceData;
        referenceData.resize(m_bufferSize);

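        // Fill the input buffer with a non-zero pattern that repeats every alignment bytes,
        // so that bound and unbound slots can be told apart after the copy.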
        for (deUint32 valueNdx = 0; valueNdx < m_bufferSize; ++valueNdx)
        {
            referenceData[valueNdx] = static_cast<deUint8>((valueNdx % bufferMemRequirements.alignment) + 1u);
        }

        deMemcpy(inputBufferAlloc->getHostPtr(), &referenceData[0], m_bufferSize);

        flushAlloc(deviceInterface, getDevice(), *inputBufferAlloc);

        // Create output buffer
        const VkBufferCreateInfo        outputBufferCreateInfo = makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
        const Unique<VkBuffer>          outputBuffer           (createBuffer(deviceInterface, getDevice(), &outputBufferCreateInfo));
        const de::UniquePtr<Allocation> outputBufferAlloc      (bindBuffer(deviceInterface, getDevice(), getAllocator(), *outputBuffer, MemoryRequirement::HostVisible));

        // Create command buffer for compute and data transfer operations
        const Unique<VkCommandPool>   commandPool  (makeCommandPool(deviceInterface, getDevice(), computeQueue.queueFamilyIndex));
        const Unique<VkCommandBuffer> commandBuffer(allocateCommandBuffer(deviceInterface, getDevice(), *commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

        // Start recording compute and transfer commands
        beginCommandBuffer(deviceInterface, *commandBuffer);

        // Create descriptor set
        const Unique<VkDescriptorSetLayout> descriptorSetLayout(
            DescriptorSetLayoutBuilder()
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
            .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
            .build(deviceInterface, getDevice()));

        // Create compute pipeline
        const Unique<VkShaderModule>   shaderModule   (createShaderModule(deviceInterface, getDevice(), m_context.getBinaryCollection().get("comp"), DE_NULL));
        const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout(deviceInterface, getDevice(), *descriptorSetLayout));
        const Unique<VkPipeline>       computePipeline(makeComputePipeline(deviceInterface, getDevice(), *pipelineLayout, *shaderModule));

        deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);

        const Unique<VkDescriptorPool> descriptorPool(
            DescriptorPoolBuilder()
            .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 2u)
            .build(deviceInterface, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

        const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, getDevice(), *descriptorPool, *descriptorSetLayout));

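        // Point binding 0 at the host-filled input buffer and binding 1 at the sparse buffer,
        // matching the shader's Input/Output declarations.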
        {
            const VkDescriptorBufferInfo inputBufferInfo  = makeDescriptorBufferInfo(*inputBuffer, 0ull, m_bufferSize);
            const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(*sparseBuffer, 0ull, m_bufferSize);

            DescriptorSetUpdateBuilder()
                .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inputBufferInfo)
                .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &sparseBufferInfo)
                .update(deviceInterface, getDevice());
        }

        deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

        {
            const VkBufferMemoryBarrier inputBufferBarrier
                = makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT,
                                          VK_ACCESS_SHADER_READ_BIT,
                                          *inputBuffer,
                                          0ull,
                                          m_bufferSize);

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL);
        }

        deviceInterface.cmdDispatch(*commandBuffer, 1u, 1u, 1u);

        {
            const VkBufferMemoryBarrier sparseBufferBarrier
                = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
                                          VK_ACCESS_TRANSFER_READ_BIT,
                                          *sparseBuffer,
                                          0ull,
                                          m_bufferSize);

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &sparseBufferBarrier, 0u, DE_NULL);
        }

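        // Copy the sparse buffer contents into the host-visible output buffer for verification.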
        {
            const VkBufferCopy bufferCopy = makeBufferCopy(0u, 0u, m_bufferSize);

            deviceInterface.cmdCopyBuffer(*commandBuffer, *sparseBuffer, *outputBuffer, 1u, &bufferCopy);
        }

        {
            const VkBufferMemoryBarrier outputBufferBarrier
                = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                                          VK_ACCESS_HOST_READ_BIT,
                                          *outputBuffer,
                                          0ull,
                                          m_bufferSize);

            deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferBarrier, 0u, DE_NULL);
        }

        // End recording compute and transfer commands
        endCommandBuffer(deviceInterface, *commandBuffer);

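        // The compute/transfer submission waits on the semaphore signalled by vkQueueBindSparse,
        // so the sparse memory binds complete before the buffer is accessed.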
        const VkPipelineStageFlags waitStageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT };

        // Submit transfer commands for execution and wait for completion
        submitCommandsAndWait(deviceInterface, getDevice(), computeQueue.queueHandle, *commandBuffer, 1u, &bufferMemoryBindSemaphore.get(),
            waitStageBits, 0, DE_NULL, usingDeviceGroups(), firstDeviceID);

        // Retrieve data from output buffer to host memory
        invalidateAlloc(deviceInterface, getDevice(), *outputBufferAlloc);

        const deUint8* outputData = static_cast<const deUint8*>(outputBufferAlloc->getHostPtr());

        // Wait for sparse queue to become idle
        deviceInterface.queueWaitIdle(sparseQueue.queueHandle);

        // Compare output data with reference data
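        // Slots with memory bound must contain the reference data; unbound slots must read as zeros
        // when the implementation reports residencyNonResidentStrict.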
        for (deUint32 sparseBindNdx = 0; sparseBindNdx < numSparseSlots; ++sparseBindNdx)
        {
            const deUint32 alignment = static_cast<deUint32>(bufferMemRequirements.alignment);
            const deUint32 offset    = alignment * sparseBindNdx;
            const deUint32 size      = sparseBindNdx == (numSparseSlots - 1) ? m_bufferSize % alignment : alignment;

            if (sparseBindNdx % 2u == 0u)
            {
                if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0)
                    return tcu::TestStatus::fail("Failed");
            }
            else if (physicalDeviceProperties.sparseProperties.residencyNonResidentStrict)
            {
                deMemset(&referenceData[offset], 0u, size);

                if (deMemCmp(&referenceData[offset], outputData + offset, size) != 0)
                    return tcu::TestStatus::fail("Failed");
            }
        }
    }

    return tcu::TestStatus::pass("Passed");
}

TestInstance* BufferSparseResidencyCase::createInstance (Context& context) const
{
    return new BufferSparseResidencyInstance(context, m_bufferSize, m_useDeviceGroups);
}

} // anonymous ns

void addBufferSparseResidencyTests(tcu::TestCaseGroup* group, const bool useDeviceGroups)
{
    group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_10", "", 1 << 10, glu::GLSL_VERSION_440, useDeviceGroups));
    group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_12", "", 1 << 12, glu::GLSL_VERSION_440, useDeviceGroups));
    group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_16", "", 1 << 16, glu::GLSL_VERSION_440, useDeviceGroups));
    group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_17", "", 1 << 17, glu::GLSL_VERSION_440, useDeviceGroups));
    group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_20", "", 1 << 20, glu::GLSL_VERSION_440, useDeviceGroups));
    group->addChild(new BufferSparseResidencyCase(group->getTestContext(), "buffer_size_2_24", "", 1 << 24, glu::GLSL_VERSION_440, useDeviceGroups));
}

} // sparse
} // vkt