1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Sparse buffer tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktSparseResourcesBufferTests.hpp"
25 #include "vktTestCaseUtil.hpp"
26 #include "vktTestGroupUtil.hpp"
27 #include "vktSparseResourcesTestsUtil.hpp"
28 #include "vktSparseResourcesBase.hpp"
29 #include "vktSparseResourcesBufferSparseBinding.hpp"
30 #include "vktSparseResourcesBufferSparseResidency.hpp"
31 #include "vktSparseResourcesBufferMemoryAliasing.hpp"
34 #include "vkRefUtil.hpp"
35 #include "vkPlatform.hpp"
36 #include "vkPrograms.hpp"
37 #include "vkMemUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkQueryUtil.hpp"
40 #include "vkTypeUtil.hpp"
42 #include "tcuTestLog.hpp"
44 #include "deUniquePtr.hpp"
45 #include "deSharedPtr.hpp"
67 typedef SharedPtr<UniquePtr<Allocation> > AllocationSp;
71 RENDER_SIZE = 128, //!< framebuffer size in pixels
72 GRID_SIZE = RENDER_SIZE / 8, //!< number of grid tiles in a row
77 // sparseBinding is implied
78 TEST_FLAG_ALIASED = 1u << 0, //!< sparseResidencyAliased
79 TEST_FLAG_RESIDENCY = 1u << 1, //!< sparseResidencyBuffer
80 TEST_FLAG_NON_RESIDENT_STRICT = 1u << 2, //!< residencyNonResidentStrict
82 typedef deUint32 TestFlags;
84 //! SparseAllocationBuilder output. Owns the allocated memory.
85 struct SparseAllocation
// NOTE(review): the struct's opening/closing braces are elided in this excerpt.
// Plain aggregate: lifetime of `allocations` keeps the VkDeviceMemory referenced
// by `memoryBinds` alive (AllocationSp is a shared pointer to an owning UniquePtr).
87 deUint32 numResourceChunks;
88 VkDeviceSize resourceSize; //!< buffer size in bytes
89 std::vector<AllocationSp> allocations; //!< actual allocated memory
90 std::vector<VkSparseMemoryBind> memoryBinds; //!< memory binds backing the resource
93 //! Utility to lay out memory allocations for a sparse buffer, including holes and aliased regions.
94 //! Will allocate memory upon building.
// NOTE(review): braces, access specifiers and the `struct MemoryBind` header are
// elided in this excerpt; code lines below are kept byte-identical.
95 class SparseAllocationBuilder
98 SparseAllocationBuilder (void);
// Fluent interface: each add* method returns *this so calls can be chained.
100 // \note "chunk" is the smallest (due to alignment) bindable amount of memory
102 SparseAllocationBuilder& addMemoryHole (const deUint32 numChunks = 1u);
103 SparseAllocationBuilder& addResourceHole (const deUint32 numChunks = 1u);
104 SparseAllocationBuilder& addMemoryBind (const deUint32 numChunks = 1u);
105 SparseAllocationBuilder& addAliasedMemoryBind (const deUint32 allocationNdx, const deUint32 chunkOffset, const deUint32 numChunks = 1u);
106 SparseAllocationBuilder& addMemoryAllocation (void);
// Performs the actual device-memory allocation and produces the bind list.
108 MovePtr<SparseAllocation> build (const DeviceInterface& vk,
109 const VkDevice device,
110 Allocator& allocator,
111 VkBufferCreateInfo referenceCreateInfo, //!< buffer size is ignored in this info
112 const VkDeviceSize minChunkSize = 0ull) const; //!< make sure chunks are at least this big
// Fields of the (elided) private struct MemoryBind — one recorded bind request,
// all quantities expressed in whole chunks:
117 deUint32 allocationNdx;
118 deUint32 resourceChunkNdx;
119 deUint32 memoryChunkNdx;
// Builder state: cursors for the current allocation segment, resource offset
// and memory offset, plus the recorded binds and per-allocation chunk totals.
123 deUint32 m_allocationNdx;
124 deUint32 m_resourceChunkNdx;
125 deUint32 m_memoryChunkNdx;
126 std::vector<MemoryBind> m_memoryBinds;
127 std::vector<deUint32> m_chunksPerAllocation;
// Start with all cursors at zero and a single, empty allocation segment;
// addMemoryAllocation() later begins additional segments.
131 SparseAllocationBuilder::SparseAllocationBuilder (void)
132 : m_allocationNdx (0)
133 , m_resourceChunkNdx (0)
134 , m_memoryChunkNdx (0)
136 m_chunksPerAllocation.push_back(0);
139 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryHole (const deUint32 numChunks)
141 m_memoryChunkNdx += numChunks;
142 m_chunksPerAllocation[m_allocationNdx] += numChunks;
147 SparseAllocationBuilder& SparseAllocationBuilder::addResourceHole (const deUint32 numChunks)
149 m_resourceChunkNdx += numChunks;
154 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryAllocation (void)
156 DE_ASSERT(m_memoryChunkNdx != 0); // doesn't make sense to have an empty allocation
158 m_allocationNdx += 1;
159 m_memoryChunkNdx = 0;
160 m_chunksPerAllocation.push_back(0);
165 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryBind (const deUint32 numChunks)
167 const MemoryBind memoryBind =
174 m_memoryBinds.push_back(memoryBind);
176 m_resourceChunkNdx += numChunks;
177 m_memoryChunkNdx += numChunks;
178 m_chunksPerAllocation[m_allocationNdx] += numChunks;
183 SparseAllocationBuilder& SparseAllocationBuilder::addAliasedMemoryBind (const deUint32 allocationNdx, const deUint32 chunkOffset, const deUint32 numChunks)
185 DE_ASSERT(allocationNdx <= m_allocationNdx);
187 const MemoryBind memoryBind =
194 m_memoryBinds.push_back(memoryBind);
196 m_resourceChunkNdx += numChunks;
201 inline VkMemoryRequirements requirementsWithSize (VkMemoryRequirements requirements, const VkDeviceSize size)
203 requirements.size = size;
// Allocate device memory and produce the final SparseAllocation.
// A minimal reference buffer is created solely to query size/alignment
// requirements; the effective chunk size is minChunkSize rounded up to the
// required alignment (never smaller than the alignment itself).
// NOTE(review): braces and the VkSparseMemoryBind initializer's brace lines are
// elided in this excerpt; code lines are kept byte-identical.
207 MovePtr<SparseAllocation> SparseAllocationBuilder::build (const DeviceInterface& vk,
208 const VkDevice device,
209 Allocator& allocator,
210 VkBufferCreateInfo referenceCreateInfo,
211 const VkDeviceSize minChunkSize) const
214 MovePtr<SparseAllocation> sparseAllocation (new SparseAllocation());
// Size is irrelevant for alignment; use the smallest meaningful buffer.
216 referenceCreateInfo.size = sizeof(deUint32);
217 const Unique<VkBuffer> refBuffer (createBuffer(vk, device, &referenceCreateInfo));
218 const VkMemoryRequirements memoryRequirements = getBufferMemoryRequirements(vk, device, *refBuffer);
219 const VkDeviceSize chunkSize = std::max(memoryRequirements.alignment, static_cast<VkDeviceSize>(deAlign64(minChunkSize, memoryRequirements.alignment)));
// One device allocation per builder segment, sized by its accumulated chunk count.
221 for (std::vector<deUint32>::const_iterator numChunksIter = m_chunksPerAllocation.begin(); numChunksIter != m_chunksPerAllocation.end(); ++numChunksIter)
223 sparseAllocation->allocations.push_back(makeDeSharedPtr(
224 allocator.allocate(requirementsWithSize(memoryRequirements, *numChunksIter * chunkSize), MemoryRequirement::Any)));
// Translate each recorded chunk-granularity MemoryBind into a byte-granularity
// VkSparseMemoryBind against its allocation's memory.
227 for (std::vector<MemoryBind>::const_iterator memBindIter = m_memoryBinds.begin(); memBindIter != m_memoryBinds.end(); ++memBindIter)
229 const Allocation& alloc = **sparseAllocation->allocations[memBindIter->allocationNdx];
230 const VkSparseMemoryBind bind =
232 memBindIter->resourceChunkNdx * chunkSize, // VkDeviceSize resourceOffset;
233 memBindIter->numChunks * chunkSize, // VkDeviceSize size;
234 alloc.getMemory(), // VkDeviceMemory memory;
235 alloc.getOffset() + memBindIter->memoryChunkNdx * chunkSize, // VkDeviceSize memoryOffset;
236 (VkSparseMemoryBindFlags)0, // VkSparseMemoryBindFlags flags;
238 sparseAllocation->memoryBinds.push_back(bind);
// Track the furthest bound byte so resourceSize covers every bind.
239 referenceCreateInfo.size = std::max(referenceCreateInfo.size, bind.resourceOffset + bind.size);
242 sparseAllocation->resourceSize = referenceCreateInfo.size;
243 sparseAllocation->numResourceChunks = m_resourceChunkNdx;
245 return sparseAllocation;
248 VkImageCreateInfo makeImageCreateInfo (const VkFormat format, const IVec2& size, const VkImageUsageFlags usage)
250 const VkImageCreateInfo imageParams =
252 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
253 DE_NULL, // const void* pNext;
254 (VkImageCreateFlags)0, // VkImageCreateFlags flags;
255 VK_IMAGE_TYPE_2D, // VkImageType imageType;
256 format, // VkFormat format;
257 makeExtent3D(size.x(), size.y(), 1), // VkExtent3D extent;
258 1u, // deUint32 mipLevels;
259 1u, // deUint32 arrayLayers;
260 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
261 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
262 usage, // VkImageUsageFlags usage;
263 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
264 0u, // deUint32 queueFamilyIndexCount;
265 DE_NULL, // const deUint32* pQueueFamilyIndices;
266 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
271 Move<VkRenderPass> makeRenderPass (const DeviceInterface& vk,
272 const VkDevice device,
273 const VkFormat colorFormat)
275 const VkAttachmentDescription colorAttachmentDescription =
277 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
278 colorFormat, // VkFormat format;
279 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
280 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
281 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
282 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
283 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
284 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
285 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
288 const VkAttachmentReference colorAttachmentRef =
290 0u, // deUint32 attachment;
291 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
294 const VkSubpassDescription subpassDescription =
296 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
297 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
298 0u, // deUint32 inputAttachmentCount;
299 DE_NULL, // const VkAttachmentReference* pInputAttachments;
300 1u, // deUint32 colorAttachmentCount;
301 &colorAttachmentRef, // const VkAttachmentReference* pColorAttachments;
302 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
303 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
304 0u, // deUint32 preserveAttachmentCount;
305 DE_NULL // const deUint32* pPreserveAttachments;
308 const VkRenderPassCreateInfo renderPassInfo =
310 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
311 DE_NULL, // const void* pNext;
312 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
313 1u, // deUint32 attachmentCount;
314 &colorAttachmentDescription, // const VkAttachmentDescription* pAttachments;
315 1u, // deUint32 subpassCount;
316 &subpassDescription, // const VkSubpassDescription* pSubpasses;
317 0u, // deUint32 dependencyCount;
318 DE_NULL // const VkSubpassDependency* pDependencies;
321 return createRenderPass(vk, device, &renderPassInfo);
// Build a fixed-function graphics pipeline: one vec4 vertex attribute, fixed
// viewport/scissor covering renderSize, no depth/stencil tests, no blending,
// no dynamic state. Shader stages are supplied by the caller.
// NOTE(review): braces and some initializer lines (e.g. the remaining
// makeViewport arguments and the scissor offset) are elided in this excerpt;
// code lines are kept byte-identical.
324 Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
325 const VkDevice device,
326 const VkPipelineLayout pipelineLayout,
327 const VkRenderPass renderPass,
328 const IVec2 renderSize,
329 const VkPrimitiveTopology topology,
330 const deUint32 stageCount,
331 const VkPipelineShaderStageCreateInfo* pStages)
// Single tightly-packed vec4 position stream at binding 0.
333 const VkVertexInputBindingDescription vertexInputBindingDescription =
335 0u, // uint32_t binding;
336 sizeof(Vec4), // uint32_t stride;
337 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
340 const VkVertexInputAttributeDescription vertexInputAttributeDescription =
342 0u, // uint32_t location;
343 0u, // uint32_t binding;
344 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
345 0u, // uint32_t offset;
348 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
350 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
351 DE_NULL, // const void* pNext;
352 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
353 1u, // uint32_t vertexBindingDescriptionCount;
354 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
355 1u, // uint32_t vertexAttributeDescriptionCount;
356 &vertexInputAttributeDescription, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
359 const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
361 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
362 DE_NULL, // const void* pNext;
363 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
364 topology, // VkPrimitiveTopology topology;
365 VK_FALSE, // VkBool32 primitiveRestartEnable;
// Static viewport and scissor spanning the full render target.
368 const VkViewport viewport = makeViewport(
370 static_cast<float>(renderSize.x()), static_cast<float>(renderSize.y()),
373 const VkRect2D scissor = {
375 makeExtent2D(static_cast<deUint32>(renderSize.x()), static_cast<deUint32>(renderSize.y())),
378 const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
380 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
381 DE_NULL, // const void* pNext;
382 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags;
383 1u, // uint32_t viewportCount;
384 &viewport, // const VkViewport* pViewports;
385 1u, // uint32_t scissorCount;
386 &scissor, // const VkRect2D* pScissors;
// No culling so triangle winding never hides test geometry.
389 const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
391 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
392 DE_NULL, // const void* pNext;
393 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
394 VK_FALSE, // VkBool32 depthClampEnable;
395 VK_FALSE, // VkBool32 rasterizerDiscardEnable;
396 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
397 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
398 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
399 VK_FALSE, // VkBool32 depthBiasEnable;
400 0.0f, // float depthBiasConstantFactor;
401 0.0f, // float depthBiasClamp;
402 0.0f, // float depthBiasSlopeFactor;
403 1.0f, // float lineWidth;
406 const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
408 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
409 DE_NULL, // const void* pNext;
410 (VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags;
411 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
412 VK_FALSE, // VkBool32 sampleShadingEnable;
413 0.0f, // float minSampleShading;
414 DE_NULL, // const VkSampleMask* pSampleMask;
415 VK_FALSE, // VkBool32 alphaToCoverageEnable;
416 VK_FALSE // VkBool32 alphaToOneEnable;
// Stencil state is a no-op (tests disabled below) but must still be valid.
419 const VkStencilOpState stencilOpState = makeStencilOpState(
420 VK_STENCIL_OP_KEEP, // stencil fail
421 VK_STENCIL_OP_KEEP, // depth & stencil pass
422 VK_STENCIL_OP_KEEP, // depth only fail
423 VK_COMPARE_OP_ALWAYS, // compare op
428 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
430 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
431 DE_NULL, // const void* pNext;
432 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
433 VK_FALSE, // VkBool32 depthTestEnable;
434 VK_FALSE, // VkBool32 depthWriteEnable;
435 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
436 VK_FALSE, // VkBool32 depthBoundsTestEnable;
437 VK_FALSE, // VkBool32 stencilTestEnable;
438 stencilOpState, // VkStencilOpState front;
439 stencilOpState, // VkStencilOpState back;
440 0.0f, // float minDepthBounds;
441 1.0f, // float maxDepthBounds;
// Blending disabled; fragments overwrite all color channels.
444 const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
445 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
447 VK_FALSE, // VkBool32 blendEnable;
448 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor;
449 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
450 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
451 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor;
452 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
453 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
454 colorComponentsAll, // VkColorComponentFlags colorWriteMask;
457 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
459 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
460 DE_NULL, // const void* pNext;
461 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
462 VK_FALSE, // VkBool32 logicOpEnable;
463 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
464 1u, // deUint32 attachmentCount;
465 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
466 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
469 const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
471 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
472 DE_NULL, // const void* pNext;
473 (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
474 stageCount, // deUint32 stageCount;
475 pStages, // const VkPipelineShaderStageCreateInfo* pStages;
476 &vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
477 &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
478 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
479 &pipelineViewportStateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
480 &pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
481 &pipelineMultisampleStateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
482 &pipelineDepthStencilStateInfo, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
483 &pipelineColorBlendStateInfo, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
484 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
485 pipelineLayout, // VkPipelineLayout layout;
486 renderPass, // VkRenderPass renderPass;
487 0u, // deUint32 subpass;
488 DE_NULL, // VkPipeline basePipelineHandle;
489 0, // deInt32 basePipelineIndex;
492 return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo);
495 //! Return true if there are any red (or all zero) pixels in the image
496 bool imageHasErrorPixels (const tcu::ConstPixelBufferAccess image)
498 const Vec4 errorColor = Vec4(1.0f, 0.0f, 0.0f, 1.0f);
499 const Vec4 blankColor = Vec4();
501 for (int y = 0; y < image.getHeight(); ++y)
502 for (int x = 0; x < image.getWidth(); ++x)
504 const Vec4 color = image.getPixel(x, y);
505 if (color == errorColor || color == blankColor)
// Per-stage specialization info; missing stages yield DE_NULL via operator[].
515 typedef std::map<VkShaderStageFlagBits, const VkSpecializationInfo*> SpecializationMap;
// NOTE(review): the enclosing `class Renderer` and `struct Delegate` header
// lines are elided in this excerpt.
517 //! Use the delegate to bind descriptor sets, vertex buffers, etc. and make a draw call
520 virtual ~Delegate (void) {}
521 virtual void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const = 0;
// Renderer constructor: creates the color target (image + view + host-readback
// buffer supplied by the caller), compiles the vert/frag modules from the
// binary collection, and builds render pass, framebuffer, pipeline and command
// buffer up-front so draw() only needs to record and submit.
// NOTE(review): body braces and array-initializer braces are elided in this
// excerpt; code lines are kept byte-identical.
524 Renderer (const DeviceInterface& vk,
525 const VkDevice device,
526 Allocator& allocator,
527 const deUint32 queueFamilyIndex,
528 const VkDescriptorSetLayout descriptorSetLayout, //!< may be NULL, if no descriptors are used
529 ProgramCollection<vk::ProgramBinary>& binaryCollection,
530 const std::string& vertexName,
531 const std::string& fragmentName,
532 const VkBuffer colorBuffer,
533 const IVec2& renderSize,
534 const VkFormat colorFormat,
535 const Vec4& clearColor,
536 const VkPrimitiveTopology topology,
537 SpecializationMap specMap = SpecializationMap())
538 : m_colorBuffer (colorBuffer)
539 , m_renderSize (renderSize)
540 , m_colorFormat (colorFormat)
541 , m_colorSubresourceRange (makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u))
542 , m_clearColor (clearColor)
543 , m_topology (topology)
544 , m_descriptorSetLayout (descriptorSetLayout)
546 m_colorImage = makeImage (vk, device, makeImageCreateInfo(m_colorFormat, m_renderSize, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
547 m_colorImageAlloc = bindImage (vk, device, allocator, *m_colorImage, MemoryRequirement::Any);
548 m_colorAttachment = makeImageView (vk, device, *m_colorImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorSubresourceRange);
550 m_vertexModule = createShaderModule (vk, device, binaryCollection.get(vertexName), 0u);
551 m_fragmentModule = createShaderModule (vk, device, binaryCollection.get(fragmentName), 0u);
// Two-stage array (vertex, fragment); specMap[] default-inserts DE_NULL when
// a stage has no specialization info, which is valid per the Vulkan spec.
553 const VkPipelineShaderStageCreateInfo pShaderStages[] =
556 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
557 DE_NULL, // const void* pNext;
558 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
559 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
560 *m_vertexModule, // VkShaderModule module;
561 "main", // const char* pName;
562 specMap[VK_SHADER_STAGE_VERTEX_BIT], // const VkSpecializationInfo* pSpecializationInfo;
565 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
566 DE_NULL, // const void* pNext;
567 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
568 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
569 *m_fragmentModule, // VkShaderModule module;
570 "main", // const char* pName;
571 specMap[VK_SHADER_STAGE_FRAGMENT_BIT], // const VkSpecializationInfo* pSpecializationInfo;
575 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
576 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, 1u, &m_colorAttachment.get(),
577 static_cast<deUint32>(m_renderSize.x()), static_cast<deUint32>(m_renderSize.y()));
578 m_pipelineLayout = makePipelineLayout (vk, device, m_descriptorSetLayout);
579 m_pipeline = makeGraphicsPipeline (vk, device, *m_pipelineLayout, *m_renderPass, m_renderSize, m_topology, DE_LENGTH_OF_ARRAY(pShaderStages), pShaderStages);
580 m_cmdPool = makeCommandPool (vk, device, queueFamilyIndex);
581 m_cmdBuffer = allocateCommandBuffer (vk, device, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
584 void draw (const DeviceInterface& vk,
585 const VkDevice device,
587 const Delegate& drawDelegate) const
589 beginCommandBuffer(vk, *m_cmdBuffer);
591 const VkClearValue clearValue = makeClearValueColor(m_clearColor);
592 const VkRect2D renderArea =
595 makeExtent2D(m_renderSize.x(), m_renderSize.y()),
597 const VkRenderPassBeginInfo renderPassBeginInfo =
599 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
600 DE_NULL, // const void* pNext;
601 *m_renderPass, // VkRenderPass renderPass;
602 *m_framebuffer, // VkFramebuffer framebuffer;
603 renderArea, // VkRect2D renderArea;
604 1u, // uint32_t clearValueCount;
605 &clearValue, // const VkClearValue* pClearValues;
607 vk.cmdBeginRenderPass(*m_cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
609 vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
610 drawDelegate.rendererDraw(*m_pipelineLayout, *m_cmdBuffer);
612 vk.cmdEndRenderPass(*m_cmdBuffer);
614 // Prepare color image for copy
616 const VkImageMemoryBarrier barriers[] =
619 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
620 DE_NULL, // const void* pNext;
621 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags outputMask;
622 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags inputMask;
623 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
624 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
625 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
626 VK_QUEUE_FAMILY_IGNORED, // deUint32 destQueueFamilyIndex;
627 *m_colorImage, // VkImage image;
628 m_colorSubresourceRange, // VkImageSubresourceRange subresourceRange;
632 vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
633 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
635 // Color image -> host buffer
637 const VkBufferImageCopy region =
639 0ull, // VkDeviceSize bufferOffset;
640 0u, // uint32_t bufferRowLength;
641 0u, // uint32_t bufferImageHeight;
642 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u), // VkImageSubresourceLayers imageSubresource;
643 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
644 makeExtent3D(m_renderSize.x(), m_renderSize.y(), 1u), // VkExtent3D imageExtent;
647 vk.cmdCopyImageToBuffer(*m_cmdBuffer, *m_colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_colorBuffer, 1u, ®ion);
649 // Buffer write barrier
651 const VkBufferMemoryBarrier barriers[] =
654 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
655 DE_NULL, // const void* pNext;
656 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
657 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
658 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
659 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
660 m_colorBuffer, // VkBuffer buffer;
661 0ull, // VkDeviceSize offset;
662 VK_WHOLE_SIZE, // VkDeviceSize size;
666 vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
667 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers, DE_NULL, 0u);
670 VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer));
671 submitCommandsAndWait(vk, device, queue, *m_cmdBuffer);
// Renderer state. The const members are fixed at construction; the Move<>/
// MovePtr<> members are RAII handles destroyed in reverse declaration order.
675 const VkBuffer m_colorBuffer;
676 const IVec2 m_renderSize;
677 const VkFormat m_colorFormat;
678 const VkImageSubresourceRange m_colorSubresourceRange;
679 const Vec4 m_clearColor;
680 const VkPrimitiveTopology m_topology;
681 const VkDescriptorSetLayout m_descriptorSetLayout;
683 Move<VkImage> m_colorImage;
684 MovePtr<Allocation> m_colorImageAlloc;
685 Move<VkImageView> m_colorAttachment;
686 Move<VkShaderModule> m_vertexModule;
687 Move<VkShaderModule> m_fragmentModule;
688 Move<VkRenderPass> m_renderPass;
689 Move<VkFramebuffer> m_framebuffer;
690 Move<VkPipelineLayout> m_pipelineLayout;
691 Move<VkPipeline> m_pipeline;
692 Move<VkCommandPool> m_cmdPool;
693 Move<VkCommandBuffer> m_cmdBuffer;
// Noncopyable: declared but not defined (pre-C++11 "= delete" idiom).
696 Renderer (const Renderer&);
697 Renderer& operator= (const Renderer&);
700 void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const VkQueue sparseQueue, const VkBuffer buffer, const SparseAllocation& sparseAllocation)
702 const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo =
704 buffer, // VkBuffer buffer;
705 static_cast<deUint32>(sparseAllocation.memoryBinds.size()), // uint32_t bindCount;
706 &sparseAllocation.memoryBinds[0], // const VkSparseMemoryBind* pBinds;
709 const VkBindSparseInfo bindInfo =
711 VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, // VkStructureType sType;
712 DE_NULL, // const void* pNext;
713 0u, // uint32_t waitSemaphoreCount;
714 DE_NULL, // const VkSemaphore* pWaitSemaphores;
715 1u, // uint32_t bufferBindCount;
716 &sparseBufferMemoryBindInfo, // const VkSparseBufferMemoryBindInfo* pBufferBinds;
717 0u, // uint32_t imageOpaqueBindCount;
718 DE_NULL, // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
719 0u, // uint32_t imageBindCount;
720 DE_NULL, // const VkSparseImageMemoryBindInfo* pImageBinds;
721 0u, // uint32_t signalSemaphoreCount;
722 DE_NULL, // const VkSemaphore* pSignalSemaphores;
725 const Unique<VkFence> fence(createFence(vk, device));
727 VK_CHECK(vk.queueBindSparse(sparseQueue, 1u, &bindInfo, *fence));
728 VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~0ull));
// Base instance for sparse buffer tests: checks the device features required
// by the test flags, creates a device with sparse + universal queues, and owns
// the host-visible color readback buffer. Acts as the Renderer's draw delegate.
// NOTE(review): braces, access specifiers and some member declarations (e.g.
// the m_sparseQueue member) are elided in this excerpt; code lines are kept
// byte-identical.
731 class SparseBufferTestInstance : public SparseResourcesBaseInstance, Renderer::Delegate
734 SparseBufferTestInstance (Context& context, const TestFlags flags)
735 : SparseResourcesBaseInstance (context)
736 , m_aliased ((flags & TEST_FLAG_ALIASED) != 0)
737 , m_residency ((flags & TEST_FLAG_RESIDENCY) != 0)
738 , m_nonResidentStrict ((flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0)
739 , m_renderSize (RENDER_SIZE, RENDER_SIZE)
740 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
741 , m_colorBufferSize (m_renderSize.x() * m_renderSize.y() * tcu::getPixelSize(mapVkFormat(m_colorFormat)))
// Skip (NotSupported) rather than fail when the implementation lacks a feature.
743 const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
745 if (!features.sparseBinding)
746 TCU_THROW(NotSupportedError, "Missing feature: sparseBinding");
748 if (m_residency && !features.sparseResidencyBuffer)
749 TCU_THROW(NotSupportedError, "Missing feature: sparseResidencyBuffer");
751 if (m_aliased && !features.sparseResidencyAliased)
752 TCU_THROW(NotSupportedError, "Missing feature: sparseResidencyAliased");
754 if (m_nonResidentStrict && !m_context.getDeviceProperties().sparseProperties.residencyNonResidentStrict)
755 TCU_THROW(NotSupportedError, "Missing sparse property: residencyNonResidentStrict");
// One sparse-binding queue plus one graphics+compute ("universal") queue.
758 QueueRequirementsVec requirements;
759 requirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
760 requirements.push_back(QueueRequirements(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 1u));
762 createDeviceSupportingQueues(requirements);
765 const DeviceInterface& vk = getDeviceInterface();
766 m_sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0u);
767 m_universalQueue = getQueue(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 0u);
769 m_sharedQueueFamilyIndices[0] = m_sparseQueue.queueFamilyIndex;
770 m_sharedQueueFamilyIndices[1] = m_universalQueue.queueFamilyIndex;
// Host-visible readback target, zero-filled so stale data can't pass the check.
772 m_colorBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
773 m_colorBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_colorBuffer, MemoryRequirement::HostVisible);
775 deMemset(m_colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(m_colorBufferSize));
776 flushMappedMemoryRange(vk, getDevice(), m_colorBufferAlloc->getMemory(), m_colorBufferAlloc->getOffset(), m_colorBufferSize);
// Create-info template for the sparse buffer under test; size is filled in by
// the caller. Uses CONCURRENT sharing when sparse and universal queues belong
// to different families.
780 VkBufferCreateInfo getSparseBufferCreateInfo (const VkBufferUsageFlags usage) const
782 VkBufferCreateFlags flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
784 flags |= VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
786 flags |= VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
788 VkBufferCreateInfo referenceBufferCreateInfo =
790 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
791 DE_NULL, // const void* pNext;
792 flags, // VkBufferCreateFlags flags;
793 0u, // override later // VkDeviceSize size;
794 VK_BUFFER_USAGE_TRANSFER_DST_BIT | usage, // VkBufferUsageFlags usage;
795 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
796 0u, // uint32_t queueFamilyIndexCount;
797 DE_NULL, // const uint32_t* pQueueFamilyIndices;
800 if (m_sparseQueue.queueFamilyIndex != m_universalQueue.queueFamilyIndex)
802 referenceBufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
803 referenceBufferCreateInfo.queueFamilyIndexCount = DE_LENGTH_OF_ARRAY(m_sharedQueueFamilyIndices);
804 referenceBufferCreateInfo.pQueueFamilyIndices = m_sharedQueueFamilyIndices;
807 return referenceBufferCreateInfo;
// Render with the "vert"/"frag" programs; the red clear color means any pixel
// the fragment shader does not overwrite reads back as an error pixel.
810 void draw (const VkPrimitiveTopology topology,
811 const VkDescriptorSetLayout descriptorSetLayout = DE_NULL,
812 Renderer::SpecializationMap specMap = Renderer::SpecializationMap())
814 const UniquePtr<Renderer> renderer(new Renderer(
815 getDeviceInterface(), getDevice(), getAllocator(), m_universalQueue.queueFamilyIndex, descriptorSetLayout,
816 m_context.getBinaryCollection(), "vert", "frag", *m_colorBuffer, m_renderSize, m_colorFormat, Vec4(1.0f, 0.0f, 0.0f, 1.0f), topology, specMap));
818 renderer->draw(getDeviceInterface(), getDevice(), m_universalQueue.queueHandle, *this);
// Read back the color buffer, log it, and fail on any red/blank pixel.
821 tcu::TestStatus verifyDrawResult (void) const
823 invalidateMappedMemoryRange(getDeviceInterface(), getDevice(), m_colorBufferAlloc->getMemory(), 0ull, m_colorBufferSize);
825 const tcu::ConstPixelBufferAccess resultImage (mapVkFormat(m_colorFormat), m_renderSize.x(), m_renderSize.y(), 1u, m_colorBufferAlloc->getHostPtr());
827 m_context.getTestContext().getLog()
828 << tcu::LogImageSet("Result", "Result") << tcu::LogImage("color0", "", resultImage) << tcu::TestLog::EndImageSet;
830 if (imageHasErrorPixels(resultImage))
831 return tcu::TestStatus::fail("Some buffer values were incorrect");
833 return tcu::TestStatus::pass("Pass");
836 const bool m_aliased;
837 const bool m_residency;
838 const bool m_nonResidentStrict;
841 Queue m_universalQueue;
844 const IVec2 m_renderSize;
845 const VkFormat m_colorFormat;
846 const VkDeviceSize m_colorBufferSize;
848 Move<VkBuffer> m_colorBuffer;
849 MovePtr<Allocation> m_colorBufferAlloc;
851 deUint32 m_sharedQueueFamilyIndices[2];
//! Programs for the UBO tests: a pass-through vertex shader plus a fragment shader that
//! validates the contents of a sparse-backed UBO against the fill pattern written by
//! UBOTestInstance::iterate() (ivec4(3*ndx ^ 127, 0, 0, 0)).
854 void initProgramsDrawWithUBO (vk::SourceCollections& programCollection, const TestFlags flags)
// Vertex shader: pass-through of clip-space positions.
858 std::ostringstream src;
859 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
861 << "layout(location = 0) in vec4 in_position;\n"
863 << "out gl_PerVertex {\n"
864 << " vec4 gl_Position;\n"
867 << "void main(void)\n"
869 << " gl_Position = in_position;\n"
872 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
// Fragment shader: each fragment checks the UBO elements it is responsible for.
877 const bool aliased = (flags & TEST_FLAG_ALIASED) != 0;
878 const bool residency = (flags & TEST_FLAG_RESIDENCY) != 0;
879 const bool nonResidentStrict = (flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0;
// Aliased variant: the last chunk aliases the first, so indices wrap at nonAliasedSize.
880 const std::string valueExpr = (aliased ? "ivec4(3*(ndx % nonAliasedSize) ^ 127, 0, 0, 0)" : "ivec4(3*ndx ^ 127, 0, 0, 0)");
882 std::ostringstream src;
883 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
885 << "layout(location = 0) out vec4 o_color;\n"
// dataSize/chunkSize are specialization constants supplied at pipeline creation
// (see the VkSpecializationMapEntry setup in UBOTestInstance::iterate()).
887 << "layout(constant_id = 1) const int dataSize = 1;\n"
888 << "layout(constant_id = 2) const int chunkSize = 1;\n"
890 << "layout(set = 0, binding = 0, std140) uniform SparseBuffer {\n"
891 << " ivec4 data[dataSize];\n"
894 << "void main(void)\n"
// Each fragment strides across the data so together all fragments cover every element.
896 << " const int fragNdx = int(gl_FragCoord.x) + " << RENDER_SIZE << " * int(gl_FragCoord.y);\n"
897 << " const int pageSize = " << RENDER_SIZE << " * " << RENDER_SIZE << ";\n"
898 << " const int numChunks = dataSize / chunkSize;\n";
901 src << " const int nonAliasedSize = (numChunks > 1 ? dataSize - chunkSize : dataSize);\n";
903 src << " bool ok = true;\n"
905 << " for (int ndx = fragNdx; ndx < dataSize; ndx += pageSize)\n"
908 if (residency && nonResidentStrict)
// With residencyNonResidentStrict, reads from the unbound second chunk must return zeros.
910 src << " if (ndx >= chunkSize && ndx < 2*chunkSize)\n"
911 << " ok = ok && (ubo.data[ndx] == ivec4(0));\n"
913 << " ok = ok && (ubo.data[ndx] == " + valueExpr + ");\n";
// NOTE(review): the branch structure between these cases relies on lines elided from this
// chunk (e.g. else/skip statements); confirm against the full file before refactoring.
917 src << " if (ndx >= chunkSize && ndx < 2*chunkSize)\n"
919 << " ok = ok && (ubo.data[ndx] == " << valueExpr << ");\n";
922 src << " ok = ok && (ubo.data[ndx] == " << valueExpr << ");\n";
// Green on success, red on any mismatch (red is also the clear color).
927 << " o_color = vec4(0.0, 1.0, 0.0, 1.0);\n"
929 << " o_color = vec4(1.0, 0.0, 0.0, 1.0);\n"
932 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
936 //! Sparse buffer backing a UBO
// Draws a full-screen quad whose fragment shader verifies every element of a UBO backed
// by sparse memory (shaders built by initProgramsDrawWithUBO()).
937 class UBOTestInstance : public SparseBufferTestInstance
940 UBOTestInstance (Context& context, const TestFlags flags)
941 : SparseBufferTestInstance (context, flags)
// Renderer callback: binds the quad vertex buffer and the UBO descriptor set, then draws
// 4 vertices (used with VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, see iterate()).
945 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
947 const DeviceInterface& vk = getDeviceInterface();
948 const VkDeviceSize vertexOffset = 0ull;
950 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
951 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
952 vk.cmdDraw (cmdBuffer, 4u, 1u, 0u, 0u);
// Builds a sparse-backed buffer, fills it with a known pattern through a staging copy,
// binds it as a UBO and draws a full-screen quad whose fragment shader validates every
// element (see initProgramsDrawWithUBO()). Returns pass/fail from the rendered image.
955 tcu::TestStatus iterate (void)
957 const DeviceInterface& vk = getDeviceInterface();
958 MovePtr<SparseAllocation> sparseAllocation;
959 Move<VkBuffer> sparseBuffer;
960 Move<VkBuffer> sparseBufferAliased;
962 // Set up the sparse buffer
964 VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
965 const VkDeviceSize minChunkSize = 512u; // make sure the smallest allocation is at least this big
966 deUint32 numMaxChunks = 0u;
968 // Check how many chunks we can allocate given the alignment and size requirements of UBOs
970 const UniquePtr<SparseAllocation> minAllocation(SparseAllocationBuilder()
972 .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize));
974 numMaxChunks = deMaxu32(static_cast<deUint32>(m_context.getDeviceProperties().limits.maxUniformBufferRange / minAllocation->resourceSize), 1u);
// Fall back to a trivial allocation when maxUniformBufferRange can't fit the multi-chunk layout.
977 if (numMaxChunks < 4)
979 sparseAllocation = SparseAllocationBuilder()
981 .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
985 // Try to use a non-trivial memory allocation scheme to make it different from a non-sparse binding
986 SparseAllocationBuilder builder;
987 builder.addMemoryBind();
990 builder.addResourceHole();
993 .addMemoryAllocation()
998 builder.addAliasedMemoryBind(0u, 0u);
1000 sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
// The whole resource must be addressable through a single UBO binding.
1001 DE_ASSERT(sparseAllocation->resourceSize <= m_context.getDeviceProperties().limits.maxUniformBufferRange);
1004 // Create the buffer
1005 referenceBufferCreateInfo.size = sparseAllocation->resourceSize;
1006 sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1007 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBuffer, *sparseAllocation);
// Aliased variant: bind a second buffer to the very same sparse allocation.
1011 sparseBufferAliased = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1012 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBufferAliased, *sparseAllocation);
// Upload the reference pattern through a host-visible staging buffer.
1018 const bool hasAliasedChunk = (m_aliased && sparseAllocation->memoryBinds.size() > 1u);
1019 const VkDeviceSize chunkSize = sparseAllocation->resourceSize / sparseAllocation->numResourceChunks;
1020 const VkDeviceSize stagingBufferSize = sparseAllocation->resourceSize - (hasAliasedChunk ? chunkSize : 0);
1021 const deUint32 numBufferEntries = static_cast<deUint32>(stagingBufferSize / sizeof(IVec4));
1023 const Unique<VkBuffer> stagingBuffer (makeBuffer(vk, getDevice(), makeBufferCreateInfo(stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT)));
1024 const UniquePtr<Allocation> stagingBufferAlloc (bindBuffer(vk, getDevice(), getAllocator(), *stagingBuffer, MemoryRequirement::HostVisible));
1027 // If aliased chunk is used, the staging buffer is smaller than the sparse buffer and we don't overwrite the last chunk
1028 IVec4* const pData = static_cast<IVec4*>(stagingBufferAlloc->getHostPtr());
// Fill pattern must match valueExpr in initProgramsDrawWithUBO().
1029 for (deUint32 i = 0; i < numBufferEntries; ++i)
1030 pData[i] = IVec4(3*i ^ 127, 0, 0, 0);
1032 flushMappedMemoryRange(vk, getDevice(), stagingBufferAlloc->getMemory(), stagingBufferAlloc->getOffset(), stagingBufferSize);
1034 const VkBufferCopy copyRegion =
1036 0ull, // VkDeviceSize srcOffset;
1037 0ull, // VkDeviceSize dstOffset;
1038 stagingBufferSize, // VkDeviceSize size;
1041 const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
1042 const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
1044 beginCommandBuffer (vk, *cmdBuffer);
// FIX: argument was mis-encoded as "©Region" ("&copy" collapsed into the copyright sign);
// restored to the intended &copyRegion.
1045 vk.cmdCopyBuffer (*cmdBuffer, *stagingBuffer, *sparseBuffer, 1u, &copyRegion);
1046 endCommandBuffer (vk, *cmdBuffer);
1048 submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
1049 // Once the fence is signaled, the write is also available to the aliasing buffer.
1053 // Make sure that we don't try to access a larger range than is allowed. This only applies to a single chunk case.
1054 const deUint32 maxBufferRange = deMinu32(static_cast<deUint32>(sparseAllocation->resourceSize), m_context.getDeviceProperties().limits.maxUniformBufferRange);
// Descriptor set with a single UBO binding visible to the fragment stage.
1058 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1059 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_FRAGMENT_BIT)
1060 .build(vk, getDevice());
1062 m_descriptorPool = DescriptorPoolBuilder()
1063 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
1064 .build(vk, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1066 m_descriptorSet = makeDescriptorSet(vk, getDevice(), *m_descriptorPool, *m_descriptorSetLayout);
// In the aliased variant the shader reads through the second (aliasing) buffer.
1068 const VkBuffer buffer = (m_aliased ? *sparseBufferAliased : *sparseBuffer);
1069 const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(buffer, 0ull, maxBufferRange);
1071 DescriptorSetUpdateBuilder()
1072 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &sparseBufferInfo)
1073 .update(vk, getDevice());
// Full-screen quad (triangle strip) in clip space.
1078 const Vec4 vertexData[] =
1080 Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
1081 Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
1082 Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
1083 Vec4( 1.0f, 1.0f, 0.0f, 1.0f),
1086 const VkDeviceSize vertexBufferSize = sizeof(vertexData);
1088 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1089 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1091 deMemcpy(m_vertexBufferAlloc->getHostPtr(), &vertexData[0], vertexBufferSize);
1092 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
// Specialization constants: constant_id=1 -> dataSize, constant_id=2 -> chunkSize
// (consumed by the fragment shader, see initProgramsDrawWithUBO()).
1097 std::vector<deInt32> specializationData;
1099 const deUint32 numBufferEntries = maxBufferRange / static_cast<deUint32>(sizeof(IVec4));
1100 const deUint32 numEntriesPerChunk = numBufferEntries / sparseAllocation->numResourceChunks;
1102 specializationData.push_back(numBufferEntries);
1103 specializationData.push_back(numEntriesPerChunk);
1106 const VkSpecializationMapEntry specMapEntries[] =
1109 1u, // uint32_t constantID;
1110 0u, // uint32_t offset;
1111 sizeof(deInt32), // size_t size;
1114 2u, // uint32_t constantID;
1115 sizeof(deInt32), // uint32_t offset;
1116 sizeof(deInt32), // size_t size;
1120 const VkSpecializationInfo specInfo =
1122 DE_LENGTH_OF_ARRAY(specMapEntries), // uint32_t mapEntryCount;
1123 specMapEntries, // const VkSpecializationMapEntry* pMapEntries;
1124 sizeInBytes(specializationData), // size_t dataSize;
1125 getDataOrNullptr(specializationData), // const void* pData;
1128 Renderer::SpecializationMap specMap;
1129 specMap[VK_SHADER_STAGE_FRAGMENT_BIT] = &specInfo;
1131 draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, *m_descriptorSetLayout, specMap);
1134 return verifyDrawResult();
// Quad vertex buffer and UBO descriptor objects, created in iterate() and used by rendererDraw().
1138 Move<VkBuffer> m_vertexBuffer;
1139 MovePtr<Allocation> m_vertexBufferAlloc;
1141 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1142 Move<VkDescriptorPool> m_descriptorPool;
1143 Move<VkDescriptorSet> m_descriptorSet;
//! Programs for the vertex/index/indirect buffer tests: a pass-through vertex shader that
//! forwards gl_VertexIndex, and a fragment shader that colors fragments white when the
//! (flat-interpolated) vertex index is even and light grey when it is odd.
1146 void initProgramsDrawGrid (vk::SourceCollections& programCollection, const TestFlags flags)
// Vertex shader
1152 std::ostringstream src;
1153 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1155 << "layout(location = 0) in vec4 in_position;\n"
1156 << "layout(location = 0) out int out_ndx;\n"
1158 << "out gl_PerVertex {\n"
1159 << " vec4 gl_Position;\n"
1162 << "void main(void)\n"
1164 << " gl_Position = in_position;\n"
1165 << " out_ndx = gl_VertexIndex;\n"
1168 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
// Fragment shader ('flags' is not used by the code visible here)
1173 std::ostringstream src;
1174 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1176 << "layout(location = 0) flat in int in_ndx;\n"
1177 << "layout(location = 0) out vec4 o_color;\n"
1179 << "void main(void)\n"
1181 << " if (in_ndx % 2 == 0)\n"
1182 << " o_color = vec4(vec3(1.0), 1.0);\n"
1184 << " o_color = vec4(vec3(0.75), 1.0);\n"
1187 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
1191 //! Generate vertex positions for a grid of tiles composed of two triangles each (6 vertices)
1192 void generateGrid (void* pRawData, const float step, const float ox, const float oy, const deUint32 numX, const deUint32 numY, const float z = 0.0f)
1194 typedef Vec4 (*TilePtr)[6];
1196 TilePtr const pData = static_cast<TilePtr>(pRawData);
1198 for (deUint32 iy = 0; iy < numY; ++iy)
1199 for (deUint32 ix = 0; ix < numX; ++ix)
1201 const deUint32 ndx = ix + numX * iy;
1202 const float x = ox + step * static_cast<float>(ix);
1203 const float y = oy + step * static_cast<float>(iy);
1205 pData[ndx][0] = Vec4(x + step, y, z, 1.0f);
1206 pData[ndx][1] = Vec4(x, y, z, 1.0f);
1207 pData[ndx][2] = Vec4(x, y + step, z, 1.0f);
1209 pData[ndx][3] = Vec4(x, y + step, z, 1.0f);
1210 pData[ndx][4] = Vec4(x + step, y + step, z, 1.0f);
1211 pData[ndx][5] = Vec4(x + step, y, z, 1.0f);
1216 //! Base test for a sparse buffer backing a vertex/index buffer
// Base class: sets up a two-chunk sparse buffer (one chunk per draw/half of the viewport)
// plus a host-visible staging buffer; subclasses fill the staging data via initializeBuffers().
1217 class DrawGridTestInstance : public SparseBufferTestInstance
1220 DrawGridTestInstance (Context& context, const TestFlags flags, const VkBufferUsageFlags usage, const VkDeviceSize minChunkSize)
1221 : SparseBufferTestInstance (context, flags)
1223 const DeviceInterface& vk = getDeviceInterface();
1224 VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(usage);
1227 // Allocate two chunks, each covering half of the viewport
1228 SparseAllocationBuilder builder;
1229 builder.addMemoryBind();
// NOTE(review): the conditions guarding the hole/aliased binds below sit on lines elided
// from this chunk (presumably on m_residency / m_aliased); confirm against the full file.
1232 builder.addResourceHole();
1235 .addMemoryAllocation()
1240 builder.addAliasedMemoryBind(0u, 0u);
1242 m_sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
1245 // Create the buffer
1246 referenceBufferCreateInfo.size = m_sparseAllocation->resourceSize;
1247 m_sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1250 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *m_sparseBuffer, *m_sparseAllocation);
// One chunk holds one draw's worth of data; staging buffer holds both draws.
1252 m_perDrawBufferOffset = m_sparseAllocation->resourceSize / m_sparseAllocation->numResourceChunks;
1253 m_stagingBufferSize = 2 * m_perDrawBufferOffset;
1254 m_stagingBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT));
1255 m_stagingBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_stagingBuffer, MemoryRequirement::HostVisible);
// Fills the staging buffer via the subclass, copies both chunks into the sparse buffer,
// then draws the grid (triangle list) and verifies the rendered image.
1258 tcu::TestStatus iterate (void)
1260 initializeBuffers();
1262 const DeviceInterface& vk = getDeviceInterface();
1264 // Upload to the sparse buffer
1266 flushMappedMemoryRange(vk, getDevice(), m_stagingBufferAlloc->getMemory(), m_stagingBufferAlloc->getOffset(), m_stagingBufferSize);
1268 VkDeviceSize firstChunkOffset = 0ull;
1269 VkDeviceSize secondChunkOffset = m_perDrawBufferOffset;
// NOTE(review): the two offset adjustments below appear to be guarded by conditions on
// lines elided from this chunk (likely m_residency / m_aliased); confirm against the full file.
1272 secondChunkOffset += m_perDrawBufferOffset;
1275 firstChunkOffset = secondChunkOffset + m_perDrawBufferOffset;
1277 const VkBufferCopy copyRegions[] =
1280 0ull, // VkDeviceSize srcOffset;
1281 firstChunkOffset, // VkDeviceSize dstOffset;
1282 m_perDrawBufferOffset, // VkDeviceSize size;
1285 m_perDrawBufferOffset, // VkDeviceSize srcOffset;
1286 secondChunkOffset, // VkDeviceSize dstOffset;
1287 m_perDrawBufferOffset, // VkDeviceSize size;
1291 const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
1292 const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
1294 beginCommandBuffer (vk, *cmdBuffer);
1295 vk.cmdCopyBuffer (*cmdBuffer, *m_stagingBuffer, *m_sparseBuffer, DE_LENGTH_OF_ARRAY(copyRegions), copyRegions);
1296 endCommandBuffer (vk, *cmdBuffer);
1298 submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
1301 draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
1303 return verifyDrawResult();
// Subclasses fill the staging buffer (and any auxiliary buffers) with per-draw data.
1307 virtual void initializeBuffers (void) = 0;
// Byte size of one chunk == one draw's worth of data.
1309 VkDeviceSize m_perDrawBufferOffset;
1311 VkDeviceSize m_stagingBufferSize;
1312 Move<VkBuffer> m_stagingBuffer;
1313 MovePtr<Allocation> m_stagingBufferAlloc;
1315 MovePtr<SparseAllocation> m_sparseAllocation;
1316 Move<VkBuffer> m_sparseBuffer;
1319 //! Sparse buffer backing a vertex input buffer
// Grid drawn from a vertex buffer backed by sparse memory; the minimum chunk size is
// sized to hold all grid vertices (6 Vec4 per tile).
1320 class VertexBufferTestInstance : public DrawGridTestInstance
1323 VertexBufferTestInstance (Context& context, const TestFlags flags)
1324 : DrawGridTestInstance (context,
1326 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
1327 GRID_SIZE * GRID_SIZE * 6 * sizeof(Vec4))
// Renderer callback: two draws, each rendering half of the grid from its own chunk of the
// sparse vertex buffer. With residency, the second draw's offset skips over the unbound chunk.
1331 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1333 DE_UNREF(pipelineLayout);
1335 m_context.getTestContext().getLog()
1336 << tcu::TestLog::Message << "Drawing a grid of triangles backed by a sparse vertex buffer. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1338 const DeviceInterface& vk = getDeviceInterface();
// Half of the grid's triangles per draw call.
1339 const deUint32 vertexCount = 6 * (GRID_SIZE * GRID_SIZE) / 2;
1340 VkDeviceSize vertexOffset = 0ull;
1342 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_sparseBuffer.get(), &vertexOffset);
1343 vk.cmdDraw (cmdBuffer, vertexCount, 1u, 0u, 0u);
1345 vertexOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1347 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_sparseBuffer.get(), &vertexOffset);
1348 vk.cmdDraw (cmdBuffer, vertexCount, 1u, 0u, 0u);
1351 void initializeBuffers (void)
1353 deUint8* pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr());
1354 const float step = 2.0f / static_cast<float>(GRID_SIZE);
1356 // Prepare data for two draw calls
1357 generateGrid(pData, step, -1.0f, -1.0f, GRID_SIZE, GRID_SIZE/2);
1358 generateGrid(pData + m_perDrawBufferOffset, step, -1.0f, 0.0f, GRID_SIZE, GRID_SIZE/2);
1362 //! Sparse buffer backing an index buffer
// Grid drawn through a sparse-backed index buffer; vertices live in a regular buffer.
1363 class IndexBufferTestInstance : public DrawGridTestInstance
1366 IndexBufferTestInstance (Context& context, const TestFlags flags)
1367 : DrawGridTestInstance (context,
1369 VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
1370 GRID_SIZE * GRID_SIZE * 6 * sizeof(deUint32))
// Each draw consumes indices for half of the grid's triangles.
1371 , m_halfVertexCount (6 * (GRID_SIZE * GRID_SIZE) / 2)
// Renderer callback: two indexed draws, each binding the sparse index buffer at its own
// chunk offset. With residency, the second offset skips over the unbound chunk.
1375 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1377 DE_UNREF(pipelineLayout);
1379 m_context.getTestContext().getLog()
1380 << tcu::TestLog::Message << "Drawing a grid of triangles from a sparse index buffer. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1382 const DeviceInterface& vk = getDeviceInterface();
1383 const VkDeviceSize vertexOffset = 0ull;
1384 VkDeviceSize indexOffset = 0ull;
1386 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
1388 vk.cmdBindIndexBuffer (cmdBuffer, *m_sparseBuffer, indexOffset, VK_INDEX_TYPE_UINT32);
1389 vk.cmdDrawIndexed (cmdBuffer, m_halfVertexCount, 1u, 0u, 0, 0u);
1391 indexOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1393 vk.cmdBindIndexBuffer (cmdBuffer, *m_sparseBuffer, indexOffset, VK_INDEX_TYPE_UINT32);
1394 vk.cmdDrawIndexed (cmdBuffer, m_halfVertexCount, 1u, 0u, 0, 0u);
// Creates a host-visible vertex buffer with the full grid, and fills the staging buffer
// with two chunks of sequential indices (0..half-1 and half..2*half-1).
1397 void initializeBuffers (void)
1400 const DeviceInterface& vk = getDeviceInterface();
1401 const VkDeviceSize vertexBufferSize = 2 * m_halfVertexCount * sizeof(Vec4);
1402 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1403 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1406 const float step = 2.0f / static_cast<float>(GRID_SIZE);
1408 generateGrid(m_vertexBufferAlloc->getHostPtr(), step, -1.0f, -1.0f, GRID_SIZE, GRID_SIZE);
1410 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
1413 // Sparse index buffer
1414 for (deUint32 chunkNdx = 0u; chunkNdx < 2; ++chunkNdx)
1416 deUint8* const pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr()) + chunkNdx * m_perDrawBufferOffset;
1417 deUint32* const pIndexData = reinterpret_cast<deUint32*>(pData);
1418 const deUint32 ndxBase = chunkNdx * m_halfVertexCount;
1420 for (deUint32 i = 0u; i < m_halfVertexCount; ++i)
1421 pIndexData[i] = ndxBase + i;
// Number of indices consumed per draw call (half of the grid).
1426 const deUint32 m_halfVertexCount;
// Non-sparse vertex buffer holding the full grid (see initializeBuffers()).
1427 Move<VkBuffer> m_vertexBuffer;
1428 MovePtr<Allocation> m_vertexBufferAlloc;
1431 //! Draw from a sparse indirect buffer
// Two indirect draws whose VkDrawIndirectCommand structures live in a sparse-backed buffer.
1432 class IndirectBufferTestInstance : public DrawGridTestInstance
1435 IndirectBufferTestInstance (Context& context, const TestFlags flags)
1436 : DrawGridTestInstance (context,
1438 VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT,
// Each chunk only needs to hold one indirect draw command.
1439 sizeof(VkDrawIndirectCommand))
// Renderer callback: two vkCmdDrawIndirect calls reading their command structures from
// different chunks of the sparse buffer (second offset skips the hole with residency).
1443 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1445 DE_UNREF(pipelineLayout);
1447 m_context.getTestContext().getLog()
1448 << tcu::TestLog::Message << "Drawing two triangles covering the whole viewport. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1450 const DeviceInterface& vk = getDeviceInterface();
1451 const VkDeviceSize vertexOffset = 0ull;
1452 VkDeviceSize indirectOffset = 0ull;
1454 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
1455 vk.cmdDrawIndirect (cmdBuffer, *m_sparseBuffer, indirectOffset, 1u, 0u);
1457 indirectOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1459 vk.cmdDrawIndirect (cmdBuffer, *m_sparseBuffer, indirectOffset, 1u, 0u);
// Creates a vertex buffer with one full-viewport tile (two triangles) and writes one
// VkDrawIndirectCommand per staging chunk, each drawing one of the two triangles.
1462 void initializeBuffers (void)
1465 const DeviceInterface& vk = getDeviceInterface();
1466 const VkDeviceSize vertexBufferSize = 2 * 3 * sizeof(Vec4);
1467 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1468 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
// Single tile covering the whole clip space (step 2.0 from (-1,-1)).
1471 generateGrid(m_vertexBufferAlloc->getHostPtr(), 2.0f, -1.0f, -1.0f, 1, 1);
1472 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
1476 for (deUint32 chunkNdx = 0u; chunkNdx < 2; ++chunkNdx)
1478 deUint8* const pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr()) + chunkNdx * m_perDrawBufferOffset;
1479 VkDrawIndirectCommand* const pCmdData = reinterpret_cast<VkDrawIndirectCommand*>(pData);
// Chunk 0 draws vertices 0..2, chunk 1 draws vertices 3..5.
1481 pCmdData->firstVertex = 3u * chunkNdx;
1482 pCmdData->firstInstance = 0u;
1483 pCmdData->vertexCount = 3u;
1484 pCmdData->instanceCount = 1u;
// Non-sparse vertex buffer holding the two full-viewport triangles (see initializeBuffers()).
1489 Move<VkBuffer> m_vertexBuffer;
1490 MovePtr<Allocation> m_vertexBufferAlloc;
1493 //! Similar to the class in vktTestCaseUtil.hpp, but uses Arg0 directly rather than through a InstanceFunction1
1494 template<typename Arg0>
1495 class FunctionProgramsSimple1
1498 typedef void (*Function) (vk::SourceCollections& dst, Arg0 arg0);
1499 FunctionProgramsSimple1 (Function func) : m_func(func) {}
1500 void init (vk::SourceCollections& dst, const Arg0& arg0) const { m_func(dst, arg0); }
1503 const Function m_func;
1506 //! Convenience function to create a TestCase based on a freestanding initPrograms and a TestInstance implementation
// Builds a self-validating TestCase from a freestanding initPrograms function and a
// TestInstance type, forwarding arg0 to both.
// NOTE(review): the signature's final parameter (arg0, used below) sits on a line elided
// from this chunk; confirm against the full file.
1507 template<typename TestInstanceT, typename Arg0>
1508 TestCase* createTestInstanceWithPrograms (tcu::TestContext& testCtx,
1509 const std::string& name,
1510 const std::string& desc,
1511 typename FunctionProgramsSimple1<Arg0>::Function initPrograms,
1514 return new InstanceFactory1<TestInstanceT, Arg0, FunctionProgramsSimple1<Arg0> >(
1515 testCtx, tcu::NODETYPE_SELF_VALIDATE, name, desc, FunctionProgramsSimple1<Arg0>(initPrograms), arg0);
// Builds the sparse-buffer test hierarchy: transfer, ssbo, ubo, vertex_buffer,
// index_buffer and indirect_buffer groups, each with one case per flag combination.
1518 void populateTestGroup (tcu::TestCaseGroup* parentGroup)
// Name/flags table driving the per-usage case lists (the array declaration itself sits on
// lines elided from this chunk).
1526 { "sparse_binding", 0u },
1527 { "sparse_binding_aliased", TEST_FLAG_ALIASED, },
1528 { "sparse_residency", TEST_FLAG_RESIDENCY, },
1529 { "sparse_residency_aliased", TEST_FLAG_RESIDENCY | TEST_FLAG_ALIASED, },
1530 { "sparse_residency_non_resident_strict", TEST_FLAG_RESIDENCY | TEST_FLAG_NON_RESIDENT_STRICT, },
// The non_resident_strict entry is last so the "default" list simply drops it.
1533 const int numGroupsIncludingNonResidentStrict = DE_LENGTH_OF_ARRAY(groups);
1534 const int numGroupsDefaultList = numGroupsIncludingNonResidentStrict - 1;
// Transfer tests (implemented in vktSparseResourcesBufferSparseBinding.*)
1538 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "transfer", ""));
1540 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding", ""));
1541 addBufferSparseBindingTests(subGroup.get());
1542 group->addChild(subGroup.release());
1544 parentGroup->addChild(group.release());
// SSBO tests (memory aliasing and residency implementations from the included headers)
1549 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ssbo", ""));
1551 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding_aliased", ""));
1552 addBufferSparseMemoryAliasingTests(subGroup.get());
1553 group->addChild(subGroup.release());
1556 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_residency", ""));
1557 addBufferSparseResidencyTests(subGroup.get());
1558 group->addChild(subGroup.release());
1560 parentGroup->addChild(group.release());
// UBO tests -- the only usage that also exercises non_resident_strict
1565 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ubo", ""));
1567 for (int groupNdx = 0u; groupNdx < numGroupsIncludingNonResidentStrict; ++groupNdx)
1568 group->addChild(createTestInstanceWithPrograms<UBOTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawWithUBO, groups[groupNdx].flags));
1570 parentGroup->addChild(group.release());
// Vertex buffer tests
1575 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "vertex_buffer", ""));
1577 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1578 group->addChild(createTestInstanceWithPrograms<VertexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1580 parentGroup->addChild(group.release());
// Index buffer tests
1585 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "index_buffer", ""));
1587 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1588 group->addChild(createTestInstanceWithPrograms<IndexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1590 parentGroup->addChild(group.release());
// Indirect buffer tests
1595 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "indirect_buffer", ""));
1597 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1598 group->addChild(createTestInstanceWithPrograms<IndirectBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1600 parentGroup->addChild(group.release());
// Public entry point: creates the root "buffer" group containing all sparse buffer tests.
1606 tcu::TestCaseGroup* createSparseBufferTests (tcu::TestContext& testCtx)
1608 return createTestGroup(testCtx, "buffer", "Sparse buffer usage tests", populateTestGroup);