1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Sparse buffer tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktSparseResourcesBufferTests.hpp"
25 #include "vktTestCaseUtil.hpp"
26 #include "vktTestGroupUtil.hpp"
27 #include "vktSparseResourcesTestsUtil.hpp"
28 #include "vktSparseResourcesBase.hpp"
29 #include "vktSparseResourcesBufferSparseBinding.hpp"
30 #include "vktSparseResourcesBufferSparseResidency.hpp"
31 #include "vktSparseResourcesBufferMemoryAliasing.hpp"
34 #include "vkRefUtil.hpp"
35 #include "vkPlatform.hpp"
36 #include "vkPrograms.hpp"
37 #include "vkMemUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkQueryUtil.hpp"
40 #include "vkTypeUtil.hpp"
42 #include "deUniquePtr.hpp"
43 #include "deSharedPtr.hpp"
65 typedef SharedPtr<UniquePtr<Allocation> > AllocationSp;
69 RENDER_SIZE = 128, //!< framebuffer size in pixels
70 GRID_SIZE = RENDER_SIZE / 8, //!< number of grid tiles in a row
75 // sparseBinding is implied
76 TEST_FLAG_ALIASED = 1u << 0, //!< sparseResidencyAliased
77 TEST_FLAG_RESIDENCY = 1u << 1, //!< sparseResidencyBuffer
78 TEST_FLAG_NON_RESIDENT_STRICT = 1u << 2, //!< residencyNonResidentStrict
80 typedef deUint32 TestFlags;
82 //! SparseAllocationBuilder output. Owns the allocated memory.
83 struct SparseAllocation
// Plain data holder describing one sparse buffer: its chunked layout and the
// device memory objects that back it (owned via AllocationSp).
85 deUint32 numResourceChunks; //!< number of chunk-sized pieces covered by the resource range
86 VkDeviceSize resourceSize; //!< buffer size in bytes
87 std::vector<AllocationSp> allocations; //!< actual allocated memory
88 std::vector<VkSparseMemoryBind> memoryBinds; //!< memory binds backing the resource
91 //! Utility to lay out memory allocations for a sparse buffer, including holes and aliased regions.
92 //! Will allocate memory upon building.
93 class SparseAllocationBuilder
96 SparseAllocationBuilder (void);
98 // \note "chunk" is the smallest (due to alignment) bindable amount of memory
// All add* methods return *this so calls can be chained; actual allocation and
// bind-structure generation is deferred until build().
100 SparseAllocationBuilder& addMemoryHole (const deUint32 numChunks = 1u);
101 SparseAllocationBuilder& addResourceHole (const deUint32 numChunks = 1u);
102 SparseAllocationBuilder& addMemoryBind (const deUint32 numChunks = 1u);
103 SparseAllocationBuilder& addAliasedMemoryBind (const deUint32 allocationNdx, const deUint32 chunkOffset, const deUint32 numChunks = 1u);
104 SparseAllocationBuilder& addMemoryAllocation (void);
106 MovePtr<SparseAllocation> build (const DeviceInterface& vk,
107 const VkDevice device,
108 Allocator& allocator,
109 VkBufferCreateInfo referenceCreateInfo, //!< buffer size is ignored in this info
110 const VkDeviceSize minChunkSize = 0ull) const; //!< make sure chunks are at least this big
// Pending bind recorded in chunk units: which allocation backs it, where in the
// resource it lands, and at what chunk offset within that allocation.
115 deUint32 allocationNdx;
116 deUint32 resourceChunkNdx;
117 deUint32 memoryChunkNdx;
// Builder cursors: current allocation index and running chunk offsets within the
// resource range and the current memory allocation.
121 deUint32 m_allocationNdx;
122 deUint32 m_resourceChunkNdx;
123 deUint32 m_memoryChunkNdx;
124 std::vector<MemoryBind> m_memoryBinds;
125 std::vector<deUint32> m_chunksPerAllocation; //!< chunk count to request from each allocation in build()
// Start with a single, initially empty allocation; all chunk cursors begin at zero.
129 SparseAllocationBuilder::SparseAllocationBuilder (void)
130 : m_allocationNdx (0)
131 , m_resourceChunkNdx (0)
132 , m_memoryChunkNdx (0)
134 m_chunksPerAllocation.push_back(0);
// Advance the memory cursor by numChunks without binding that memory to the resource,
// leaving an unused gap inside the current allocation.
137 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryHole (const deUint32 numChunks)
139 m_memoryChunkNdx += numChunks;
140 m_chunksPerAllocation[m_allocationNdx] += numChunks;
// Leave the next numChunks of the resource range unbound (a sparse "hole" in the buffer).
145 SparseAllocationBuilder& SparseAllocationBuilder::addResourceHole (const deUint32 numChunks)
147 m_resourceChunkNdx += numChunks;
// Close the current memory allocation and start a new, empty one; subsequent binds
// draw their memory from the new allocation.
152 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryAllocation (void)
154 DE_ASSERT(m_memoryChunkNdx != 0); // doesn't make sense to have an empty allocation
156 m_allocationNdx += 1;
157 m_memoryChunkNdx = 0;
158 m_chunksPerAllocation.push_back(0);
163 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryBind (const deUint32 numChunks)
165 const MemoryBind memoryBind =
172 m_memoryBinds.push_back(memoryBind);
174 m_resourceChunkNdx += numChunks;
175 m_memoryChunkNdx += numChunks;
176 m_chunksPerAllocation[m_allocationNdx] += numChunks;
// Bind the next numChunks of the resource to memory at chunkOffset inside an already
// started allocation (allocationNdx), potentially aliasing memory bound elsewhere.
// Only the resource cursor advances here; the memory cursor is deliberately untouched.
181 SparseAllocationBuilder& SparseAllocationBuilder::addAliasedMemoryBind (const deUint32 allocationNdx, const deUint32 chunkOffset, const deUint32 numChunks)
183 DE_ASSERT(allocationNdx <= m_allocationNdx);
185 const MemoryBind memoryBind =
192 m_memoryBinds.push_back(memoryBind);
194 m_resourceChunkNdx += numChunks;
// Copy of the given memory requirements with only the size field overridden.
199 inline VkMemoryRequirements requirementsWithSize (VkMemoryRequirements requirements, const VkDeviceSize size)
201 requirements.size = size;
// Round val up to the nearest multiple of align; align must be a power of two
// (enforced by the assert), enabling the mask-based rounding trick below.
205 inline VkDeviceSize alignSize (const VkDeviceSize val, const VkDeviceSize align)
207 DE_ASSERT(deIsPowerOfTwo64(align));
208 return (val + align - 1) & ~(align - 1);
// Materialize the recorded layout: derive the chunk size from a reference buffer's
// memory requirements, allocate every recorded memory block, and translate the
// chunk-based binds into byte-based VkSparseMemoryBind structures. The returned
// SparseAllocation owns all allocated memory.
211 MovePtr<SparseAllocation> SparseAllocationBuilder::build (const DeviceInterface& vk,
212 const VkDevice device,
213 Allocator& allocator,
214 VkBufferCreateInfo referenceCreateInfo,
215 const VkDeviceSize minChunkSize) const
218 MovePtr<SparseAllocation> sparseAllocation (new SparseAllocation());
// Create a tiny throwaway buffer purely to query alignment/memory-type requirements;
// the size set here is irrelevant (see the "size is ignored" note on the parameter).
220 referenceCreateInfo.size = sizeof(deUint32);
221 const Unique<VkBuffer> refBuffer (createBuffer(vk, device, &referenceCreateInfo));
222 const VkMemoryRequirements memoryRequirements = getBufferMemoryRequirements(vk, device, *refBuffer);
// A chunk is at least one alignment unit, and at least minChunkSize rounded up to alignment.
223 const VkDeviceSize chunkSize = std::max(memoryRequirements.alignment, alignSize(minChunkSize, memoryRequirements.alignment));
// One device-memory allocation per recorded allocation, sized by its chunk count.
225 for (std::vector<deUint32>::const_iterator numChunksIter = m_chunksPerAllocation.begin(); numChunksIter != m_chunksPerAllocation.end(); ++numChunksIter)
227 sparseAllocation->allocations.push_back(makeDeSharedPtr(
228 allocator.allocate(requirementsWithSize(memoryRequirements, *numChunksIter * chunkSize), MemoryRequirement::Any)));
// Convert each chunk-based MemoryBind into a byte-based VkSparseMemoryBind, and grow
// the reference size to cover the furthest-bound byte.
231 for (std::vector<MemoryBind>::const_iterator memBindIter = m_memoryBinds.begin(); memBindIter != m_memoryBinds.end(); ++memBindIter)
233 const Allocation& alloc = **sparseAllocation->allocations[memBindIter->allocationNdx];
234 const VkSparseMemoryBind bind =
236 memBindIter->resourceChunkNdx * chunkSize, // VkDeviceSize resourceOffset;
237 memBindIter->numChunks * chunkSize, // VkDeviceSize size;
238 alloc.getMemory(), // VkDeviceMemory memory;
239 alloc.getOffset() + memBindIter->memoryChunkNdx * chunkSize, // VkDeviceSize memoryOffset;
240 (VkSparseMemoryBindFlags)0, // VkSparseMemoryBindFlags flags;
242 sparseAllocation->memoryBinds.push_back(bind);
243 referenceCreateInfo.size = std::max(referenceCreateInfo.size, bind.resourceOffset + bind.size);
246 sparseAllocation->resourceSize = referenceCreateInfo.size;
247 sparseAllocation->numResourceChunks = m_resourceChunkNdx;
249 return sparseAllocation;
// Create-info for a single-sample, single-mip, single-layer 2D optimal-tiling image
// of the given format/size, exclusive sharing, undefined initial layout.
252 VkImageCreateInfo makeImageCreateInfo (const VkFormat format, const IVec2& size, const VkImageUsageFlags usage)
254 const VkImageCreateInfo imageParams =
256 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
257 DE_NULL, // const void* pNext;
258 (VkImageCreateFlags)0, // VkImageCreateFlags flags;
259 VK_IMAGE_TYPE_2D, // VkImageType imageType;
260 format, // VkFormat format;
261 makeExtent3D(size.x(), size.y(), 1), // VkExtent3D extent;
262 1u, // deUint32 mipLevels;
263 1u, // deUint32 arrayLayers;
264 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
265 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
266 usage, // VkImageUsageFlags usage;
267 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
268 0u, // deUint32 queueFamilyIndexCount;
269 DE_NULL, // const deUint32* pQueueFamilyIndices;
270 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
// Minimal render pass: one color attachment (cleared on load, stored on finish,
// ending in COLOR_ATTACHMENT_OPTIMAL) and a single graphics subpass with no
// depth/stencil and no external dependencies.
275 Move<VkRenderPass> makeRenderPass (const DeviceInterface& vk,
276 const VkDevice device,
277 const VkFormat colorFormat)
279 const VkAttachmentDescription colorAttachmentDescription =
281 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
282 colorFormat, // VkFormat format;
283 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
284 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
285 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
286 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
287 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
288 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
289 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
292 const VkAttachmentReference colorAttachmentRef =
294 0u, // deUint32 attachment;
295 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
298 const VkSubpassDescription subpassDescription =
300 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
301 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
302 0u, // deUint32 inputAttachmentCount;
303 DE_NULL, // const VkAttachmentReference* pInputAttachments;
304 1u, // deUint32 colorAttachmentCount;
305 &colorAttachmentRef, // const VkAttachmentReference* pColorAttachments;
306 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
307 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
308 0u, // deUint32 preserveAttachmentCount;
309 DE_NULL // const deUint32* pPreserveAttachments;
312 const VkRenderPassCreateInfo renderPassInfo =
314 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
315 DE_NULL, // const void* pNext;
316 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
317 1u, // deUint32 attachmentCount;
318 &colorAttachmentDescription, // const VkAttachmentDescription* pAttachments;
319 1u, // deUint32 subpassCount;
320 &subpassDescription, // const VkSubpassDescription* pSubpasses;
321 0u, // deUint32 dependencyCount;
322 DE_NULL // const VkSubpassDependency* pDependencies;
325 return createRenderPass(vk, device, &renderPassInfo);
// Build a fixed-function graphics pipeline for these tests: one vec4 vertex
// attribute at binding 0, static viewport/scissor covering renderSize, fill-mode
// rasterization with no culling, 1x multisampling, depth/stencil tests disabled,
// and blending disabled on the single color attachment. Shader stages are passed
// in by the caller (vertex + fragment).
328 Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
329 const VkDevice device,
330 const VkPipelineLayout pipelineLayout,
331 const VkRenderPass renderPass,
332 const IVec2 renderSize,
333 const VkPrimitiveTopology topology,
334 const deUint32 stageCount,
335 const VkPipelineShaderStageCreateInfo* pStages)
337 const VkVertexInputBindingDescription vertexInputBindingDescription =
339 0u, // uint32_t binding;
340 sizeof(Vec4), // uint32_t stride;
341 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
344 const VkVertexInputAttributeDescription vertexInputAttributeDescription =
346 0u, // uint32_t location;
347 0u, // uint32_t binding;
348 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
349 0u, // uint32_t offset;
352 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
354 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
355 DE_NULL, // const void* pNext;
356 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
357 1u, // uint32_t vertexBindingDescriptionCount;
358 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
359 1u, // uint32_t vertexAttributeDescriptionCount;
360 &vertexInputAttributeDescription, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
363 const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
365 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
366 DE_NULL, // const void* pNext;
367 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
368 topology, // VkPrimitiveTopology topology;
369 VK_FALSE, // VkBool32 primitiveRestartEnable;
// Static (non-dynamic) viewport and scissor spanning the whole framebuffer.
372 const VkViewport viewport = makeViewport(
374 static_cast<float>(renderSize.x()), static_cast<float>(renderSize.y()),
377 const VkRect2D scissor = {
379 makeExtent2D(static_cast<deUint32>(renderSize.x()), static_cast<deUint32>(renderSize.y())),
382 const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
384 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
385 DE_NULL, // const void* pNext;
386 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags;
387 1u, // uint32_t viewportCount;
388 &viewport, // const VkViewport* pViewports;
389 1u, // uint32_t scissorCount;
390 &scissor, // const VkRect2D* pScissors;
393 const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
395 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
396 DE_NULL, // const void* pNext;
397 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
398 VK_FALSE, // VkBool32 depthClampEnable;
399 VK_FALSE, // VkBool32 rasterizerDiscardEnable;
400 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
401 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
402 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
403 VK_FALSE, // VkBool32 depthBiasEnable;
404 0.0f, // float depthBiasConstantFactor;
405 0.0f, // float depthBiasClamp;
406 0.0f, // float depthBiasSlopeFactor;
407 1.0f, // float lineWidth;
410 const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
412 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
413 DE_NULL, // const void* pNext;
414 (VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags;
415 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
416 VK_FALSE, // VkBool32 sampleShadingEnable;
417 0.0f, // float minSampleShading;
418 DE_NULL, // const VkSampleMask* pSampleMask;
419 VK_FALSE, // VkBool32 alphaToCoverageEnable;
420 VK_FALSE // VkBool32 alphaToOneEnable;
// No-op stencil state; stencil testing is disabled below anyway.
423 const VkStencilOpState stencilOpState = makeStencilOpState(
424 VK_STENCIL_OP_KEEP, // stencil fail
425 VK_STENCIL_OP_KEEP, // depth & stencil pass
426 VK_STENCIL_OP_KEEP, // depth only fail
427 VK_COMPARE_OP_ALWAYS, // compare op
432 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
434 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
435 DE_NULL, // const void* pNext;
436 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
437 VK_FALSE, // VkBool32 depthTestEnable;
438 VK_FALSE, // VkBool32 depthWriteEnable;
439 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
440 VK_FALSE, // VkBool32 depthBoundsTestEnable;
441 VK_FALSE, // VkBool32 stencilTestEnable;
442 stencilOpState, // VkStencilOpState front;
443 stencilOpState, // VkStencilOpState back;
444 0.0f, // float minDepthBounds;
445 1.0f, // float maxDepthBounds;
448 const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
449 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
451 VK_FALSE, // VkBool32 blendEnable;
452 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor;
453 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
454 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
455 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor;
456 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
457 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
458 colorComponentsAll, // VkColorComponentFlags colorWriteMask;
461 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
463 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
464 DE_NULL, // const void* pNext;
465 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
466 VK_FALSE, // VkBool32 logicOpEnable;
467 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
468 1u, // deUint32 attachmentCount;
469 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
470 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
473 const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
475 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
476 DE_NULL, // const void* pNext;
477 (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
478 stageCount, // deUint32 stageCount;
479 pStages, // const VkPipelineShaderStageCreateInfo* pStages;
480 &vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
481 &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
482 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
483 &pipelineViewportStateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
484 &pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
485 &pipelineMultisampleStateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
486 &pipelineDepthStencilStateInfo, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
487 &pipelineColorBlendStateInfo, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
488 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
489 pipelineLayout, // VkPipelineLayout layout;
490 renderPass, // VkRenderPass renderPass;
491 0u, // deUint32 subpass;
492 DE_NULL, // VkPipeline basePipelineHandle;
493 0, // deInt32 basePipelineIndex;
496 return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo);
499 //! Return true if there are any red (or all zero) pixels in the image
// Red is the failure color written by the test fragment shader; an all-zero pixel
// means the framebuffer was never written (it was cleared to zero by the test).
500 bool imageHasErrorPixels (const tcu::ConstPixelBufferAccess image)
502 const Vec4 errorColor = Vec4(1.0f, 0.0f, 0.0f, 1.0f);
503 const Vec4 blankColor = Vec4(); // default-constructed == all zeros
// Scan every pixel; any match means failure.
505 for (int y = 0; y < image.getHeight(); ++y)
506 for (int x = 0; x < image.getWidth(); ++x)
508 const Vec4 color = image.getPixel(x, y);
509 if (color == errorColor || color == blankColor)
519 typedef std::map<VkShaderStageFlagBits, const VkSpecializationInfo*> SpecializationMap;
521 //! Use the delegate to bind descriptor sets, vertex buffers, etc. and make a draw call
// Implemented by each test instance; invoked by Renderer::draw inside an active
// render pass with the pipeline already bound.
524 virtual ~Delegate (void) {}
525 virtual void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const = 0;
// Set up everything needed for a single-pass draw: color image + view, shader
// modules, render pass, framebuffer, pipeline layout/pipeline, and a command
// pool/buffer on the given queue family. Rendered pixels are later copied into
// the caller-provided host-visible colorBuffer.
528 Renderer (const DeviceInterface& vk,
529 const VkDevice device,
530 Allocator& allocator,
531 const deUint32 queueFamilyIndex,
532 const VkDescriptorSetLayout descriptorSetLayout, //!< may be NULL, if no descriptors are used
533 ProgramCollection<vk::ProgramBinary>& binaryCollection,
534 const std::string& vertexName,
535 const std::string& fragmentName,
536 const VkBuffer colorBuffer,
537 const IVec2& renderSize,
538 const VkFormat colorFormat,
539 const Vec4& clearColor,
540 const VkPrimitiveTopology topology,
541 SpecializationMap specMap = SpecializationMap())
542 : m_colorBuffer (colorBuffer)
543 , m_renderSize (renderSize)
544 , m_colorFormat (colorFormat)
545 , m_colorSubresourceRange (makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u))
546 , m_clearColor (clearColor)
547 , m_topology (topology)
548 , m_descriptorSetLayout (descriptorSetLayout)
550 m_colorImage = makeImage (vk, device, makeImageCreateInfo(m_colorFormat, m_renderSize, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
551 m_colorImageAlloc = bindImage (vk, device, allocator, *m_colorImage, MemoryRequirement::Any);
552 m_colorAttachment = makeImageView (vk, device, *m_colorImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorSubresourceRange);
554 m_vertexModule = createShaderModule (vk, device, binaryCollection.get(vertexName), 0u);
555 m_fragmentModule = createShaderModule (vk, device, binaryCollection.get(fragmentName), 0u);
// Note: specMap[] default-inserts DE_NULL for stages absent from the map, which is
// exactly the "no specialization" value required by the API.
557 const VkPipelineShaderStageCreateInfo pShaderStages[] =
560 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
561 DE_NULL, // const void* pNext;
562 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
563 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
564 *m_vertexModule, // VkShaderModule module;
565 "main", // const char* pName;
566 specMap[VK_SHADER_STAGE_VERTEX_BIT], // const VkSpecializationInfo* pSpecializationInfo;
569 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
570 DE_NULL, // const void* pNext;
571 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
572 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
573 *m_fragmentModule, // VkShaderModule module;
574 "main", // const char* pName;
575 specMap[VK_SHADER_STAGE_FRAGMENT_BIT], // const VkSpecializationInfo* pSpecializationInfo;
579 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
580 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, 1u, &m_colorAttachment.get(),
581 static_cast<deUint32>(m_renderSize.x()), static_cast<deUint32>(m_renderSize.y()));
582 m_pipelineLayout = makePipelineLayout (vk, device, m_descriptorSetLayout);
583 m_pipeline = makeGraphicsPipeline (vk, device, *m_pipelineLayout, *m_renderPass, m_renderSize, m_topology, DE_LENGTH_OF_ARRAY(pShaderStages), pShaderStages);
584 m_cmdPool = makeCommandPool (vk, device, queueFamilyIndex);
585 m_cmdBuffer = makeCommandBuffer (vk, device, *m_cmdPool);
588 void draw (const DeviceInterface& vk,
589 const VkDevice device,
591 const Delegate& drawDelegate) const
593 beginCommandBuffer(vk, *m_cmdBuffer);
595 const VkClearValue clearValue = makeClearValueColor(m_clearColor);
596 const VkRect2D renderArea =
599 makeExtent2D(m_renderSize.x(), m_renderSize.y()),
601 const VkRenderPassBeginInfo renderPassBeginInfo =
603 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
604 DE_NULL, // const void* pNext;
605 *m_renderPass, // VkRenderPass renderPass;
606 *m_framebuffer, // VkFramebuffer framebuffer;
607 renderArea, // VkRect2D renderArea;
608 1u, // uint32_t clearValueCount;
609 &clearValue, // const VkClearValue* pClearValues;
611 vk.cmdBeginRenderPass(*m_cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
613 vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
614 drawDelegate.rendererDraw(*m_pipelineLayout, *m_cmdBuffer);
616 vk.cmdEndRenderPass(*m_cmdBuffer);
618 // Prepare color image for copy
620 const VkImageMemoryBarrier barriers[] =
623 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
624 DE_NULL, // const void* pNext;
625 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags outputMask;
626 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags inputMask;
627 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
628 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
629 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
630 VK_QUEUE_FAMILY_IGNORED, // deUint32 destQueueFamilyIndex;
631 *m_colorImage, // VkImage image;
632 m_colorSubresourceRange, // VkImageSubresourceRange subresourceRange;
636 vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
637 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
639 // Color image -> host buffer
641 const VkBufferImageCopy region =
643 0ull, // VkDeviceSize bufferOffset;
644 0u, // uint32_t bufferRowLength;
645 0u, // uint32_t bufferImageHeight;
646 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u), // VkImageSubresourceLayers imageSubresource;
647 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
648 makeExtent3D(m_renderSize.x(), m_renderSize.y(), 1u), // VkExtent3D imageExtent;
651 vk.cmdCopyImageToBuffer(*m_cmdBuffer, *m_colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_colorBuffer, 1u, ®ion);
653 // Buffer write barrier
655 const VkBufferMemoryBarrier barriers[] =
658 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
659 DE_NULL, // const void* pNext;
660 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
661 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
662 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
663 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
664 m_colorBuffer, // VkBuffer buffer;
665 0ull, // VkDeviceSize offset;
666 VK_WHOLE_SIZE, // VkDeviceSize size;
670 vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
671 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers, DE_NULL, 0u);
674 VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer));
675 submitCommandsAndWait(vk, device, queue, *m_cmdBuffer);
// Immutable configuration captured at construction time.
679 const VkBuffer m_colorBuffer; //!< host-visible destination for the rendered image (not owned)
680 const IVec2 m_renderSize;
681 const VkFormat m_colorFormat;
682 const VkImageSubresourceRange m_colorSubresourceRange;
683 const Vec4 m_clearColor;
684 const VkPrimitiveTopology m_topology;
685 const VkDescriptorSetLayout m_descriptorSetLayout; //!< may be DE_NULL (not owned)
// Vulkan objects owned by this Renderer (destroyed in reverse order of declaration).
687 Move<VkImage> m_colorImage;
688 MovePtr<Allocation> m_colorImageAlloc;
689 Move<VkImageView> m_colorAttachment;
690 Move<VkShaderModule> m_vertexModule;
691 Move<VkShaderModule> m_fragmentModule;
692 Move<VkRenderPass> m_renderPass;
693 Move<VkFramebuffer> m_framebuffer;
694 Move<VkPipelineLayout> m_pipelineLayout;
695 Move<VkPipeline> m_pipeline;
696 Move<VkCommandPool> m_cmdPool;
697 Move<VkCommandBuffer> m_cmdBuffer;
// Non-copyable: declared but not defined.
700 Renderer (const Renderer&);
701 Renderer& operator= (const Renderer&);
// Bind all of the sparse allocation's memory ranges to the buffer via
// vkQueueBindSparse, then block until the bind operation completes (fence wait
// with an effectively infinite timeout).
704 void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const VkQueue sparseQueue, const VkBuffer buffer, const SparseAllocation& sparseAllocation)
706 const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo =
708 buffer, // VkBuffer buffer;
709 static_cast<deUint32>(sparseAllocation.memoryBinds.size()), // uint32_t bindCount;
710 &sparseAllocation.memoryBinds[0], // const VkSparseMemoryBind* pBinds;
713 const VkBindSparseInfo bindInfo =
715 VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, // VkStructureType sType;
716 DE_NULL, // const void* pNext;
717 0u, // uint32_t waitSemaphoreCount;
718 DE_NULL, // const VkSemaphore* pWaitSemaphores;
719 1u, // uint32_t bufferBindCount;
720 &sparseBufferMemoryBindInfo, // const VkSparseBufferMemoryBindInfo* pBufferBinds;
721 0u, // uint32_t imageOpaqueBindCount;
722 DE_NULL, // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
723 0u, // uint32_t imageBindCount;
724 DE_NULL, // const VkSparseImageMemoryBindInfo* pImageBinds;
725 0u, // uint32_t signalSemaphoreCount;
726 DE_NULL, // const VkSemaphore* pSignalSemaphores;
729 const Unique<VkFence> fence(makeFence(vk, device));
731 VK_CHECK(vk.queueBindSparse(sparseQueue, 1u, &bindInfo, *fence));
732 VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~0ull)); // ~0ull == wait forever
// Common base for sparse buffer test instances: checks required sparse features,
// creates a device with sparse-binding and graphics/compute queues, and sets up a
// zero-initialized host-visible buffer to receive rendered results.
735 class SparseBufferTestInstance : public SparseResourcesBaseInstance, Renderer::Delegate
738 SparseBufferTestInstance (Context& context, const TestFlags flags)
739 : SparseResourcesBaseInstance (context)
740 , m_aliased ((flags & TEST_FLAG_ALIASED) != 0)
741 , m_residency ((flags & TEST_FLAG_RESIDENCY) != 0)
742 , m_nonResidentStrict ((flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0)
743 , m_renderSize (RENDER_SIZE, RENDER_SIZE)
744 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
745 , m_colorBufferSize (m_renderSize.x() * m_renderSize.y() * tcu::getPixelSize(mapVkFormat(m_colorFormat)))
// Fail early (NotSupported) if the implementation lacks any feature this
// flag combination depends on.
747 const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
749 if (!features.sparseBinding)
750 TCU_THROW(NotSupportedError, "Missing feature: sparseBinding");
752 if (m_residency && !features.sparseResidencyBuffer)
753 TCU_THROW(NotSupportedError, "Missing feature: sparseResidencyBuffer");
755 if (m_aliased && !features.sparseResidencyAliased)
756 TCU_THROW(NotSupportedError, "Missing feature: sparseResidencyAliased");
758 if (m_nonResidentStrict && !m_context.getDeviceProperties().sparseProperties.residencyNonResidentStrict)
759 TCU_THROW(NotSupportedError, "Missing sparse property: residencyNonResidentStrict");
// One sparse-binding queue for bind operations, one graphics+compute queue for
// rendering; they may end up in different queue families (see getSparseBufferCreateInfo).
762 QueueRequirementsVec requirements;
763 requirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
764 requirements.push_back(QueueRequirements(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 1u));
766 createDeviceSupportingQueues(requirements);
769 const DeviceInterface& vk = getDeviceInterface();
770 m_sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0u);
771 m_universalQueue = getQueue(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 0u);
773 m_sharedQueueFamilyIndices[0] = m_sparseQueue.queueFamilyIndex;
774 m_sharedQueueFamilyIndices[1] = m_universalQueue.queueFamilyIndex;
776 m_colorBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
777 m_colorBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_colorBuffer, MemoryRequirement::HostVisible);
// Zero-fill so verifyDrawResult can treat all-zero pixels as "never written".
779 deMemset(m_colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(m_colorBufferSize));
780 flushMappedMemoryRange(vk, getDevice(), m_colorBufferAlloc->getMemory(), m_colorBufferAlloc->getOffset(), m_colorBufferSize);
// Template create-info for the sparse buffer under test: sparse flags derived
// from the test configuration, size left as 0 to be overridden by the caller,
// and concurrent sharing enabled only when the sparse and universal queues live
// in different queue families.
784 VkBufferCreateInfo getSparseBufferCreateInfo (const VkBufferUsageFlags usage) const
786 VkBufferCreateFlags flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
788 flags |= VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
790 flags |= VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
792 VkBufferCreateInfo referenceBufferCreateInfo =
794 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
795 DE_NULL, // const void* pNext;
796 flags, // VkBufferCreateFlags flags;
797 0u, // override later // VkDeviceSize size;
798 VK_BUFFER_USAGE_TRANSFER_DST_BIT | usage, // VkBufferUsageFlags usage;
799 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
800 0u, // uint32_t queueFamilyIndexCount;
801 DE_NULL, // const uint32_t* pQueueFamilyIndices;
804 if (m_sparseQueue.queueFamilyIndex != m_universalQueue.queueFamilyIndex)
806 referenceBufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
807 referenceBufferCreateInfo.queueFamilyIndexCount = DE_LENGTH_OF_ARRAY(m_sharedQueueFamilyIndices);
808 referenceBufferCreateInfo.pQueueFamilyIndices = m_sharedQueueFamilyIndices;
811 return referenceBufferCreateInfo;
// Render one frame into m_colorBuffer using the "vert"/"frag" programs, clearing
// to red so that untouched pixels are flagged as errors by verifyDrawResult.
// This instance serves as the Renderer::Delegate (hence the *this argument).
814 void draw (const VkPrimitiveTopology topology,
815 const VkDescriptorSetLayout descriptorSetLayout = DE_NULL,
816 Renderer::SpecializationMap specMap = Renderer::SpecializationMap())
818 const UniquePtr<Renderer> renderer(new Renderer(
819 getDeviceInterface(), getDevice(), getAllocator(), m_universalQueue.queueFamilyIndex, descriptorSetLayout,
820 m_context.getBinaryCollection(), "vert", "frag", *m_colorBuffer, m_renderSize, m_colorFormat, Vec4(1.0f, 0.0f, 0.0f, 1.0f), topology, specMap));
822 renderer->draw(getDeviceInterface(), getDevice(), m_universalQueue.queueHandle, *this);
// Read back the rendered image from the host-visible buffer, log it, and fail
// if any pixel is red (shader-reported error) or all-zero (never written).
825 tcu::TestStatus verifyDrawResult (void) const
827 invalidateMappedMemoryRange(getDeviceInterface(), getDevice(), m_colorBufferAlloc->getMemory(), 0ull, m_colorBufferSize);
829 const tcu::ConstPixelBufferAccess resultImage (mapVkFormat(m_colorFormat), m_renderSize.x(), m_renderSize.y(), 1u, m_colorBufferAlloc->getHostPtr());
831 m_context.getTestContext().getLog()
832 << tcu::LogImageSet("Result", "Result") << tcu::LogImage("color0", "", resultImage) << tcu::TestLog::EndImageSet;
834 if (imageHasErrorPixels(resultImage))
835 return tcu::TestStatus::fail("Some buffer values were incorrect");
837 return tcu::TestStatus::pass("Pass");
// Test configuration decoded from TestFlags in the constructor.
840 const bool m_aliased;
841 const bool m_residency;
842 const bool m_nonResidentStrict;
845 Queue m_universalQueue; //!< graphics+compute queue used for rendering
// Framebuffer / readback configuration.
848 const IVec2 m_renderSize;
849 const VkFormat m_colorFormat;
850 const VkDeviceSize m_colorBufferSize;
852 Move<VkBuffer> m_colorBuffer; //!< host-readable render-result buffer
853 MovePtr<Allocation> m_colorBufferAlloc;
855 deUint32 m_sharedQueueFamilyIndices[2]; //!< sparse + universal families, for CONCURRENT sharing
// Builds the shader programs for the UBO tests: a pass-through vertex shader
// and a fragment shader that checks every UBO element it can reach against
// the reference pattern (3*ndx ^ 127), writing green on success, red on failure.
858 void initProgramsDrawWithUBO (vk::SourceCollections& programCollection, const TestFlags flags)
// Pass-through vertex shader.
862 std::ostringstream src;
863 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
865 << "layout(location = 0) in vec4 in_position;\n"
867 << "out gl_PerVertex {\n"
868 << " vec4 gl_Position;\n"
871 << "void main(void)\n"
873 << " gl_Position = in_position;\n"
876 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
// Fragment shader: behavior depends on which sparse features are under test.
881 const bool aliased = (flags & TEST_FLAG_ALIASED) != 0;
882 const bool residency = (flags & TEST_FLAG_RESIDENCY) != 0;
883 const bool nonResidentStrict = (flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0;
// With aliasing the expected values wrap modulo the non-aliased size, since
// the tail of the buffer aliases earlier memory.
884 const std::string valueExpr = (aliased ? "ivec4(3*(ndx % nonAliasedSize) ^ 127, 0, 0, 0)" : "ivec4(3*ndx ^ 127, 0, 0, 0)");
886 std::ostringstream src;
887 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
889 << "layout(location = 0) out vec4 o_color;\n"
// dataSize and chunkSize arrive via specialization constants at pipeline creation.
891 << "layout(constant_id = 1) const int dataSize = 1;\n"
892 << "layout(constant_id = 2) const int chunkSize = 1;\n"
894 << "layout(set = 0, binding = 0, std140) uniform SparseBuffer {\n"
895 << " ivec4 data[dataSize];\n"
898 << "void main(void)\n"
900 << " const int fragNdx = int(gl_FragCoord.x) + " << RENDER_SIZE << " * int(gl_FragCoord.y);\n"
901 << " const int pageSize = " << RENDER_SIZE << " * " << RENDER_SIZE << ";\n"
902 << " const int numChunks = dataSize / chunkSize;\n";
905 src << " const int nonAliasedSize = (numChunks > 1 ? dataSize - chunkSize : dataSize);\n";
// Each fragment strides over the buffer so that, collectively, all fragments
// cover every element.
907 src << " bool ok = true;\n"
909 << " for (int ndx = fragNdx; ndx < dataSize; ndx += pageSize)\n"
// NOTE(review): the [chunkSize, 2*chunkSize) range appears to correspond to
// the resource hole the test instance leaves unbound — confirm against the
// SparseAllocationBuilder usage in the test instances.
912 if (residency && nonResidentStrict)
// residencyNonResidentStrict: reads from unbound ranges must return zeros.
914 src << " if (ndx >= chunkSize && ndx < 2*chunkSize)\n"
915 << " ok = ok && (ubo.data[ndx] == ivec4(0));\n"
917 << " ok = ok && (ubo.data[ndx] == " + valueExpr + ");\n";
// Plain residency: values in that range are undefined, so it is not validated.
921 src << " if (ndx >= chunkSize && ndx < 2*chunkSize)\n"
923 << " ok = ok && (ubo.data[ndx] == " << valueExpr << ");\n";
926 src << " ok = ok && (ubo.data[ndx] == " << valueExpr << ");\n";
// Green == all checked elements matched; red == at least one mismatch.
931 << " o_color = vec4(0.0, 1.0, 0.0, 1.0);\n"
933 << " o_color = vec4(1.0, 0.0, 0.0, 1.0);\n"
936 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
940 //! Sparse buffer backing a UBO
941 class UBOTestInstance : public SparseBufferTestInstance
// Test: back a uniform buffer with sparse memory, upload a reference pattern,
// and let the fragment shader verify every element it can see.
944 UBOTestInstance (Context& context, const TestFlags flags)
945 : SparseBufferTestInstance (context, flags)
// Record draw commands: fullscreen quad (4-vertex triangle strip) with the
// sparse-backed UBO bound at set 0, binding 0.
949 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
951 const DeviceInterface& vk = getDeviceInterface();
952 const VkDeviceSize vertexOffset = 0ull;
954 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
955 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
956 vk.cmdDraw (cmdBuffer, 4u, 1u, 0u, 0u);
959 tcu::TestStatus iterate (void)
961 const DeviceInterface& vk = getDeviceInterface();
962 MovePtr<SparseAllocation> sparseAllocation;
963 Move<VkBuffer> sparseBuffer;
964 Move<VkBuffer> sparseBufferAliased;
966 // Set up the sparse buffer
968 VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
969 const VkDeviceSize minChunkSize = 512u; // make sure the smallest allocation is at least this big
970 deUint32 numMaxChunks = 0u;
972 // Check how many chunks we can allocate given the alignment and size requirements of UBOs
974 const UniquePtr<SparseAllocation> minAllocation(SparseAllocationBuilder()
976 .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize));
978 numMaxChunks = deMaxu32(static_cast<deUint32>(m_context.getDeviceProperties().limits.maxUniformBufferRange / minAllocation->resourceSize), 1u);
// A small maxUniformBufferRange can't fit the multi-chunk layout; fall back
// to a trivial single-bind allocation.
981 if (numMaxChunks < 4)
983 sparseAllocation = SparseAllocationBuilder()
985 .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
989 // Try to use a non-trivial memory allocation scheme to make it different from a non-sparse binding
990 SparseAllocationBuilder builder;
991 builder.addMemoryBind();
994 builder.addResourceHole();
997 .addMemoryAllocation()
// Aliased case: bind the first memory block at a second resource location too.
1002 builder.addAliasedMemoryBind(0u, 0u);
1004 sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
1005 DE_ASSERT(sparseAllocation->resourceSize <= m_context.getDeviceProperties().limits.maxUniformBufferRange);
1008 // Create the buffer
1009 referenceBufferCreateInfo.size = sparseAllocation->resourceSize;
1010 sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1011 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBuffer, *sparseAllocation);
// Aliased case: a second buffer bound to the same memory; data written through
// one buffer must be readable through the other.
1015 sparseBufferAliased = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1016 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBufferAliased, *sparseAllocation);
// Upload the reference pattern via a host-visible staging buffer.
1022 const bool hasAliasedChunk = (m_aliased && sparseAllocation->memoryBinds.size() > 1u);
1023 const VkDeviceSize chunkSize = sparseAllocation->resourceSize / sparseAllocation->numResourceChunks;
1024 const VkDeviceSize stagingBufferSize = sparseAllocation->resourceSize - (hasAliasedChunk ? chunkSize : 0);
1025 const deUint32 numBufferEntries = static_cast<deUint32>(stagingBufferSize / sizeof(IVec4));
1027 const Unique<VkBuffer> stagingBuffer (makeBuffer(vk, getDevice(), makeBufferCreateInfo(stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT)));
1028 const UniquePtr<Allocation> stagingBufferAlloc (bindBuffer(vk, getDevice(), getAllocator(), *stagingBuffer, MemoryRequirement::HostVisible));
1031 // If aliased chunk is used, the staging buffer is smaller than the sparse buffer and we don't overwrite the last chunk
1032 IVec4* const pData = static_cast<IVec4*>(stagingBufferAlloc->getHostPtr());
1033 for (deUint32 i = 0; i < numBufferEntries; ++i)
1034 pData[i] = IVec4(3*i ^ 127, 0, 0, 0); // reference pattern, mirrored in the fragment shader
1036 flushMappedMemoryRange(vk, getDevice(), stagingBufferAlloc->getMemory(), stagingBufferAlloc->getOffset(), stagingBufferSize);
1038 const VkBufferCopy copyRegion =
1040 0ull, // VkDeviceSize srcOffset;
1041 0ull, // VkDeviceSize dstOffset;
1042 stagingBufferSize, // VkDeviceSize size;
1045 const Unique<VkCommandPool> cmdPool (makeCommandPool (vk, getDevice(), m_universalQueue.queueFamilyIndex));
1046 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer (vk, getDevice(), *cmdPool));
1048 beginCommandBuffer (vk, *cmdBuffer);
// Fixed mojibake: "©Region" was an HTML-entity corruption of "&copyRegion"
// (copyRegion is the VkBufferCopy declared above).
1049 vk.cmdCopyBuffer (*cmdBuffer, *stagingBuffer, *sparseBuffer, 1u, &copyRegion);
1050 endCommandBuffer (vk, *cmdBuffer);
1052 submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
1053 // Once the fence is signaled, the write is also available to the aliasing buffer.
1057 // Make sure that we don't try to access a larger range than is allowed. This only applies to a single chunk case.
1058 const deUint32 maxBufferRange = deMinu32(static_cast<deUint32>(sparseAllocation->resourceSize), m_context.getDeviceProperties().limits.maxUniformBufferRange);
// Descriptor set: a single UBO visible to the fragment stage.
1062 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1063 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_FRAGMENT_BIT)
1064 .build(vk, getDevice());
1066 m_descriptorPool = DescriptorPoolBuilder()
1067 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
1068 .build(vk, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1070 m_descriptorSet = makeDescriptorSet(vk, getDevice(), *m_descriptorPool, *m_descriptorSetLayout);
// Aliased case: read through the aliasing buffer to prove the memory is shared.
1072 const VkBuffer buffer = (m_aliased ? *sparseBufferAliased : *sparseBuffer);
1073 const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(buffer, 0ull, maxBufferRange);
1075 DescriptorSetUpdateBuilder()
1076 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &sparseBufferInfo)
1077 .update(vk, getDevice());
// Fullscreen quad (triangle strip) vertex buffer.
1082 const Vec4 vertexData[] =
1084 Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
1085 Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
1086 Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
1087 Vec4( 1.0f, 1.0f, 0.0f, 1.0f),
1090 const VkDeviceSize vertexBufferSize = sizeof(vertexData);
1092 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1093 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1095 deMemcpy(m_vertexBufferAlloc->getHostPtr(), &vertexData[0], vertexBufferSize);
1096 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
// Specialization constants: dataSize (constant_id = 1) and chunkSize
// (constant_id = 2) consumed by the fragment shader.
1101 std::vector<deInt32> specializationData;
1103 const deUint32 numBufferEntries = maxBufferRange / static_cast<deUint32>(sizeof(IVec4));
1104 const deUint32 numEntriesPerChunk = numBufferEntries / sparseAllocation->numResourceChunks;
1106 specializationData.push_back(numBufferEntries);
1107 specializationData.push_back(numEntriesPerChunk);
1110 const VkSpecializationMapEntry specMapEntries[] =
1113 1u, // uint32_t constantID;
1114 0u, // uint32_t offset;
1115 sizeof(deInt32), // size_t size;
1118 2u, // uint32_t constantID;
1119 sizeof(deInt32), // uint32_t offset;
1120 sizeof(deInt32), // size_t size;
1124 const VkSpecializationInfo specInfo =
1126 DE_LENGTH_OF_ARRAY(specMapEntries), // uint32_t mapEntryCount;
1127 specMapEntries, // const VkSpecializationMapEntry* pMapEntries;
1128 sizeInBytes(specializationData), // size_t dataSize;
1129 getDataOrNullptr(specializationData), // const void* pData;
1132 Renderer::SpecializationMap specMap;
1133 specMap[VK_SHADER_STAGE_FRAGMENT_BIT] = &specInfo;
1135 draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, *m_descriptorSetLayout, specMap);
1138 return verifyDrawResult();
// Quad vertices and the UBO descriptor objects owned by the instance.
1142 Move<VkBuffer> m_vertexBuffer;
1143 MovePtr<Allocation> m_vertexBufferAlloc;
1145 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1146 Move<VkDescriptorPool> m_descriptorPool;
1147 Move<VkDescriptorSet> m_descriptorSet;
// Builds the shared programs for the vertex/index/indirect buffer tests:
// a pass-through vertex shader that also forwards gl_VertexIndex, and a
// fragment shader that shades by the parity of the flat-interpolated index
// (white for even, light grey for odd) so individual grid tiles are visible.
1150 void initProgramsDrawGrid (vk::SourceCollections& programCollection, const TestFlags flags)
// Vertex shader.
1156 std::ostringstream src;
1157 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1159 << "layout(location = 0) in vec4 in_position;\n"
1160 << "layout(location = 0) out int out_ndx;\n"
1162 << "out gl_PerVertex {\n"
1163 << " vec4 gl_Position;\n"
1166 << "void main(void)\n"
1168 << " gl_Position = in_position;\n"
1169 << " out_ndx = gl_VertexIndex;\n"
1172 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
// Fragment shader.
1177 std::ostringstream src;
1178 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1180 << "layout(location = 0) flat in int in_ndx;\n"
1181 << "layout(location = 0) out vec4 o_color;\n"
1183 << "void main(void)\n"
1185 << " if (in_ndx % 2 == 0)\n"
1186 << " o_color = vec4(vec3(1.0), 1.0);\n"
1188 << " o_color = vec4(vec3(0.75), 1.0);\n"
1191 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
1195 //! Generate vertex positions for a grid of tiles composed of two triangles each (6 vertices)
1196 void generateGrid (void* pRawData, const float step, const float ox, const float oy, const deUint32 numX, const deUint32 numY, const float z = 0.0f)
1198 typedef Vec4 (*TilePtr)[6];
1200 TilePtr const pData = static_cast<TilePtr>(pRawData);
1202 for (deUint32 iy = 0; iy < numY; ++iy)
1203 for (deUint32 ix = 0; ix < numX; ++ix)
1205 const deUint32 ndx = ix + numX * iy;
1206 const float x = ox + step * static_cast<float>(ix);
1207 const float y = oy + step * static_cast<float>(iy);
1209 pData[ndx][0] = Vec4(x + step, y, z, 1.0f);
1210 pData[ndx][1] = Vec4(x, y, z, 1.0f);
1211 pData[ndx][2] = Vec4(x, y + step, z, 1.0f);
1213 pData[ndx][3] = Vec4(x, y + step, z, 1.0f);
1214 pData[ndx][4] = Vec4(x + step, y + step, z, 1.0f);
1215 pData[ndx][5] = Vec4(x + step, y, z, 1.0f);
1220 //! Base test for a sparse buffer backing a vertex/index buffer
1221 class DrawGridTestInstance : public SparseBufferTestInstance
// Creates a sparse buffer with the given usage, laid out as chunks that each
// back one of two draw calls. Subclasses fill the staging data in
// initializeBuffers() and record the two draws in rendererDraw().
1224 DrawGridTestInstance (Context& context, const TestFlags flags, const VkBufferUsageFlags usage, const VkDeviceSize minChunkSize)
1225 : SparseBufferTestInstance (context, flags)
1227 const DeviceInterface& vk = getDeviceInterface();
1228 VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(usage);
1231 // Allocate two chunks, each covering half of the viewport
1232 SparseAllocationBuilder builder;
1233 builder.addMemoryBind();
1236 builder.addResourceHole();
1239 .addMemoryAllocation()
// Aliased case: bind the first memory block at a second resource location too.
1244 builder.addAliasedMemoryBind(0u, 0u);
1246 m_sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
1249 // Create the buffer
1250 referenceBufferCreateInfo.size = m_sparseAllocation->resourceSize;
1251 m_sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1254 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *m_sparseBuffer, *m_sparseAllocation);
// Host-visible staging buffer holding the payload for both draws (one chunk each).
1256 m_perDrawBufferOffset = m_sparseAllocation->resourceSize / m_sparseAllocation->numResourceChunks;
1257 m_stagingBufferSize = 2 * m_perDrawBufferOffset;
1258 m_stagingBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT));
1259 m_stagingBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_stagingBuffer, MemoryRequirement::HostVisible);
1262 tcu::TestStatus iterate (void)
1264 initializeBuffers();
1266 const DeviceInterface& vk = getDeviceInterface();
1268 // Upload to the sparse buffer
1270 flushMappedMemoryRange(vk, getDevice(), m_stagingBufferAlloc->getMemory(), m_stagingBufferAlloc->getOffset(), m_stagingBufferSize);
// Destination offsets depend on the allocation layout chosen in the ctor.
// NOTE(review): the adjustments below appear to account for the residency
// hole and the aliased region — confirm against SparseAllocationBuilder.
1272 VkDeviceSize firstChunkOffset = 0ull;
1273 VkDeviceSize secondChunkOffset = m_perDrawBufferOffset;
1276 secondChunkOffset += m_perDrawBufferOffset;
1279 firstChunkOffset = secondChunkOffset + m_perDrawBufferOffset;
1281 const VkBufferCopy copyRegions[] =
1284 0ull, // VkDeviceSize srcOffset;
1285 firstChunkOffset, // VkDeviceSize dstOffset;
1286 m_perDrawBufferOffset, // VkDeviceSize size;
1289 m_perDrawBufferOffset, // VkDeviceSize srcOffset;
1290 secondChunkOffset, // VkDeviceSize dstOffset;
1291 m_perDrawBufferOffset, // VkDeviceSize size;
1295 const Unique<VkCommandPool> cmdPool (makeCommandPool (vk, getDevice(), m_universalQueue.queueFamilyIndex));
1296 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer (vk, getDevice(), *cmdPool));
1298 beginCommandBuffer (vk, *cmdBuffer);
1299 vk.cmdCopyBuffer (*cmdBuffer, *m_stagingBuffer, *m_sparseBuffer, DE_LENGTH_OF_ARRAY(copyRegions), copyRegions);
1300 endCommandBuffer (vk, *cmdBuffer);
1302 submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
1305 draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
1307 return verifyDrawResult();
// Subclasses populate the staging buffer (and any auxiliary buffers) here.
1311 virtual void initializeBuffers (void) = 0;
// Byte distance between the two draws' data chunks within the sparse buffer.
1313 VkDeviceSize m_perDrawBufferOffset;
1315 VkDeviceSize m_stagingBufferSize;
1316 Move<VkBuffer> m_stagingBuffer;
1317 MovePtr<Allocation> m_stagingBufferAlloc;
1319 MovePtr<SparseAllocation> m_sparseAllocation;
1320 Move<VkBuffer> m_sparseBuffer;
1323 //! Sparse buffer backing a vertex input buffer
1324 class VertexBufferTestInstance : public DrawGridTestInstance
1327 VertexBufferTestInstance (Context& context, const TestFlags flags)
1328 : DrawGridTestInstance (context,
1330 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
// Minimum chunk size: a full grid of tiles, 6 vertices (Vec4s) per tile.
1331 GRID_SIZE * GRID_SIZE * 6 * sizeof(Vec4))
// Two draws, each sourcing half of the grid's vertices from its own chunk.
1335 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1337 DE_UNREF(pipelineLayout);
1339 m_context.getTestContext().getLog()
1340 << tcu::TestLog::Message << "Drawing a grid of triangles backed by a sparse vertex buffer. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1342 const DeviceInterface& vk = getDeviceInterface();
1343 const deUint32 vertexCount = 6 * (GRID_SIZE * GRID_SIZE) / 2;
1344 VkDeviceSize vertexOffset = 0ull;
1346 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_sparseBuffer.get(), &vertexOffset);
1347 vk.cmdDraw (cmdBuffer, vertexCount, 1u, 0u, 0u);
// With residency, skip an extra chunk's worth of bytes — the second data
// chunk lives past the resource hole (see DrawGridTestInstance).
1349 vertexOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1351 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_sparseBuffer.get(), &vertexOffset);
1352 vk.cmdDraw (cmdBuffer, vertexCount, 1u, 0u, 0u);
// Generate the top and bottom halves of the grid into the two staging chunks.
1355 void initializeBuffers (void)
1357 deUint8* pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr());
1358 const float step = 2.0f / static_cast<float>(GRID_SIZE);
1360 // Prepare data for two draw calls
1361 generateGrid(pData, step, -1.0f, -1.0f, GRID_SIZE, GRID_SIZE/2);
1362 generateGrid(pData + m_perDrawBufferOffset, step, -1.0f, 0.0f, GRID_SIZE, GRID_SIZE/2);
1366 //! Sparse buffer backing an index buffer
1367 class IndexBufferTestInstance : public DrawGridTestInstance
1370 IndexBufferTestInstance (Context& context, const TestFlags flags)
1371 : DrawGridTestInstance (context,
1373 VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
// Minimum chunk size: one 32-bit index per grid vertex.
1374 GRID_SIZE * GRID_SIZE * 6 * sizeof(deUint32))
1375 , m_halfVertexCount (6 * (GRID_SIZE * GRID_SIZE) / 2)
// Two indexed draws; each sparse-buffer chunk holds the indices for half of the grid.
1379 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1381 DE_UNREF(pipelineLayout);
1383 m_context.getTestContext().getLog()
1384 << tcu::TestLog::Message << "Drawing a grid of triangles from a sparse index buffer. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1386 const DeviceInterface& vk = getDeviceInterface();
1387 const VkDeviceSize vertexOffset = 0ull;
1388 VkDeviceSize indexOffset = 0ull;
1390 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
1392 vk.cmdBindIndexBuffer (cmdBuffer, *m_sparseBuffer, indexOffset, VK_INDEX_TYPE_UINT32);
1393 vk.cmdDrawIndexed (cmdBuffer, m_halfVertexCount, 1u, 0u, 0, 0u);
// With residency, skip an extra chunk — the second data chunk is past the
// resource hole (see DrawGridTestInstance).
1395 indexOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1397 vk.cmdBindIndexBuffer (cmdBuffer, *m_sparseBuffer, indexOffset, VK_INDEX_TYPE_UINT32);
1398 vk.cmdDrawIndexed (cmdBuffer, m_halfVertexCount, 1u, 0u, 0, 0u);
1401 void initializeBuffers (void)
// Regular (non-sparse) vertex buffer containing the full grid.
1404 const DeviceInterface& vk = getDeviceInterface();
1405 const VkDeviceSize vertexBufferSize = 2 * m_halfVertexCount * sizeof(Vec4);
1406 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1407 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1410 const float step = 2.0f / static_cast<float>(GRID_SIZE);
1412 generateGrid(m_vertexBufferAlloc->getHostPtr(), step, -1.0f, -1.0f, GRID_SIZE, GRID_SIZE);
1414 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
1417 // Sparse index buffer
// Sequential indices: chunk 0 addresses the first half of the vertices, chunk 1 the second half.
1418 for (deUint32 chunkNdx = 0u; chunkNdx < 2; ++chunkNdx)
1420 deUint8* const pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr()) + chunkNdx * m_perDrawBufferOffset;
1421 deUint32* const pIndexData = reinterpret_cast<deUint32*>(pData);
1422 const deUint32 ndxBase = chunkNdx * m_halfVertexCount;
1424 for (deUint32 i = 0u; i < m_halfVertexCount; ++i)
1425 pIndexData[i] = ndxBase + i;
// Number of indices issued per draw (half of the whole grid).
1430 const deUint32 m_halfVertexCount;
1431 Move<VkBuffer> m_vertexBuffer;
1432 MovePtr<Allocation> m_vertexBufferAlloc;
1435 //! Draw from a sparse indirect buffer
1436 class IndirectBufferTestInstance : public DrawGridTestInstance
1439 IndirectBufferTestInstance (Context& context, const TestFlags flags)
1440 : DrawGridTestInstance (context,
1442 VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT,
// Minimum chunk size: each chunk holds a single draw command.
1443 sizeof(VkDrawIndirectCommand))
// Two indirect draws, each fetching its VkDrawIndirectCommand from its own chunk.
1447 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1449 DE_UNREF(pipelineLayout);
1451 m_context.getTestContext().getLog()
1452 << tcu::TestLog::Message << "Drawing two triangles covering the whole viewport. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1454 const DeviceInterface& vk = getDeviceInterface();
1455 const VkDeviceSize vertexOffset = 0ull;
1456 VkDeviceSize indirectOffset = 0ull;
1458 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
1459 vk.cmdDrawIndirect (cmdBuffer, *m_sparseBuffer, indirectOffset, 1u, 0u);
// With residency, skip an extra chunk — the second command is past the
// resource hole (see DrawGridTestInstance).
1461 indirectOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1463 vk.cmdDrawIndirect (cmdBuffer, *m_sparseBuffer, indirectOffset, 1u, 0u);
1466 void initializeBuffers (void)
// Regular (non-sparse) vertex buffer: a single fullscreen tile (two triangles).
1469 const DeviceInterface& vk = getDeviceInterface();
1470 const VkDeviceSize vertexBufferSize = 2 * 3 * sizeof(Vec4);
1471 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1472 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1475 generateGrid(m_vertexBufferAlloc->getHostPtr(), 2.0f, -1.0f, -1.0f, 1, 1);
1476 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
// One VkDrawIndirectCommand per staging chunk; each draws one of the two
// triangles (3 vertices starting at 0 and 3, respectively).
1480 for (deUint32 chunkNdx = 0u; chunkNdx < 2; ++chunkNdx)
1482 deUint8* const pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr()) + chunkNdx * m_perDrawBufferOffset;
1483 VkDrawIndirectCommand* const pCmdData = reinterpret_cast<VkDrawIndirectCommand*>(pData);
1485 pCmdData->firstVertex = 3u * chunkNdx;
1486 pCmdData->firstInstance = 0u;
1487 pCmdData->vertexCount = 3u;
1488 pCmdData->instanceCount = 1u;
1493 Move<VkBuffer> m_vertexBuffer;
1494 MovePtr<Allocation> m_vertexBufferAlloc;
1497 //! Similar to the class in vktTestCaseUtil.hpp, but uses Arg0 directly rather than through a InstanceFunction1
1498 template<typename Arg0>
1499 class FunctionProgramsSimple1
1502 typedef void (*Function) (vk::SourceCollections& dst, Arg0 arg0);
1503 FunctionProgramsSimple1 (Function func) : m_func(func) {}
1504 void init (vk::SourceCollections& dst, const Arg0& arg0) const { m_func(dst, arg0); }
1507 const Function m_func;
1510 //! Convenience function to create a TestCase based on a freestanding initPrograms and a TestInstance implementation
1511 template<typename TestInstanceT, typename Arg0>
1512 TestCase* createTestInstanceWithPrograms (tcu::TestContext& testCtx,
1513 const std::string& name,
1514 const std::string& desc,
1515 typename FunctionProgramsSimple1<Arg0>::Function initPrograms,
1518 return new InstanceFactory1<TestInstanceT, Arg0, FunctionProgramsSimple1<Arg0> >(
1519 testCtx, tcu::NODETYPE_SELF_VALIDATE, name, desc, FunctionProgramsSimple1<Arg0>(initPrograms), arg0);
// Builds the sparse buffer test hierarchy: transfer/ssbo groups delegate to
// the legacy test files, while ubo/vertex_buffer/index_buffer/indirect_buffer
// groups instantiate the test classes above for each flag combination.
1522 void populateTestGroup (tcu::TestCaseGroup* parentGroup)
// Flag combinations; the non_resident_strict entry is only used by groups
// that iterate the full list (see numGroupsIncludingNonResidentStrict).
1530 { "sparse_binding", 0u },
1531 { "sparse_binding_aliased", TEST_FLAG_ALIASED, },
1532 { "sparse_residency", TEST_FLAG_RESIDENCY, },
1533 { "sparse_residency_aliased", TEST_FLAG_RESIDENCY | TEST_FLAG_ALIASED, },
1534 { "sparse_residency_non_resident_strict", TEST_FLAG_RESIDENCY | TEST_FLAG_NON_RESIDENT_STRICT, },
1537 const int numGroupsIncludingNonResidentStrict = DE_LENGTH_OF_ARRAY(groups);
1538 const int numGroupsDefaultList = numGroupsIncludingNonResidentStrict - 1;
// transfer: delegates to addBufferSparseBindingTests().
1542 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "transfer", ""));
1544 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding", ""));
1545 addBufferSparseBindingTests(subGroup.get());
1546 group->addChild(subGroup.release());
1548 parentGroup->addChild(group.release());
// ssbo: delegates to the memory-aliasing and residency test files.
1553 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ssbo", ""));
1555 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding_aliased", ""));
1556 addBufferSparseMemoryAliasingTests(subGroup.get());
1557 group->addChild(subGroup.release());
1560 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_residency", ""));
1561 addBufferSparseResidencyTests(subGroup.get());
1562 group->addChild(subGroup.release());
1564 parentGroup->addChild(group.release());
// ubo: uses the full flag list, including non_resident_strict.
1569 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ubo", ""));
1571 for (int groupNdx = 0u; groupNdx < numGroupsIncludingNonResidentStrict; ++groupNdx)
1572 group->addChild(createTestInstanceWithPrograms<UBOTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawWithUBO, groups[groupNdx].flags));
1574 parentGroup->addChild(group.release());
// vertex_buffer / index_buffer / indirect_buffer: use the default (shorter) flag list.
1579 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "vertex_buffer", ""));
1581 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1582 group->addChild(createTestInstanceWithPrograms<VertexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1584 parentGroup->addChild(group.release());
1589 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "index_buffer", ""));
1591 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1592 group->addChild(createTestInstanceWithPrograms<IndexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1594 parentGroup->addChild(group.release());
1599 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "indirect_buffer", ""));
1601 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1602 group->addChild(createTestInstanceWithPrograms<IndirectBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1604 parentGroup->addChild(group.release());
// Public entry point: creates the top-level "buffer" group and defers
// population to populateTestGroup() above.
1610 tcu::TestCaseGroup* createSparseBufferTests (tcu::TestContext& testCtx)
1612 return createTestGroup(testCtx, "buffer", "Sparse buffer usage tests", populateTestGroup);