1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Sparse buffer tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktSparseResourcesBufferTests.hpp"
25 #include "vktTestCaseUtil.hpp"
26 #include "vktTestGroupUtil.hpp"
27 #include "vktSparseResourcesTestsUtil.hpp"
28 #include "vktSparseResourcesBase.hpp"
29 #include "vktSparseResourcesBufferSparseBinding.hpp"
30 #include "vktSparseResourcesBufferSparseResidency.hpp"
31 #include "vktSparseResourcesBufferMemoryAliasing.hpp"
34 #include "vkRefUtil.hpp"
35 #include "vkPlatform.hpp"
36 #include "vkPrograms.hpp"
37 #include "vkMemUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkQueryUtil.hpp"
40 #include "vkTypeUtil.hpp"
42 #include "tcuTestLog.hpp"
44 #include "deUniquePtr.hpp"
45 #include "deSharedPtr.hpp"
67 typedef SharedPtr<UniquePtr<Allocation> > AllocationSp;
71 RENDER_SIZE = 128, //!< framebuffer size in pixels
72 GRID_SIZE = RENDER_SIZE / 8, //!< number of grid tiles in a row
77 // sparseBinding is implied
78 TEST_FLAG_ALIASED = 1u << 0, //!< sparseResidencyAliased
79 TEST_FLAG_RESIDENCY = 1u << 1, //!< sparseResidencyBuffer
80 TEST_FLAG_NON_RESIDENT_STRICT = 1u << 2, //!< residencyNonResidentStrict
82 typedef deUint32 TestFlags;
84 //! SparseAllocationBuilder output. Owns the allocated memory.
85 struct SparseAllocation
87 deUint32 numResourceChunks;
88 VkDeviceSize resourceSize; //!< buffer size in bytes
89 std::vector<AllocationSp> allocations; //!< actual allocated memory
90 std::vector<VkSparseMemoryBind> memoryBinds; //!< memory binds backing the resource
93 //! Utility to lay out memory allocations for a sparse buffer, including holes and aliased regions.
94 //! Will allocate memory upon building.
95 class SparseAllocationBuilder
98 SparseAllocationBuilder (void);
100 // \note "chunk" is the smallest (due to alignment) bindable amount of memory
102 SparseAllocationBuilder& addMemoryHole (const deUint32 numChunks = 1u);
103 SparseAllocationBuilder& addResourceHole (const deUint32 numChunks = 1u);
104 SparseAllocationBuilder& addMemoryBind (const deUint32 numChunks = 1u);
105 SparseAllocationBuilder& addAliasedMemoryBind (const deUint32 allocationNdx, const deUint32 chunkOffset, const deUint32 numChunks = 1u);
106 SparseAllocationBuilder& addMemoryAllocation (void);
108 MovePtr<SparseAllocation> build (const DeviceInterface& vk,
109 const VkDevice device,
110 Allocator& allocator,
111 VkBufferCreateInfo referenceCreateInfo, //!< buffer size is ignored in this info
112 const VkDeviceSize minChunkSize = 0ull) const; //!< make sure chunks are at least this big
117 deUint32 allocationNdx;
118 deUint32 resourceChunkNdx;
119 deUint32 memoryChunkNdx;
123 deUint32 m_allocationNdx;
124 deUint32 m_resourceChunkNdx;
125 deUint32 m_memoryChunkNdx;
126 std::vector<MemoryBind> m_memoryBinds;
127 std::vector<deUint32> m_chunksPerAllocation;
131 SparseAllocationBuilder::SparseAllocationBuilder (void)
132 : m_allocationNdx (0)
133 , m_resourceChunkNdx (0)
134 , m_memoryChunkNdx (0)
136 m_chunksPerAllocation.push_back(0);
139 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryHole (const deUint32 numChunks)
141 m_memoryChunkNdx += numChunks;
142 m_chunksPerAllocation[m_allocationNdx] += numChunks;
147 SparseAllocationBuilder& SparseAllocationBuilder::addResourceHole (const deUint32 numChunks)
149 m_resourceChunkNdx += numChunks;
154 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryAllocation (void)
156 DE_ASSERT(m_memoryChunkNdx != 0); // doesn't make sense to have an empty allocation
158 m_allocationNdx += 1;
159 m_memoryChunkNdx = 0;
160 m_chunksPerAllocation.push_back(0);
165 SparseAllocationBuilder& SparseAllocationBuilder::addMemoryBind (const deUint32 numChunks)
167 const MemoryBind memoryBind =
174 m_memoryBinds.push_back(memoryBind);
176 m_resourceChunkNdx += numChunks;
177 m_memoryChunkNdx += numChunks;
178 m_chunksPerAllocation[m_allocationNdx] += numChunks;
183 SparseAllocationBuilder& SparseAllocationBuilder::addAliasedMemoryBind (const deUint32 allocationNdx, const deUint32 chunkOffset, const deUint32 numChunks)
185 DE_ASSERT(allocationNdx <= m_allocationNdx);
187 const MemoryBind memoryBind =
194 m_memoryBinds.push_back(memoryBind);
196 m_resourceChunkNdx += numChunks;
201 inline VkMemoryRequirements requirementsWithSize (VkMemoryRequirements requirements, const VkDeviceSize size)
203 requirements.size = size;
207 inline VkDeviceSize alignSize (const VkDeviceSize val, const VkDeviceSize align)
209 DE_ASSERT(deIsPowerOfTwo64(align));
210 return (val + align - 1) & ~(align - 1);
213 MovePtr<SparseAllocation> SparseAllocationBuilder::build (const DeviceInterface& vk,
214 const VkDevice device,
215 Allocator& allocator,
216 VkBufferCreateInfo referenceCreateInfo,
217 const VkDeviceSize minChunkSize) const
220 MovePtr<SparseAllocation> sparseAllocation (new SparseAllocation());
222 referenceCreateInfo.size = sizeof(deUint32);
223 const Unique<VkBuffer> refBuffer (createBuffer(vk, device, &referenceCreateInfo));
224 const VkMemoryRequirements memoryRequirements = getBufferMemoryRequirements(vk, device, *refBuffer);
225 const VkDeviceSize chunkSize = std::max(memoryRequirements.alignment, alignSize(minChunkSize, memoryRequirements.alignment));
227 for (std::vector<deUint32>::const_iterator numChunksIter = m_chunksPerAllocation.begin(); numChunksIter != m_chunksPerAllocation.end(); ++numChunksIter)
229 sparseAllocation->allocations.push_back(makeDeSharedPtr(
230 allocator.allocate(requirementsWithSize(memoryRequirements, *numChunksIter * chunkSize), MemoryRequirement::Any)));
233 for (std::vector<MemoryBind>::const_iterator memBindIter = m_memoryBinds.begin(); memBindIter != m_memoryBinds.end(); ++memBindIter)
235 const Allocation& alloc = **sparseAllocation->allocations[memBindIter->allocationNdx];
236 const VkSparseMemoryBind bind =
238 memBindIter->resourceChunkNdx * chunkSize, // VkDeviceSize resourceOffset;
239 memBindIter->numChunks * chunkSize, // VkDeviceSize size;
240 alloc.getMemory(), // VkDeviceMemory memory;
241 alloc.getOffset() + memBindIter->memoryChunkNdx * chunkSize, // VkDeviceSize memoryOffset;
242 (VkSparseMemoryBindFlags)0, // VkSparseMemoryBindFlags flags;
244 sparseAllocation->memoryBinds.push_back(bind);
245 referenceCreateInfo.size = std::max(referenceCreateInfo.size, bind.resourceOffset + bind.size);
248 sparseAllocation->resourceSize = referenceCreateInfo.size;
249 sparseAllocation->numResourceChunks = m_resourceChunkNdx;
251 return sparseAllocation;
254 VkImageCreateInfo makeImageCreateInfo (const VkFormat format, const IVec2& size, const VkImageUsageFlags usage)
256 const VkImageCreateInfo imageParams =
258 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
259 DE_NULL, // const void* pNext;
260 (VkImageCreateFlags)0, // VkImageCreateFlags flags;
261 VK_IMAGE_TYPE_2D, // VkImageType imageType;
262 format, // VkFormat format;
263 makeExtent3D(size.x(), size.y(), 1), // VkExtent3D extent;
264 1u, // deUint32 mipLevels;
265 1u, // deUint32 arrayLayers;
266 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
267 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
268 usage, // VkImageUsageFlags usage;
269 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
270 0u, // deUint32 queueFamilyIndexCount;
271 DE_NULL, // const deUint32* pQueueFamilyIndices;
272 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
277 Move<VkRenderPass> makeRenderPass (const DeviceInterface& vk,
278 const VkDevice device,
279 const VkFormat colorFormat)
281 const VkAttachmentDescription colorAttachmentDescription =
283 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
284 colorFormat, // VkFormat format;
285 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
286 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
287 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
288 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
289 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
290 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
291 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
294 const VkAttachmentReference colorAttachmentRef =
296 0u, // deUint32 attachment;
297 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
300 const VkSubpassDescription subpassDescription =
302 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags;
303 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
304 0u, // deUint32 inputAttachmentCount;
305 DE_NULL, // const VkAttachmentReference* pInputAttachments;
306 1u, // deUint32 colorAttachmentCount;
307 &colorAttachmentRef, // const VkAttachmentReference* pColorAttachments;
308 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
309 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
310 0u, // deUint32 preserveAttachmentCount;
311 DE_NULL // const deUint32* pPreserveAttachments;
314 const VkRenderPassCreateInfo renderPassInfo =
316 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
317 DE_NULL, // const void* pNext;
318 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags;
319 1u, // deUint32 attachmentCount;
320 &colorAttachmentDescription, // const VkAttachmentDescription* pAttachments;
321 1u, // deUint32 subpassCount;
322 &subpassDescription, // const VkSubpassDescription* pSubpasses;
323 0u, // deUint32 dependencyCount;
324 DE_NULL // const VkSubpassDependency* pDependencies;
327 return createRenderPass(vk, device, &renderPassInfo);
330 Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
331 const VkDevice device,
332 const VkPipelineLayout pipelineLayout,
333 const VkRenderPass renderPass,
334 const IVec2 renderSize,
335 const VkPrimitiveTopology topology,
336 const deUint32 stageCount,
337 const VkPipelineShaderStageCreateInfo* pStages)
339 const VkVertexInputBindingDescription vertexInputBindingDescription =
341 0u, // uint32_t binding;
342 sizeof(Vec4), // uint32_t stride;
343 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate;
346 const VkVertexInputAttributeDescription vertexInputAttributeDescription =
348 0u, // uint32_t location;
349 0u, // uint32_t binding;
350 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
351 0u, // uint32_t offset;
354 const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo =
356 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
357 DE_NULL, // const void* pNext;
358 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
359 1u, // uint32_t vertexBindingDescriptionCount;
360 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
361 1u, // uint32_t vertexAttributeDescriptionCount;
362 &vertexInputAttributeDescription, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
365 const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo =
367 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
368 DE_NULL, // const void* pNext;
369 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
370 topology, // VkPrimitiveTopology topology;
371 VK_FALSE, // VkBool32 primitiveRestartEnable;
374 const VkViewport viewport = makeViewport(
376 static_cast<float>(renderSize.x()), static_cast<float>(renderSize.y()),
379 const VkRect2D scissor = {
381 makeExtent2D(static_cast<deUint32>(renderSize.x()), static_cast<deUint32>(renderSize.y())),
384 const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo =
386 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
387 DE_NULL, // const void* pNext;
388 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags;
389 1u, // uint32_t viewportCount;
390 &viewport, // const VkViewport* pViewports;
391 1u, // uint32_t scissorCount;
392 &scissor, // const VkRect2D* pScissors;
395 const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo =
397 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
398 DE_NULL, // const void* pNext;
399 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
400 VK_FALSE, // VkBool32 depthClampEnable;
401 VK_FALSE, // VkBool32 rasterizerDiscardEnable;
402 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
403 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
404 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
405 VK_FALSE, // VkBool32 depthBiasEnable;
406 0.0f, // float depthBiasConstantFactor;
407 0.0f, // float depthBiasClamp;
408 0.0f, // float depthBiasSlopeFactor;
409 1.0f, // float lineWidth;
412 const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo =
414 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
415 DE_NULL, // const void* pNext;
416 (VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags;
417 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
418 VK_FALSE, // VkBool32 sampleShadingEnable;
419 0.0f, // float minSampleShading;
420 DE_NULL, // const VkSampleMask* pSampleMask;
421 VK_FALSE, // VkBool32 alphaToCoverageEnable;
422 VK_FALSE // VkBool32 alphaToOneEnable;
425 const VkStencilOpState stencilOpState = makeStencilOpState(
426 VK_STENCIL_OP_KEEP, // stencil fail
427 VK_STENCIL_OP_KEEP, // depth & stencil pass
428 VK_STENCIL_OP_KEEP, // depth only fail
429 VK_COMPARE_OP_ALWAYS, // compare op
434 VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo =
436 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType;
437 DE_NULL, // const void* pNext;
438 (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags;
439 VK_FALSE, // VkBool32 depthTestEnable;
440 VK_FALSE, // VkBool32 depthWriteEnable;
441 VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp;
442 VK_FALSE, // VkBool32 depthBoundsTestEnable;
443 VK_FALSE, // VkBool32 stencilTestEnable;
444 stencilOpState, // VkStencilOpState front;
445 stencilOpState, // VkStencilOpState back;
446 0.0f, // float minDepthBounds;
447 1.0f, // float maxDepthBounds;
450 const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
451 const VkPipelineColorBlendAttachmentState pipelineColorBlendAttachmentState =
453 VK_FALSE, // VkBool32 blendEnable;
454 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcColorBlendFactor;
455 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
456 VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
457 VK_BLEND_FACTOR_ONE, // VkBlendFactor srcAlphaBlendFactor;
458 VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
459 VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
460 colorComponentsAll, // VkColorComponentFlags colorWriteMask;
463 const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo =
465 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
466 DE_NULL, // const void* pNext;
467 (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags;
468 VK_FALSE, // VkBool32 logicOpEnable;
469 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
470 1u, // deUint32 attachmentCount;
471 &pipelineColorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
472 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
475 const VkGraphicsPipelineCreateInfo graphicsPipelineInfo =
477 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
478 DE_NULL, // const void* pNext;
479 (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
480 stageCount, // deUint32 stageCount;
481 pStages, // const VkPipelineShaderStageCreateInfo* pStages;
482 &vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
483 &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
484 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
485 &pipelineViewportStateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
486 &pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
487 &pipelineMultisampleStateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
488 &pipelineDepthStencilStateInfo, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
489 &pipelineColorBlendStateInfo, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
490 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
491 pipelineLayout, // VkPipelineLayout layout;
492 renderPass, // VkRenderPass renderPass;
493 0u, // deUint32 subpass;
494 DE_NULL, // VkPipeline basePipelineHandle;
495 0, // deInt32 basePipelineIndex;
498 return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo);
501 //! Return true if there are any red (or all zero) pixels in the image
502 bool imageHasErrorPixels (const tcu::ConstPixelBufferAccess image)
504 const Vec4 errorColor = Vec4(1.0f, 0.0f, 0.0f, 1.0f);
505 const Vec4 blankColor = Vec4();
507 for (int y = 0; y < image.getHeight(); ++y)
508 for (int x = 0; x < image.getWidth(); ++x)
510 const Vec4 color = image.getPixel(x, y);
511 if (color == errorColor || color == blankColor)
521 typedef std::map<VkShaderStageFlagBits, const VkSpecializationInfo*> SpecializationMap;
523 //! Use the delegate to bind descriptor sets, vertex buffers, etc. and make a draw call
526 virtual ~Delegate (void) {}
527 virtual void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const = 0;
530 Renderer (const DeviceInterface& vk,
531 const VkDevice device,
532 Allocator& allocator,
533 const deUint32 queueFamilyIndex,
534 const VkDescriptorSetLayout descriptorSetLayout, //!< may be NULL, if no descriptors are used
535 ProgramCollection<vk::ProgramBinary>& binaryCollection,
536 const std::string& vertexName,
537 const std::string& fragmentName,
538 const VkBuffer colorBuffer,
539 const IVec2& renderSize,
540 const VkFormat colorFormat,
541 const Vec4& clearColor,
542 const VkPrimitiveTopology topology,
543 SpecializationMap specMap = SpecializationMap())
544 : m_colorBuffer (colorBuffer)
545 , m_renderSize (renderSize)
546 , m_colorFormat (colorFormat)
547 , m_colorSubresourceRange (makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u))
548 , m_clearColor (clearColor)
549 , m_topology (topology)
550 , m_descriptorSetLayout (descriptorSetLayout)
552 m_colorImage = makeImage (vk, device, makeImageCreateInfo(m_colorFormat, m_renderSize, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
553 m_colorImageAlloc = bindImage (vk, device, allocator, *m_colorImage, MemoryRequirement::Any);
554 m_colorAttachment = makeImageView (vk, device, *m_colorImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorSubresourceRange);
556 m_vertexModule = createShaderModule (vk, device, binaryCollection.get(vertexName), 0u);
557 m_fragmentModule = createShaderModule (vk, device, binaryCollection.get(fragmentName), 0u);
559 const VkPipelineShaderStageCreateInfo pShaderStages[] =
562 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
563 DE_NULL, // const void* pNext;
564 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
565 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
566 *m_vertexModule, // VkShaderModule module;
567 "main", // const char* pName;
568 specMap[VK_SHADER_STAGE_VERTEX_BIT], // const VkSpecializationInfo* pSpecializationInfo;
571 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
572 DE_NULL, // const void* pNext;
573 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
574 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
575 *m_fragmentModule, // VkShaderModule module;
576 "main", // const char* pName;
577 specMap[VK_SHADER_STAGE_FRAGMENT_BIT], // const VkSpecializationInfo* pSpecializationInfo;
581 m_renderPass = makeRenderPass (vk, device, m_colorFormat);
582 m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, 1u, &m_colorAttachment.get(),
583 static_cast<deUint32>(m_renderSize.x()), static_cast<deUint32>(m_renderSize.y()));
584 m_pipelineLayout = makePipelineLayout (vk, device, m_descriptorSetLayout);
585 m_pipeline = makeGraphicsPipeline (vk, device, *m_pipelineLayout, *m_renderPass, m_renderSize, m_topology, DE_LENGTH_OF_ARRAY(pShaderStages), pShaderStages);
586 m_cmdPool = makeCommandPool (vk, device, queueFamilyIndex);
587 m_cmdBuffer = allocateCommandBuffer (vk, device, *m_cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
590 void draw (const DeviceInterface& vk,
591 const VkDevice device,
593 const Delegate& drawDelegate) const
595 beginCommandBuffer(vk, *m_cmdBuffer);
597 const VkClearValue clearValue = makeClearValueColor(m_clearColor);
598 const VkRect2D renderArea =
601 makeExtent2D(m_renderSize.x(), m_renderSize.y()),
603 const VkRenderPassBeginInfo renderPassBeginInfo =
605 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
606 DE_NULL, // const void* pNext;
607 *m_renderPass, // VkRenderPass renderPass;
608 *m_framebuffer, // VkFramebuffer framebuffer;
609 renderArea, // VkRect2D renderArea;
610 1u, // uint32_t clearValueCount;
611 &clearValue, // const VkClearValue* pClearValues;
613 vk.cmdBeginRenderPass(*m_cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
615 vk.cmdBindPipeline(*m_cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
616 drawDelegate.rendererDraw(*m_pipelineLayout, *m_cmdBuffer);
618 vk.cmdEndRenderPass(*m_cmdBuffer);
620 // Prepare color image for copy
622 const VkImageMemoryBarrier barriers[] =
625 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
626 DE_NULL, // const void* pNext;
627 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags outputMask;
628 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags inputMask;
629 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
630 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
631 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
632 VK_QUEUE_FAMILY_IGNORED, // deUint32 destQueueFamilyIndex;
633 *m_colorImage, // VkImage image;
634 m_colorSubresourceRange, // VkImageSubresourceRange subresourceRange;
638 vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u,
639 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
641 // Color image -> host buffer
643 const VkBufferImageCopy region =
645 0ull, // VkDeviceSize bufferOffset;
646 0u, // uint32_t bufferRowLength;
647 0u, // uint32_t bufferImageHeight;
648 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u), // VkImageSubresourceLayers imageSubresource;
649 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
650 makeExtent3D(m_renderSize.x(), m_renderSize.y(), 1u), // VkExtent3D imageExtent;
653 vk.cmdCopyImageToBuffer(*m_cmdBuffer, *m_colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_colorBuffer, 1u, ®ion);
655 // Buffer write barrier
657 const VkBufferMemoryBarrier barriers[] =
660 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
661 DE_NULL, // const void* pNext;
662 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
663 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
664 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex;
665 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex;
666 m_colorBuffer, // VkBuffer buffer;
667 0ull, // VkDeviceSize offset;
668 VK_WHOLE_SIZE, // VkDeviceSize size;
672 vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u,
673 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers, DE_NULL, 0u);
676 VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer));
677 submitCommandsAndWait(vk, device, queue, *m_cmdBuffer);
681 const VkBuffer m_colorBuffer;
682 const IVec2 m_renderSize;
683 const VkFormat m_colorFormat;
684 const VkImageSubresourceRange m_colorSubresourceRange;
685 const Vec4 m_clearColor;
686 const VkPrimitiveTopology m_topology;
687 const VkDescriptorSetLayout m_descriptorSetLayout;
689 Move<VkImage> m_colorImage;
690 MovePtr<Allocation> m_colorImageAlloc;
691 Move<VkImageView> m_colorAttachment;
692 Move<VkShaderModule> m_vertexModule;
693 Move<VkShaderModule> m_fragmentModule;
694 Move<VkRenderPass> m_renderPass;
695 Move<VkFramebuffer> m_framebuffer;
696 Move<VkPipelineLayout> m_pipelineLayout;
697 Move<VkPipeline> m_pipeline;
698 Move<VkCommandPool> m_cmdPool;
699 Move<VkCommandBuffer> m_cmdBuffer;
702 Renderer (const Renderer&);
703 Renderer& operator= (const Renderer&);
706 void bindSparseBuffer (const DeviceInterface& vk, const VkDevice device, const VkQueue sparseQueue, const VkBuffer buffer, const SparseAllocation& sparseAllocation)
708 const VkSparseBufferMemoryBindInfo sparseBufferMemoryBindInfo =
710 buffer, // VkBuffer buffer;
711 static_cast<deUint32>(sparseAllocation.memoryBinds.size()), // uint32_t bindCount;
712 &sparseAllocation.memoryBinds[0], // const VkSparseMemoryBind* pBinds;
715 const VkBindSparseInfo bindInfo =
717 VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, // VkStructureType sType;
718 DE_NULL, // const void* pNext;
719 0u, // uint32_t waitSemaphoreCount;
720 DE_NULL, // const VkSemaphore* pWaitSemaphores;
721 1u, // uint32_t bufferBindCount;
722 &sparseBufferMemoryBindInfo, // const VkSparseBufferMemoryBindInfo* pBufferBinds;
723 0u, // uint32_t imageOpaqueBindCount;
724 DE_NULL, // const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
725 0u, // uint32_t imageBindCount;
726 DE_NULL, // const VkSparseImageMemoryBindInfo* pImageBinds;
727 0u, // uint32_t signalSemaphoreCount;
728 DE_NULL, // const VkSemaphore* pSignalSemaphores;
731 const Unique<VkFence> fence(createFence(vk, device));
733 VK_CHECK(vk.queueBindSparse(sparseQueue, 1u, &bindInfo, *fence));
734 VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~0ull));
737 class SparseBufferTestInstance : public SparseResourcesBaseInstance, Renderer::Delegate
740 SparseBufferTestInstance (Context& context, const TestFlags flags)
741 : SparseResourcesBaseInstance (context)
742 , m_aliased ((flags & TEST_FLAG_ALIASED) != 0)
743 , m_residency ((flags & TEST_FLAG_RESIDENCY) != 0)
744 , m_nonResidentStrict ((flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0)
745 , m_renderSize (RENDER_SIZE, RENDER_SIZE)
746 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
747 , m_colorBufferSize (m_renderSize.x() * m_renderSize.y() * tcu::getPixelSize(mapVkFormat(m_colorFormat)))
749 const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
751 if (!features.sparseBinding)
752 TCU_THROW(NotSupportedError, "Missing feature: sparseBinding");
754 if (m_residency && !features.sparseResidencyBuffer)
755 TCU_THROW(NotSupportedError, "Missing feature: sparseResidencyBuffer");
757 if (m_aliased && !features.sparseResidencyAliased)
758 TCU_THROW(NotSupportedError, "Missing feature: sparseResidencyAliased");
760 if (m_nonResidentStrict && !m_context.getDeviceProperties().sparseProperties.residencyNonResidentStrict)
761 TCU_THROW(NotSupportedError, "Missing sparse property: residencyNonResidentStrict");
764 QueueRequirementsVec requirements;
765 requirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u));
766 requirements.push_back(QueueRequirements(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 1u));
768 createDeviceSupportingQueues(requirements);
771 const DeviceInterface& vk = getDeviceInterface();
772 m_sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0u);
773 m_universalQueue = getQueue(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, 0u);
775 m_sharedQueueFamilyIndices[0] = m_sparseQueue.queueFamilyIndex;
776 m_sharedQueueFamilyIndices[1] = m_universalQueue.queueFamilyIndex;
778 m_colorBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_colorBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
779 m_colorBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_colorBuffer, MemoryRequirement::HostVisible);
781 deMemset(m_colorBufferAlloc->getHostPtr(), 0, static_cast<std::size_t>(m_colorBufferSize));
782 flushMappedMemoryRange(vk, getDevice(), m_colorBufferAlloc->getMemory(), m_colorBufferAlloc->getOffset(), m_colorBufferSize);
786 VkBufferCreateInfo getSparseBufferCreateInfo (const VkBufferUsageFlags usage) const
788 VkBufferCreateFlags flags = VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
790 flags |= VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
792 flags |= VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
794 VkBufferCreateInfo referenceBufferCreateInfo =
796 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
797 DE_NULL, // const void* pNext;
798 flags, // VkBufferCreateFlags flags;
799 0u, // override later // VkDeviceSize size;
800 VK_BUFFER_USAGE_TRANSFER_DST_BIT | usage, // VkBufferUsageFlags usage;
801 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
802 0u, // uint32_t queueFamilyIndexCount;
803 DE_NULL, // const uint32_t* pQueueFamilyIndices;
806 if (m_sparseQueue.queueFamilyIndex != m_universalQueue.queueFamilyIndex)
808 referenceBufferCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
809 referenceBufferCreateInfo.queueFamilyIndexCount = DE_LENGTH_OF_ARRAY(m_sharedQueueFamilyIndices);
810 referenceBufferCreateInfo.pQueueFamilyIndices = m_sharedQueueFamilyIndices;
813 return referenceBufferCreateInfo;
816 void draw (const VkPrimitiveTopology topology,
817 const VkDescriptorSetLayout descriptorSetLayout = DE_NULL,
818 Renderer::SpecializationMap specMap = Renderer::SpecializationMap())
820 const UniquePtr<Renderer> renderer(new Renderer(
821 getDeviceInterface(), getDevice(), getAllocator(), m_universalQueue.queueFamilyIndex, descriptorSetLayout,
822 m_context.getBinaryCollection(), "vert", "frag", *m_colorBuffer, m_renderSize, m_colorFormat, Vec4(1.0f, 0.0f, 0.0f, 1.0f), topology, specMap));
824 renderer->draw(getDeviceInterface(), getDevice(), m_universalQueue.queueHandle, *this);
827 tcu::TestStatus verifyDrawResult (void) const
829 invalidateMappedMemoryRange(getDeviceInterface(), getDevice(), m_colorBufferAlloc->getMemory(), 0ull, m_colorBufferSize);
831 const tcu::ConstPixelBufferAccess resultImage (mapVkFormat(m_colorFormat), m_renderSize.x(), m_renderSize.y(), 1u, m_colorBufferAlloc->getHostPtr());
833 m_context.getTestContext().getLog()
834 << tcu::LogImageSet("Result", "Result") << tcu::LogImage("color0", "", resultImage) << tcu::TestLog::EndImageSet;
836 if (imageHasErrorPixels(resultImage))
837 return tcu::TestStatus::fail("Some buffer values were incorrect");
839 return tcu::TestStatus::pass("Pass");
842 const bool m_aliased;
843 const bool m_residency;
844 const bool m_nonResidentStrict;
847 Queue m_universalQueue;
850 const IVec2 m_renderSize;
851 const VkFormat m_colorFormat;
852 const VkDeviceSize m_colorBufferSize;
854 Move<VkBuffer> m_colorBuffer;
855 MovePtr<Allocation> m_colorBufferAlloc;
857 deUint32 m_sharedQueueFamilyIndices[2];
//! Build GLSL programs for the UBO test: a pass-through vertex shader and a fragment
//! shader that validates the contents of a sparse-backed uniform buffer.
//! Specialization constants: id 1 = dataSize (total ivec4 entries), id 2 = chunkSize
//! (entries per sparse chunk); both are set by UBOTestInstance::iterate().
860 void initProgramsDrawWithUBO (vk::SourceCollections& programCollection, const TestFlags flags)
// Vertex shader: forwards the input position unchanged.
864 std::ostringstream src;
865 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
867 << "layout(location = 0) in vec4 in_position;\n"
869 << "out gl_PerVertex {\n"
870 << " vec4 gl_Position;\n"
873 << "void main(void)\n"
875 << " gl_Position = in_position;\n"
878 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
// Fragment shader: each fragment strides through the UBO and checks the expected
// value pattern 3*ndx ^ 127 (matching the staging-buffer fill in the test instance).
883 const bool aliased = (flags & TEST_FLAG_ALIASED) != 0;
884 const bool residency = (flags & TEST_FLAG_RESIDENCY) != 0;
885 const bool nonResidentStrict = (flags & TEST_FLAG_NON_RESIDENT_STRICT) != 0;
// In the aliased case the last chunk aliases the first, so the expected value wraps
// modulo the non-aliased size.
886 const std::string valueExpr = (aliased ? "ivec4(3*(ndx % nonAliasedSize) ^ 127, 0, 0, 0)" : "ivec4(3*ndx ^ 127, 0, 0, 0)");
888 std::ostringstream src;
889 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
891 << "layout(location = 0) out vec4 o_color;\n"
893 << "layout(constant_id = 1) const int dataSize = 1;\n"
894 << "layout(constant_id = 2) const int chunkSize = 1;\n"
896 << "layout(set = 0, binding = 0, std140) uniform SparseBuffer {\n"
897 << " ivec4 data[dataSize];\n"
900 << "void main(void)\n"
902 << " const int fragNdx = int(gl_FragCoord.x) + " << RENDER_SIZE << " * int(gl_FragCoord.y);\n"
903 << " const int pageSize = " << RENDER_SIZE << " * " << RENDER_SIZE << ";\n"
904 << " const int numChunks = dataSize / chunkSize;\n";
907 src << " const int nonAliasedSize = (numChunks > 1 ? dataSize - chunkSize : dataSize);\n";
909 src << " bool ok = true;\n"
911 << " for (int ndx = fragNdx; ndx < dataSize; ndx += pageSize)\n"
// With residency + nonResidentStrict the second chunk is deliberately unbacked;
// residencyNonResidentStrict guarantees such reads return zero, so assert ivec4(0).
914 if (residency && nonResidentStrict)
916 src << " if (ndx >= chunkSize && ndx < 2*chunkSize)\n"
917 << " ok = ok && (ubo.data[ndx] == ivec4(0));\n"
919 << " ok = ok && (ubo.data[ndx] == " + valueExpr + ");\n";
// With residency but no strict guarantee, values read from the unbacked chunk are
// undefined, so only entries outside it are checked.
923 src << " if (ndx >= chunkSize && ndx < 2*chunkSize)\n"
925 << " ok = ok && (ubo.data[ndx] == " << valueExpr << ");\n";
928 src << " ok = ok && (ubo.data[ndx] == " << valueExpr << ");\n";
// Green on success, red on any mismatch.
933 << " o_color = vec4(0.0, 1.0, 0.0, 1.0);\n"
935 << " o_color = vec4(1.0, 0.0, 0.0, 1.0);\n"
938 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
942 //! Sparse buffer backing a UBO
// Test instance that binds a sparse buffer as a uniform buffer and validates its
// contents in the fragment shader (see initProgramsDrawWithUBO).
943 class UBOTestInstance : public SparseBufferTestInstance
946 UBOTestInstance (Context& context, const TestFlags flags)
947 : SparseBufferTestInstance (context, flags)
// Renderer callback: bind the full-screen quad and the UBO descriptor set, draw 4 vertices
// (triangle strip topology is selected by the iterate() call to draw()).
951 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
953 const DeviceInterface& vk = getDeviceInterface();
954 const VkDeviceSize vertexOffset = 0ull;
956 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
957 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
958 vk.cmdDraw (cmdBuffer, 4u, 1u, 0u, 0u);
961 tcu::TestStatus iterate (void)
963 const DeviceInterface& vk = getDeviceInterface();
964 MovePtr<SparseAllocation> sparseAllocation;
965 Move<VkBuffer> sparseBuffer;
966 Move<VkBuffer> sparseBufferAliased;
968 // Set up the sparse buffer
970 VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
971 const VkDeviceSize minChunkSize = 512u; // make sure the smallest allocation is at least this big
972 deUint32 numMaxChunks = 0u;
974 // Check how many chunks we can allocate given the alignment and size requirements of UBOs
976 const UniquePtr<SparseAllocation> minAllocation(SparseAllocationBuilder()
978 .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize));
// Cap the chunk count so the whole buffer stays within maxUniformBufferRange.
980 numMaxChunks = deMaxu32(static_cast<deUint32>(m_context.getDeviceProperties().limits.maxUniformBufferRange / minAllocation->resourceSize), 1u);
// Too few chunks for the hole/aliased layout below: fall back to a simple allocation.
983 if (numMaxChunks < 4)
985 sparseAllocation = SparseAllocationBuilder()
987 .build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
991 // Try to use a non-trivial memory allocation scheme to make it different from a non-sparse binding
992 SparseAllocationBuilder builder;
993 builder.addMemoryBind();
// A resource hole creates an unbacked chunk (only valid with sparseResidencyBuffer).
996 builder.addResourceHole();
999 .addMemoryAllocation()
// Aliased bind: the last chunk shares memory with the first allocation.
1004 builder.addAliasedMemoryBind(0u, 0u);
1006 sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
1007 DE_ASSERT(sparseAllocation->resourceSize <= m_context.getDeviceProperties().limits.maxUniformBufferRange);
1010 // Create the buffer
1011 referenceBufferCreateInfo.size = sparseAllocation->resourceSize;
1012 sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1013 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBuffer, *sparseAllocation);
// Second buffer bound to the same allocation; the aliased variant is read by the shader.
1017 sparseBufferAliased = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1018 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *sparseBufferAliased, *sparseAllocation);
1024 const bool hasAliasedChunk = (m_aliased && sparseAllocation->memoryBinds.size() > 1u);
1025 const VkDeviceSize chunkSize = sparseAllocation->resourceSize / sparseAllocation->numResourceChunks;
1026 const VkDeviceSize stagingBufferSize = sparseAllocation->resourceSize - (hasAliasedChunk ? chunkSize : 0);
1027 const deUint32 numBufferEntries = static_cast<deUint32>(stagingBufferSize / sizeof(IVec4));
1029 const Unique<VkBuffer> stagingBuffer (makeBuffer(vk, getDevice(), makeBufferCreateInfo(stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT)));
1030 const UniquePtr<Allocation> stagingBufferAlloc (bindBuffer(vk, getDevice(), getAllocator(), *stagingBuffer, MemoryRequirement::HostVisible));
1033 // If aliased chunk is used, the staging buffer is smaller than the sparse buffer and we don't overwrite the last chunk
// Fill pattern must match valueExpr in the fragment shader: 3*i ^ 127.
1034 IVec4* const pData = static_cast<IVec4*>(stagingBufferAlloc->getHostPtr());
1035 for (deUint32 i = 0; i < numBufferEntries; ++i)
1036 pData[i] = IVec4(3*i ^ 127, 0, 0, 0);
1038 flushMappedMemoryRange(vk, getDevice(), stagingBufferAlloc->getMemory(), stagingBufferAlloc->getOffset(), stagingBufferSize);
1040 const VkBufferCopy copyRegion =
1042 0ull, // VkDeviceSize srcOffset;
1043 0ull, // VkDeviceSize dstOffset;
1044 stagingBufferSize, // VkDeviceSize size;
1047 const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
1048 const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
1050 beginCommandBuffer (vk, *cmdBuffer);
1051 vk.cmdCopyBuffer (*cmdBuffer, *stagingBuffer, *sparseBuffer, 1u, &copyRegion);
1052 endCommandBuffer (vk, *cmdBuffer);
1054 submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
1055 // Once the fence is signaled, the write is also available to the aliasing buffer.
1059 // Make sure that we don't try to access a larger range than is allowed. This only applies to a single chunk case.
1060 const deUint32 maxBufferRange = deMinu32(static_cast<deUint32>(sparseAllocation->resourceSize), m_context.getDeviceProperties().limits.maxUniformBufferRange);
// Descriptor set: single UBO binding visible to the fragment shader.
1064 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1065 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_FRAGMENT_BIT)
1066 .build(vk, getDevice());
1068 m_descriptorPool = DescriptorPoolBuilder()
1069 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
1070 .build(vk, getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1072 m_descriptorSet = makeDescriptorSet(vk, getDevice(), *m_descriptorPool, *m_descriptorSetLayout);
// In the aliased case, read through the second buffer bound to the same memory.
1074 const VkBuffer buffer = (m_aliased ? *sparseBufferAliased : *sparseBuffer);
1075 const VkDescriptorBufferInfo sparseBufferInfo = makeDescriptorBufferInfo(buffer, 0ull, maxBufferRange);
1077 DescriptorSetUpdateBuilder()
1078 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &sparseBufferInfo)
1079 .update(vk, getDevice());
// Full-screen quad as a 4-vertex triangle strip.
1084 const Vec4 vertexData[] =
1086 Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
1087 Vec4(-1.0f, 1.0f, 0.0f, 1.0f),
1088 Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
1089 Vec4( 1.0f, 1.0f, 0.0f, 1.0f),
1092 const VkDeviceSize vertexBufferSize = sizeof(vertexData);
1094 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1095 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1097 deMemcpy(m_vertexBufferAlloc->getHostPtr(), &vertexData[0], vertexBufferSize);
1098 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
// Specialization constants consumed by the fragment shader:
// constant_id 1 = total entry count, constant_id 2 = entries per chunk.
1103 std::vector<deInt32> specializationData;
1105 const deUint32 numBufferEntries = maxBufferRange / static_cast<deUint32>(sizeof(IVec4));
1106 const deUint32 numEntriesPerChunk = numBufferEntries / sparseAllocation->numResourceChunks;
1108 specializationData.push_back(numBufferEntries);
1109 specializationData.push_back(numEntriesPerChunk);
1112 const VkSpecializationMapEntry specMapEntries[] =
1115 1u, // uint32_t constantID;
1116 0u, // uint32_t offset;
1117 sizeof(deInt32), // size_t size;
1120 2u, // uint32_t constantID;
1121 sizeof(deInt32), // uint32_t offset;
1122 sizeof(deInt32), // size_t size;
1126 const VkSpecializationInfo specInfo =
1128 DE_LENGTH_OF_ARRAY(specMapEntries), // uint32_t mapEntryCount;
1129 specMapEntries, // const VkSpecializationMapEntry* pMapEntries;
1130 sizeInBytes(specializationData), // size_t dataSize;
1131 getDataOrNullptr(specializationData), // const void* pData;
1134 Renderer::SpecializationMap specMap;
1135 specMap[VK_SHADER_STAGE_FRAGMENT_BIT] = &specInfo;
1137 draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, *m_descriptorSetLayout, specMap);
1140 return verifyDrawResult();
// Full-screen quad vertex buffer and the UBO descriptor objects used by rendererDraw().
1144 Move<VkBuffer> m_vertexBuffer;
1145 MovePtr<Allocation> m_vertexBufferAlloc;
1147 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1148 Move<VkDescriptorPool> m_descriptorPool;
1149 Move<VkDescriptorSet> m_descriptorSet;
//! Build GLSL programs for the grid-drawing tests (vertex/index/indirect buffer tests):
//! a vertex shader that forwards position and vertex index, and a fragment shader that
//! alternates white/grey per vertex-index parity. The flags argument is unused here;
//! it exists to match the FunctionProgramsSimple1 signature.
1152 void initProgramsDrawGrid (vk::SourceCollections& programCollection, const TestFlags flags)
1158 std::ostringstream src;
1159 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1161 << "layout(location = 0) in vec4 in_position;\n"
1162 << "layout(location = 0) out int out_ndx;\n"
1164 << "out gl_PerVertex {\n"
1165 << " vec4 gl_Position;\n"
1168 << "void main(void)\n"
1170 << " gl_Position = in_position;\n"
1171 << " out_ndx = gl_VertexIndex;\n"
1174 programCollection.glslSources.add("vert") << glu::VertexSource(src.str());
// Fragment shader: checkerboard-like shading from the flat-interpolated vertex index,
// so any missing/garbage geometry shows up against the red clear color.
1179 std::ostringstream src;
1180 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
1182 << "layout(location = 0) flat in int in_ndx;\n"
1183 << "layout(location = 0) out vec4 o_color;\n"
1185 << "void main(void)\n"
1187 << " if (in_ndx % 2 == 0)\n"
1188 << " o_color = vec4(vec3(1.0), 1.0);\n"
1190 << " o_color = vec4(vec3(0.75), 1.0);\n"
1193 programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
1197 //! Generate vertex positions for a grid of tiles composed of two triangles each (6 vertices)
// Fill pRawData with numX * numY tiles of 6 Vec4 vertices each (two CCW/CW triangles
// per tile, 2nd triangle sharing the tile diagonal). (ox, oy) is the grid origin in
// clip space and step the tile edge length; z is a constant depth for all vertices.
// The destination must hold numX * numY * 6 * sizeof(Vec4) bytes.
1198 void generateGrid (void* pRawData, const float step, const float ox, const float oy, const deUint32 numX, const deUint32 numY, const float z = 0.0f)
1200 typedef Vec4 (*TilePtr)[6];
1202 TilePtr const pData = static_cast<TilePtr>(pRawData);
1204 for (deUint32 iy = 0; iy < numY; ++iy)
1205 for (deUint32 ix = 0; ix < numX; ++ix)
1207 const deUint32 ndx = ix + numX * iy;
1208 const float x = ox + step * static_cast<float>(ix);
1209 const float y = oy + step * static_cast<float>(iy);
// First triangle of the tile.
1211 pData[ndx][0] = Vec4(x + step, y, z, 1.0f);
1212 pData[ndx][1] = Vec4(x, y, z, 1.0f);
1213 pData[ndx][2] = Vec4(x, y + step, z, 1.0f);
// Second triangle, sharing the diagonal edge with the first.
1215 pData[ndx][3] = Vec4(x, y + step, z, 1.0f);
1216 pData[ndx][4] = Vec4(x + step, y + step, z, 1.0f);
1217 pData[ndx][5] = Vec4(x + step, y, z, 1.0f);
1222 //! Base test for a sparse buffer backing a vertex/index buffer
// Base class for tests where a sparse buffer backs draw-related data (vertex, index,
// or indirect buffer). Sets up a two-chunk sparse allocation plus, depending on flags,
// a resource hole (residency) and an aliased trailing chunk (aliased), and a staging
// buffer holding two chunks' worth of data filled by the subclass.
1223 class DrawGridTestInstance : public SparseBufferTestInstance
1226 DrawGridTestInstance (Context& context, const TestFlags flags, const VkBufferUsageFlags usage, const VkDeviceSize minChunkSize)
1227 : SparseBufferTestInstance (context, flags)
1229 const DeviceInterface& vk = getDeviceInterface();
1230 VkBufferCreateInfo referenceBufferCreateInfo = getSparseBufferCreateInfo(usage);
1233 // Allocate two chunks, each covering half of the viewport
1234 SparseAllocationBuilder builder;
1235 builder.addMemoryBind();
// Residency: leave an unbacked hole between the two data chunks.
1238 builder.addResourceHole();
1241 .addMemoryAllocation()
// Aliased: bind the last chunk to the same memory as the first allocation.
1246 builder.addAliasedMemoryBind(0u, 0u);
1248 m_sparseAllocation = builder.build(vk, getDevice(), getAllocator(), referenceBufferCreateInfo, minChunkSize);
1251 // Create the buffer
1252 referenceBufferCreateInfo.size = m_sparseAllocation->resourceSize;
1253 m_sparseBuffer = makeBuffer(vk, getDevice(), referenceBufferCreateInfo);
1256 bindSparseBuffer(vk, getDevice(), m_sparseQueue.queueHandle, *m_sparseBuffer, *m_sparseAllocation);
// Staging buffer holds exactly two chunks of draw data (one per draw call).
1258 m_perDrawBufferOffset = m_sparseAllocation->resourceSize / m_sparseAllocation->numResourceChunks;
1259 m_stagingBufferSize = 2 * m_perDrawBufferOffset;
1260 m_stagingBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(m_stagingBufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT));
1261 m_stagingBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_stagingBuffer, MemoryRequirement::HostVisible);
// Template method: subclass fills the staging buffer, then this uploads the two
// chunks into the sparse buffer, draws, and verifies the rendered image.
1264 tcu::TestStatus iterate (void)
1266 initializeBuffers();
1268 const DeviceInterface& vk = getDeviceInterface();
1270 // Upload to the sparse buffer
1272 flushMappedMemoryRange(vk, getDevice(), m_stagingBufferAlloc->getMemory(), m_stagingBufferAlloc->getOffset(), m_stagingBufferSize);
1274 VkDeviceSize firstChunkOffset = 0ull;
1275 VkDeviceSize secondChunkOffset = m_perDrawBufferOffset;
// Residency: skip over the unbacked hole chunk for the second half of the data.
1278 secondChunkOffset += m_perDrawBufferOffset;
// Aliased: write the first half through the trailing aliased chunk instead of chunk 0
// (presumably to exercise the aliasing -- the data lands in the same memory; confirm
// against the builder layout above).
1281 firstChunkOffset = secondChunkOffset + m_perDrawBufferOffset;
1283 const VkBufferCopy copyRegions[] =
1286 0ull, // VkDeviceSize srcOffset;
1287 firstChunkOffset, // VkDeviceSize dstOffset;
1288 m_perDrawBufferOffset, // VkDeviceSize size;
1291 m_perDrawBufferOffset, // VkDeviceSize srcOffset;
1292 secondChunkOffset, // VkDeviceSize dstOffset;
1293 m_perDrawBufferOffset, // VkDeviceSize size;
1297 const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, getDevice(), m_universalQueue.queueFamilyIndex));
1298 const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, getDevice(), *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
1300 beginCommandBuffer (vk, *cmdBuffer);
1301 vk.cmdCopyBuffer (*cmdBuffer, *m_stagingBuffer, *m_sparseBuffer, DE_LENGTH_OF_ARRAY(copyRegions), copyRegions);
1302 endCommandBuffer (vk, *cmdBuffer);
1304 submitCommandsAndWait(vk, getDevice(), m_universalQueue.queueHandle, *cmdBuffer);
1307 draw(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
1309 return verifyDrawResult();
// Subclasses populate the staging buffer (and any auxiliary buffers) here.
1313 virtual void initializeBuffers (void) = 0;
// Byte size of one sparse chunk; each of the two draw calls consumes one chunk of data.
1315 VkDeviceSize m_perDrawBufferOffset;
1317 VkDeviceSize m_stagingBufferSize;
1318 Move<VkBuffer> m_stagingBuffer;
1319 MovePtr<Allocation> m_stagingBufferAlloc;
1321 MovePtr<SparseAllocation> m_sparseAllocation;
1322 Move<VkBuffer> m_sparseBuffer;
1325 //! Sparse buffer backing a vertex input buffer
// Draws a full-viewport grid of triangles in two draw calls, with the vertex data
// stored in the two populated chunks of a sparse buffer.
1326 class VertexBufferTestInstance : public DrawGridTestInstance
1329 VertexBufferTestInstance (Context& context, const TestFlags flags)
1330 : DrawGridTestInstance (context,
1332 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
// Minimum chunk size: one full grid of 6 vertices per tile.
1333 GRID_SIZE * GRID_SIZE * 6 * sizeof(Vec4))
1337 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1339 DE_UNREF(pipelineLayout);
1341 m_context.getTestContext().getLog()
1342 << tcu::TestLog::Message << "Drawing a grid of triangles backed by a sparse vertex buffer. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1344 const DeviceInterface& vk = getDeviceInterface();
// Each draw renders half the grid (GRID_SIZE x GRID_SIZE/2 tiles, 6 vertices each).
1345 const deUint32 vertexCount = 6 * (GRID_SIZE * GRID_SIZE) / 2;
1346 VkDeviceSize vertexOffset = 0ull;
1348 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_sparseBuffer.get(), &vertexOffset);
1349 vk.cmdDraw (cmdBuffer, vertexCount, 1u, 0u, 0u);
// With residency, the second data chunk sits past the unbacked hole chunk.
1351 vertexOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1353 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_sparseBuffer.get(), &vertexOffset);
1354 vk.cmdDraw (cmdBuffer, vertexCount, 1u, 0u, 0u);
// Fill the staging buffer with the top half of the grid in chunk 0 and the bottom
// half in chunk 1.
1357 void initializeBuffers (void)
1359 deUint8* pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr());
1360 const float step = 2.0f / static_cast<float>(GRID_SIZE);
1362 // Prepare data for two draw calls
1363 generateGrid(pData, step, -1.0f, -1.0f, GRID_SIZE, GRID_SIZE/2);
1364 generateGrid(pData + m_perDrawBufferOffset, step, -1.0f, 0.0f, GRID_SIZE, GRID_SIZE/2);
1368 //! Sparse buffer backing an index buffer
// Draws the grid with a regular (non-sparse) vertex buffer and a sparse buffer used
// as the index buffer; indices are sequential, split across the two populated chunks.
1369 class IndexBufferTestInstance : public DrawGridTestInstance
1372 IndexBufferTestInstance (Context& context, const TestFlags flags)
1373 : DrawGridTestInstance (context,
1375 VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
// Minimum chunk size: indices for the whole grid (6 per tile).
1376 GRID_SIZE * GRID_SIZE * 6 * sizeof(deUint32))
1377 , m_halfVertexCount (6 * (GRID_SIZE * GRID_SIZE) / 2)
1381 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1383 DE_UNREF(pipelineLayout);
1385 m_context.getTestContext().getLog()
1386 << tcu::TestLog::Message << "Drawing a grid of triangles from a sparse index buffer. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1388 const DeviceInterface& vk = getDeviceInterface();
1389 const VkDeviceSize vertexOffset = 0ull;
1390 VkDeviceSize indexOffset = 0ull;
1392 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
// First half of the indices from chunk 0.
1394 vk.cmdBindIndexBuffer (cmdBuffer, *m_sparseBuffer, indexOffset, VK_INDEX_TYPE_UINT32);
1395 vk.cmdDrawIndexed (cmdBuffer, m_halfVertexCount, 1u, 0u, 0, 0u);
// Second half; with residency, skip the unbacked hole chunk.
1397 indexOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1399 vk.cmdBindIndexBuffer (cmdBuffer, *m_sparseBuffer, indexOffset, VK_INDEX_TYPE_UINT32);
1400 vk.cmdDrawIndexed (cmdBuffer, m_halfVertexCount, 1u, 0u, 0, 0u);
1403 void initializeBuffers (void)
// Host-visible vertex buffer holding the full grid; only the index buffer is sparse.
1406 const DeviceInterface& vk = getDeviceInterface();
1407 const VkDeviceSize vertexBufferSize = 2 * m_halfVertexCount * sizeof(Vec4);
1408 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1409 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1412 const float step = 2.0f / static_cast<float>(GRID_SIZE);
1414 generateGrid(m_vertexBufferAlloc->getHostPtr(), step, -1.0f, -1.0f, GRID_SIZE, GRID_SIZE);
1416 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
1419 // Sparse index buffer
// Sequential indices: chunk 0 covers [0, half), chunk 1 covers [half, 2*half).
1420 for (deUint32 chunkNdx = 0u; chunkNdx < 2; ++chunkNdx)
1422 deUint8* const pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr()) + chunkNdx * m_perDrawBufferOffset;
1423 deUint32* const pIndexData = reinterpret_cast<deUint32*>(pData);
1424 const deUint32 ndxBase = chunkNdx * m_halfVertexCount;
1426 for (deUint32 i = 0u; i < m_halfVertexCount; ++i)
1427 pIndexData[i] = ndxBase + i;
// Number of indices consumed by each of the two draw calls.
1432 const deUint32 m_halfVertexCount;
1433 Move<VkBuffer> m_vertexBuffer;
1434 MovePtr<Allocation> m_vertexBufferAlloc;
1437 //! Draw from a sparse indirect buffer
// Issues two vkCmdDrawIndirect calls whose VkDrawIndirectCommand parameters live in
// the two populated chunks of a sparse buffer; together they draw two triangles
// covering the whole viewport.
1438 class IndirectBufferTestInstance : public DrawGridTestInstance
1441 IndirectBufferTestInstance (Context& context, const TestFlags flags)
1442 : DrawGridTestInstance (context,
1444 VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT,
// Minimum chunk size: one indirect command per chunk.
1445 sizeof(VkDrawIndirectCommand))
1449 void rendererDraw (const VkPipelineLayout pipelineLayout, const VkCommandBuffer cmdBuffer) const
1451 DE_UNREF(pipelineLayout);
1453 m_context.getTestContext().getLog()
1454 << tcu::TestLog::Message << "Drawing two triangles covering the whole viewport. There should be no red pixels visible." << tcu::TestLog::EndMessage;
1456 const DeviceInterface& vk = getDeviceInterface();
1457 const VkDeviceSize vertexOffset = 0ull;
1458 VkDeviceSize indirectOffset = 0ull;
1460 vk.cmdBindVertexBuffers (cmdBuffer, 0u, 1u, &m_vertexBuffer.get(), &vertexOffset);
// First triangle from the command in chunk 0.
1461 vk.cmdDrawIndirect (cmdBuffer, *m_sparseBuffer, indirectOffset, 1u, 0u);
// Second command; with residency, skip the unbacked hole chunk.
1463 indirectOffset += m_perDrawBufferOffset * (m_residency ? 2 : 1);
1465 vk.cmdDrawIndirect (cmdBuffer, *m_sparseBuffer, indirectOffset, 1u, 0u);
1468 void initializeBuffers (void)
// Vertex buffer: one full-viewport tile (two triangles, 6 vertices) from generateGrid.
1471 const DeviceInterface& vk = getDeviceInterface();
1472 const VkDeviceSize vertexBufferSize = 2 * 3 * sizeof(Vec4);
1473 m_vertexBuffer = makeBuffer(vk, getDevice(), makeBufferCreateInfo(vertexBufferSize, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1474 m_vertexBufferAlloc = bindBuffer(vk, getDevice(), getAllocator(), *m_vertexBuffer, MemoryRequirement::HostVisible);
1477 generateGrid(m_vertexBufferAlloc->getHostPtr(), 2.0f, -1.0f, -1.0f, 1, 1);
1478 flushMappedMemoryRange(vk, getDevice(), m_vertexBufferAlloc->getMemory(), m_vertexBufferAlloc->getOffset(), vertexBufferSize);
// One VkDrawIndirectCommand per chunk: command N draws vertices [3*N, 3*N + 3).
1482 for (deUint32 chunkNdx = 0u; chunkNdx < 2; ++chunkNdx)
1484 deUint8* const pData = static_cast<deUint8*>(m_stagingBufferAlloc->getHostPtr()) + chunkNdx * m_perDrawBufferOffset;
1485 VkDrawIndirectCommand* const pCmdData = reinterpret_cast<VkDrawIndirectCommand*>(pData);
1487 pCmdData->firstVertex = 3u * chunkNdx;
1488 pCmdData->firstInstance = 0u;
1489 pCmdData->vertexCount = 3u;
1490 pCmdData->instanceCount = 1u;
1495 Move<VkBuffer> m_vertexBuffer;
1496 MovePtr<Allocation> m_vertexBufferAlloc;
1499 //! Similar to the class in vktTestCaseUtil.hpp, but uses Arg0 directly rather than through an InstanceFunction1
// Adapter that lets a free function of signature void(SourceCollections&, Arg0)
// serve as the "Programs" policy for InstanceFactory1 (see
// createTestInstanceWithPrograms below).
1500 template<typename Arg0>
1501 class FunctionProgramsSimple1
// Program-initialization callback type expected by this adapter.
1504 typedef void (*Function) (vk::SourceCollections& dst, Arg0 arg0);
1505 FunctionProgramsSimple1 (Function func) : m_func(func) {}
1506 void init (vk::SourceCollections& dst, const Arg0& arg0) const { m_func(dst, arg0); }
1509 const Function m_func;
1512 //! Convenience function to create a TestCase based on a freestanding initPrograms and a TestInstance implementation
//! Convenience factory: wraps initPrograms in FunctionProgramsSimple1 and builds an
//! InstanceFactory1-based TestCase that constructs TestInstanceT with arg0
//! (here: the TestFlags for the variant).
1513 template<typename TestInstanceT, typename Arg0>
1514 TestCase* createTestInstanceWithPrograms (tcu::TestContext& testCtx,
1515 const std::string& name,
1516 const std::string& desc,
1517 typename FunctionProgramsSimple1<Arg0>::Function initPrograms,
1520 return new InstanceFactory1<TestInstanceT, Arg0, FunctionProgramsSimple1<Arg0> >(
1521 testCtx, tcu::NODETYPE_SELF_VALIDATE, name, desc, FunctionProgramsSimple1<Arg0>(initPrograms), arg0);
//! Build the sparse-buffer test hierarchy: transfer/ssbo groups reuse pre-existing
//! tests from other files; ubo/vertex_buffer/index_buffer/indirect_buffer groups use
//! the test instances in this file, one child per flag combination below.
1524 void populateTestGroup (tcu::TestCaseGroup* parentGroup)
// (name, TestFlags) table of test variants; declaration lines of the table are not
// visible in this chunk.
1532 { "sparse_binding", 0u },
1533 { "sparse_binding_aliased", TEST_FLAG_ALIASED, },
1534 { "sparse_residency", TEST_FLAG_RESIDENCY, },
1535 { "sparse_residency_aliased", TEST_FLAG_RESIDENCY | TEST_FLAG_ALIASED, },
1536 { "sparse_residency_non_resident_strict", TEST_FLAG_RESIDENCY | TEST_FLAG_NON_RESIDENT_STRICT, },
// The non_resident_strict variant is only meaningful where the shader can assert
// zero-reads (the UBO test); the other draw tests use the shorter default list.
1539 const int numGroupsIncludingNonResidentStrict = DE_LENGTH_OF_ARRAY(groups);
1540 const int numGroupsDefaultList = numGroupsIncludingNonResidentStrict - 1;
// transfer: reuses sparse-binding transfer tests from vktSparseResourcesBufferSparseBinding.
1544 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "transfer", ""));
1546 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding", ""));
1547 addBufferSparseBindingTests(subGroup.get());
1548 group->addChild(subGroup.release());
1550 parentGroup->addChild(group.release());
// ssbo: reuses memory-aliasing and residency tests from their respective files.
1555 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ssbo", ""));
1557 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_binding_aliased", ""));
1558 addBufferSparseMemoryAliasingTests(subGroup.get());
1559 group->addChild(subGroup.release());
1562 MovePtr<tcu::TestCaseGroup> subGroup(new tcu::TestCaseGroup(parentGroup->getTestContext(), "sparse_residency", ""));
1563 addBufferSparseResidencyTests(subGroup.get());
1564 group->addChild(subGroup.release());
1566 parentGroup->addChild(group.release());
// ubo: all variants including non_resident_strict.
1571 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "ubo", ""));
1573 for (int groupNdx = 0u; groupNdx < numGroupsIncludingNonResidentStrict; ++groupNdx)
1574 group->addChild(createTestInstanceWithPrograms<UBOTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawWithUBO, groups[groupNdx].flags));
1576 parentGroup->addChild(group.release());
// vertex_buffer / index_buffer / indirect_buffer: default variant list only.
1581 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "vertex_buffer", ""));
1583 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1584 group->addChild(createTestInstanceWithPrograms<VertexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1586 parentGroup->addChild(group.release());
1591 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "index_buffer", ""));
1593 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1594 group->addChild(createTestInstanceWithPrograms<IndexBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1596 parentGroup->addChild(group.release());
1601 MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(parentGroup->getTestContext(), "indirect_buffer", ""));
1603 for (int groupNdx = 0u; groupNdx < numGroupsDefaultList; ++groupNdx)
1604 group->addChild(createTestInstanceWithPrograms<IndirectBufferTestInstance>(group->getTestContext(), groups[groupNdx].name.c_str(), "", initProgramsDrawGrid, groups[groupNdx].flags));
1606 parentGroup->addChild(group.release());
//! Entry point: create the top-level "buffer" group containing all sparse buffer tests.
1612 tcu::TestCaseGroup* createSparseBufferTests (tcu::TestContext& testCtx)
1614 return createTestGroup(testCtx, "buffer", "Sparse buffer usage tests", populateTestGroup);