1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Nvidia Corporation
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Device Group Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktDeviceGroupTests.hpp"
28 #include "vkDeviceUtil.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkPlatform.hpp"
32 #include "vkPrograms.hpp"
33 #include "vkQueryUtil.hpp"
35 #include "vkRefUtil.hpp"
36 #include "vkStrUtil.hpp"
37 #include "vkTypeUtil.hpp"
38 #include "vkCmdUtil.hpp"
39 #include "vktTestCase.hpp"
40 #include "vktTestCaseUtil.hpp"
41 #include "vktTestGroupUtil.hpp"
43 #include "tcuDefs.hpp"
44 #include "tcuFormatUtil.hpp"
45 #include "tcuImageCompare.hpp"
46 #include "tcuResource.hpp"
47 #include "tcuTestCase.hpp"
48 #include "tcuTestLog.hpp"
49 #include "tcuCommandLine.hpp"
50 #include "tcuTextureUtil.hpp"
51 #include "tcuImageIO.hpp"
53 #include "rrRenderer.hpp"
68 //Device group test modes (bitmask flags; a test may combine several modes)
71 TEST_MODE_SFR = 1 << 0, //!< Split frame rendering
72 TEST_MODE_AFR = 1 << 1, //!< Alternate frame rendering
73 TEST_MODE_HOSTMEMORY = 1 << 2, //!< Use host memory for rendertarget
74 TEST_MODE_DEDICATED = 1 << 3, //!< Use dedicated allocations
75 TEST_MODE_PEER_FETCH = 1 << 4, //!< Peer vertex attributes from peer memory
76 TEST_MODE_TESSELLATION = 1 << 5, //!< Generate a tessellated sphere instead of triangle
77 TEST_MODE_LINEFILL = 1 << 6, //!< Draw polygon edges as line segments
// Pass-through vertex shader for the reference rasterizer (rr): one float
// vec4 attribute in, no varyings out. Used to produce the expected image
// that the device-rendered result is compared against.
80 class RefVertexShader : public rr::VertexShader
83 RefVertexShader (void)
84 : rr::VertexShader(1, 0) // 1 input, 0 outputs
86 m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
88 virtual ~RefVertexShader(void) {}
// Copies each packet's input attribute straight to its clip-space position.
90 void shadeVertices (const rr::VertexAttrib* inputs, rr::VertexPacket* const* packets, const int numPackets) const
92 for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
94 packets[packetNdx]->position = rr::readVertexAttribFloat(inputs[0],
95 packets[packetNdx]->instanceNdx,
96 packets[packetNdx]->vertexNdx);
// Constant-color fragment shader for the reference rasterizer: writes opaque
// yellow (1, 1, 0, 1) to output 0 for every covered fragment.
101 class RefFragmentShader : public rr::FragmentShader
104 RefFragmentShader (void)
105 : rr::FragmentShader(0, 1) // 0 inputs, 1 color output
107 m_outputs[0].type = rr::GENERICVECTYPE_FLOAT;
110 virtual ~RefFragmentShader(void) {}
112 void shadeFragments (rr::FragmentPacket*, const int numPackets, const rr::FragmentShadingContext& context) const
114 for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
116 for (int fragNdx = 0; fragNdx < rr::NUM_FRAGMENTS_PER_PACKET; ++fragNdx)
118 rr::writeFragmentOutput(context, packetNdx, fragNdx, 0, tcu::Vec4(1.0f, 1.0f, 0.0f, 1.0f));
// Rasterizes a single triangle into 'dst' with the software reference
// renderer (RefVertexShader + RefFragmentShader above), producing the
// expected image for comparison against the Vulkan-rendered result.
// 'vertices' are three clip-space positions, consumed as a tightly packed
// vec4 attribute stream.
124 void renderReferenceTriangle (const tcu::PixelBufferAccess& dst, const tcu::Vec4(&vertices)[3])
126 const RefVertexShader vertShader;
127 const RefFragmentShader fragShader;
128 const rr::Program program(&vertShader, &fragShader);
129 const rr::MultisamplePixelBufferAccess colorBuffer = rr::MultisamplePixelBufferAccess::fromSinglesampleAccess(dst);
130 const rr::RenderTarget renderTarget(colorBuffer);
131 const rr::RenderState renderState((rr::ViewportState(colorBuffer)));
132 const rr::Renderer renderer;
133 const rr::VertexAttrib vertexAttribs[] =
135 rr::VertexAttrib(rr::VERTEXATTRIBTYPE_FLOAT, 4, sizeof(tcu::Vec4), 0, vertices[0].getPtr())
137 renderer.draw(rr::DrawCommand(renderState,
140 DE_LENGTH_OF_ARRAY(vertexAttribs),
142 rr::PrimitiveList(rr::PRIMITIVETYPE_TRIANGLES, DE_LENGTH_OF_ARRAY(vertices), 0)));
// Test instance that creates a logical device spanning a device group and
// renders with it according to the TEST_MODE_* bitmask passed in 'mode'.
145 class DeviceGroupTestInstance : public TestInstance
148 DeviceGroupTestInstance(Context& context, deUint32 mode);
149 ~DeviceGroupTestInstance(void) {}
// Returns the index of the first memory type matching both the type-bits
// mask and the requested property flags (throws NotSupportedError if none).
152 deUint32 getMemoryIndex (deUint32 memoryTypeBits, deUint32 memoryPropertyFlag);
// Fills 'enabledLayers' with validation layers when validation is enabled.
153 void getDeviceLayers (vector<string>& enabledLayers);
// True when both devices report generic-src peer-memory support for the type.
154 bool isPeerFetchAllowed (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID);
155 void SubmitBufferAndWaitForIdle (const DeviceDriver& vk, VkCommandBuffer cmdBuf, deUint32 deviceMask);
156 virtual tcu::TestStatus iterate (void);
158 Move<VkDevice> m_deviceGroup; // logical device created over the whole group
159 deUint32 m_physicalDeviceCount; // number of physical devices in the group
160 VkQueue m_deviceGroupQueue; // universal queue retrieved from m_deviceGroup
161 vector<VkPhysicalDevice> m_physicalDevices; // group members, in enumeration order
164 bool m_useHostMemory; // TEST_MODE_HOSTMEMORY
167 bool m_subsetAllocation; // from VkPhysicalDeviceGroupProperties::subsetAllocation
168 bool m_fillModeNonSolid; // TEST_MODE_LINEFILL
169 bool m_drawTessellatedSphere; // TEST_MODE_TESSELLATION
// Constructor: decodes the TEST_MODE_* bitmask into per-feature booleans.
// NOTE(review): the initializer for m_testMode itself is not visible in this
// view; the flag tests below read m_testMode, which presumably is initialized
// from 'mode' earlier in the initializer list — confirm against the full file.
172 DeviceGroupTestInstance::DeviceGroupTestInstance (Context& context, const deUint32 mode)
173 : TestInstance (context)
174 , m_physicalDeviceCount (0)
175 , m_deviceGroupQueue (DE_NULL)
177 , m_useHostMemory (m_testMode & TEST_MODE_HOSTMEMORY)
178 , m_useDedicated (m_testMode & TEST_MODE_DEDICATED)
179 , m_usePeerFetch (m_testMode & TEST_MODE_PEER_FETCH)
180 , m_subsetAllocation (true) // overwritten in init() from device-group properties
181 , m_fillModeNonSolid (m_testMode & TEST_MODE_LINEFILL)
182 , m_drawTessellatedSphere (m_testMode & TEST_MODE_TESSELLATION)
// Returns the index of the first memory type whose bit is set in
// 'memoryTypeBits' and whose property flags contain all bits of
// 'memoryPropertyFlag'. Queries the context's (first) physical device.
// Throws NotSupportedError when no memory type qualifies.
187 deUint32 DeviceGroupTestInstance::getMemoryIndex (const deUint32 memoryTypeBits, const deUint32 memoryPropertyFlag)
189 const VkPhysicalDeviceMemoryProperties deviceMemProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
190 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemProps.memoryTypeCount; memoryTypeNdx++)
192 if ((memoryTypeBits & (1u << memoryTypeNdx)) != 0 &&
193 (deviceMemProps.memoryTypes[memoryTypeNdx].propertyFlags & memoryPropertyFlag) == memoryPropertyFlag)
194 return memoryTypeNdx;
196 TCU_THROW(NotSupportedError, "No compatible memory type found");
// Checks whether peer fetch is usable between the two group members for the
// given memory type: queries peer-memory features in both directions and
// requires VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT each way.
// Note the heap index is taken from the *other* device's memory properties
// (deviceMemProps2 for the first query, deviceMemProps1 for the second),
// matching the direction of each vkGetDeviceGroupPeerMemoryFeatures call.
199 bool DeviceGroupTestInstance::isPeerFetchAllowed (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID)
201 VkPeerMemoryFeatureFlags peerMemFeatures1;
202 VkPeerMemoryFeatureFlags peerMemFeatures2;
203 const DeviceDriver vk (m_context.getInstanceInterface(), *m_deviceGroup);
204 const VkPhysicalDeviceMemoryProperties deviceMemProps1 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[firstdeviceID]);
205 const VkPhysicalDeviceMemoryProperties deviceMemProps2 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[seconddeviceID]);
206 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps2.memoryTypes[memoryTypeIndex].heapIndex, firstdeviceID, seconddeviceID, &peerMemFeatures1);
207 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps1.memoryTypes[memoryTypeIndex].heapIndex, seconddeviceID, firstdeviceID, &peerMemFeatures2);
208 return (peerMemFeatures1 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT) && (peerMemFeatures2 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT);
// Collects device validation layers into 'enabledLayers' when the command
// line enables validation. Prefers the LunarG standard-validation meta-layer;
// falls back to the individual legacy layers it is composed of. Throws
// NotSupportedError if validation was requested but no layer is available.
// When validation is disabled the vector is left untouched (empty).
211 void DeviceGroupTestInstance::getDeviceLayers (vector<string>& enabledLayers)
213 const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
214 if (cmdLine.isValidationEnabled())
216 const vector<VkLayerProperties> layerProperties = enumerateDeviceLayerProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
218 static const char* s_magicLayer = "VK_LAYER_LUNARG_standard_validation";
219 static const char* s_defaultLayers[] =
221 "VK_LAYER_GOOGLE_threading",
222 "VK_LAYER_LUNARG_parameter_validation",
223 "VK_LAYER_LUNARG_device_limits",
224 "VK_LAYER_LUNARG_object_tracker",
225 "VK_LAYER_LUNARG_image",
226 "VK_LAYER_LUNARG_core_validation",
227 "VK_LAYER_LUNARG_swapchain",
228 "VK_LAYER_GOOGLE_unique_objects",
231 if (isLayerSupported(layerProperties, RequiredLayer(s_magicLayer)))
232 enabledLayers.push_back(s_magicLayer)
235 for (deUint32 ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_defaultLayers); ++ndx)
237 if (isLayerSupported(layerProperties, RequiredLayer(s_defaultLayers[ndx])))
238 enabledLayers.push_back(s_defaultLayers[ndx]);
241 if (enabledLayers.empty())
242 TCU_THROW(NotSupportedError, "No device validation layers found");
// Creates the device-group logical device: verifies the required instance and
// device extensions, selects the device group named on the command line,
// chains VkDeviceGroupDeviceCreateInfo into VkDeviceCreateInfo, and fetches
// the universal queue. Throws NotSupportedError / TestError on failure.
246 void DeviceGroupTestInstance::init (void)
248 if (!isInstanceExtensionSupported(m_context.getUsedApiVersion(), m_context.getInstanceExtensions(), "VK_KHR_device_group_creation"))
249 TCU_THROW(NotSupportedError, "Device Group tests are not supported, no device group extension present.");
251 const InstanceInterface& instanceInterface = m_context.getInstanceInterface();
252 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
253 const deUint32 queueIndex = 0;
254 const float queuePriority = 1.0f;
255 vector<const char*> extensionPtrs;
256 de::MovePtr<vk::DeviceDriver> deviceDriver;
257 vector<const char*> layerPtrs;
258 vector<string> deviceExtensions;
259 vector<string> enabledLayers;
// Required device extensions; only added to the enable list when not already
// promoted to core for the API version in use.
261 if (!isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_KHR_device_group"))
262 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_device_group");
264 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
265 deviceExtensions.push_back("VK_KHR_device_group");
269 if (!isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_KHR_dedicated_allocation"))
270 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_dedicated_allocation");
272 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_dedicated_allocation"))
273 deviceExtensions.push_back("VK_KHR_dedicated_allocation");
// Select the device group by the 1-based index from the command line.
277 const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
278 const vector<VkPhysicalDeviceGroupProperties> properties = enumeratePhysicalDeviceGroups(instanceInterface, m_context.getInstance());
279 if ((size_t)cmdLine.getVKDeviceGroupId() > properties.size())
280 TCU_THROW(TestError, "Invalid device group index.");
282 m_physicalDeviceCount = properties[cmdLine.getVKDeviceGroupId() - 1].physicalDeviceCount;
283 for (deUint32 idx = 0; idx < m_physicalDeviceCount; idx++)
285 m_physicalDevices.push_back(properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices[idx]);
288 if (m_usePeerFetch && m_physicalDeviceCount < 2)
289 TCU_THROW(NotSupportedError, "Peer fetching needs more than 1 physical device.");
// VK_KHR_bind_memory2 is needed except for single-device AFR.
291 if (!(m_testMode & TEST_MODE_AFR) || (m_physicalDeviceCount > 1))
293 if (!de::contains(m_context.getDeviceExtensions().begin(), m_context.getDeviceExtensions().end(), std::string("VK_KHR_bind_memory2")))
294 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_bind_memory2");
295 deviceExtensions.push_back("VK_KHR_bind_memory2");
298 const VkDeviceQueueCreateInfo deviceQueueCreateInfo =
300 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, //type
302 (VkDeviceQueueCreateFlags)0u, //flags
303 queueFamilyIndex, //queueFamilyIndex;
305 &queuePriority, //pQueuePriorities;
// Chained into VkDeviceCreateInfo::pNext so the logical device spans the group.
307 const VkDeviceGroupDeviceCreateInfo deviceGroupInfo =
309 VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, //stype
311 properties[cmdLine.getVKDeviceGroupId() - 1].physicalDeviceCount, //physicalDeviceCount
312 properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices //physicalDevices
// Use the command-line-selected member as the "creating" physical device.
315 VkPhysicalDevice physicalDevice = properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices[(size_t)(cmdLine.getVKDeviceId() - 1)];
316 VkPhysicalDeviceFeatures enabledDeviceFeatures = getPhysicalDeviceFeatures(instanceInterface, physicalDevice);
317 m_subsetAllocation = properties[cmdLine.getVKDeviceGroupId() - 1].subsetAllocation;
319 if (m_drawTessellatedSphere & static_cast<bool>(!enabledDeviceFeatures.tessellationShader))
320 TCU_THROW(NotSupportedError, "Tessellation is not supported.");
322 if (m_fillModeNonSolid & static_cast<bool>(!enabledDeviceFeatures.fillModeNonSolid))
323 TCU_THROW(NotSupportedError, "Line polygon mode is not supported.");
// Flatten extension/layer names into the char* arrays the API expects.
325 extensionPtrs.resize(deviceExtensions.size());
326 for (size_t ndx = 0; ndx < deviceExtensions.size(); ++ndx)
327 extensionPtrs[ndx] = deviceExtensions[ndx].c_str();
330 getDeviceLayers(enabledLayers);
331 layerPtrs.resize(enabledLayers.size());
332 for (size_t ndx = 0; ndx < enabledLayers.size(); ++ndx)
333 layerPtrs[ndx] = enabledLayers[ndx].c_str();
335 const VkDeviceCreateInfo deviceCreateInfo =
337 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, //sType;
338 &deviceGroupInfo, //pNext;
339 (VkDeviceCreateFlags)0u, //flags
340 1, //queueRecordCount;
341 &deviceQueueCreateInfo, //pRequestedQueues;
342 (deUint32)layerPtrs.size(), //layerCount;
343 (layerPtrs.empty() ? DE_NULL : &layerPtrs[0]), //ppEnabledLayerNames;
344 (deUint32)extensionPtrs.size(), //extensionCount;
345 (extensionPtrs.empty() ? DE_NULL : &extensionPtrs[0]), //ppEnabledExtensionNames;
346 &enabledDeviceFeatures, //pEnabledFeatures;
348 m_deviceGroup = createDevice(instanceInterface, physicalDevice, &deviceCreateInfo);
351 deviceDriver = de::MovePtr<vk::DeviceDriver>(new vk::DeviceDriver(instanceInterface, *m_deviceGroup));
352 m_deviceGroupQueue = getDeviceQueue(*deviceDriver, *m_deviceGroup, queueFamilyIndex, queueIndex);
// Submits 'cmdBuf' on the group queue restricted to 'deviceMask' (via the
// device-mask-aware submit helper), waits for the submission to complete,
// then additionally waits for the whole device to go idle.
355 void DeviceGroupTestInstance::SubmitBufferAndWaitForIdle(const DeviceDriver& vk, VkCommandBuffer cmdBuf, deUint32 deviceMask)
357 submitCommandsAndWait(vk, *m_deviceGroup, m_deviceGroupQueue, cmdBuf, true, deviceMask);
358 VK_CHECK(vk.deviceWaitIdle(*m_deviceGroup));
361 tcu::TestStatus DeviceGroupTestInstance::iterate (void)
363 const InstanceInterface& vki (m_context.getInstanceInterface());
364 const DeviceDriver vk (vki, *m_deviceGroup);
365 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
366 const tcu::UVec2 renderSize (256, 256);
367 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
368 const tcu::Vec4 clearColor (0.125f, 0.25f, 0.75f, 1.0f);
369 const tcu::Vec4 drawColor (1.0f, 1.0f, 0.0f, 1.0f);
370 const float tessLevel = 16.0f;
371 SimpleAllocator memAlloc (vk, *m_deviceGroup, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
372 bool iterateResultSuccess = false;
373 const tcu::Vec4 sphereVertices[] =
375 tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f),
376 tcu::Vec4(0.0f, 1.0f, 0.0f, 1.0f),
377 tcu::Vec4(1.0f, 0.0f, 0.0f, 1.0f),
378 tcu::Vec4(0.0f, 0.0f, -1.0f, 1.0f),
379 tcu::Vec4(0.0f, -1.0f, 0.0f, 1.0f),
380 tcu::Vec4(-1.0f, 0.0f, 0.0f, 1.0f),
382 const deUint32 sphereIndices[] = {0, 1, 2, 2, 1, 3, 3, 1, 5, 5, 1, 0, 0, 2, 4, 2, 3, 4, 3, 5, 4, 5, 0, 4};
383 const tcu::Vec4 triVertices[] =
385 tcu::Vec4(-0.5f, -0.5f, 0.0f, 1.0f),
386 tcu::Vec4(+0.5f, -0.5f, 0.0f, 1.0f),
387 tcu::Vec4(0.0f, +0.5f, 0.0f, 1.0f)
389 const deUint32 triIndices[] = {0, 1, 2};
390 const tcu::Vec4 * vertices = m_drawTessellatedSphere ? &sphereVertices[0] : &triVertices[0];
391 const deUint32 * indices = m_drawTessellatedSphere ? &sphereIndices[0] : &triIndices[0];
392 const deUint32 verticesSize = m_drawTessellatedSphere ? deUint32(sizeof(sphereVertices)) : deUint32(sizeof(triVertices));
393 const deUint32 numIndices = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)/sizeof(sphereIndices[0])) : deUint32(sizeof(triIndices)/sizeof(triIndices[0]));
394 const deUint32 indicesSize = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)) : deUint32(sizeof(triIndices));
396 // Loop through all physical devices in the device group
397 for (deUint32 physDevID = 0; physDevID < m_physicalDeviceCount; physDevID++)
399 const deUint32 firstDeviceID = physDevID;
400 const deUint32 secondDeviceID = (firstDeviceID + 1 ) % m_physicalDeviceCount;
401 vector<deUint32> deviceIndices (m_physicalDeviceCount);
402 bool isPeerMemAsCopySrcAllowed = true;
403 // Set broadcast on memory allocation
404 const deUint32 allocDeviceMask = m_subsetAllocation ? (1 << firstDeviceID) | (1 << secondDeviceID) : (1 << m_physicalDeviceCount) - 1;
406 for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
407 deviceIndices[i] = i;
408 deviceIndices[firstDeviceID] = secondDeviceID;
409 deviceIndices[secondDeviceID] = firstDeviceID;
411 VkMemoryRequirements memReqs =
413 0, // VkDeviceSize size
414 0, // VkDeviceSize alignment
415 0, // uint32_t memoryTypeBits
417 deUint32 memoryTypeNdx = 0;
418 de::MovePtr<Allocation> stagingVertexBufferMemory;
419 de::MovePtr<Allocation> stagingIndexBufferMemory;
420 de::MovePtr<Allocation> stagingUniformBufferMemory;
421 de::MovePtr<Allocation> stagingSboBufferMemory;
423 vk::Move<vk::VkDeviceMemory> vertexBufferMemory;
424 vk::Move<vk::VkDeviceMemory> indexBufferMemory;
425 vk::Move<vk::VkDeviceMemory> uniformBufferMemory;
426 vk::Move<vk::VkDeviceMemory> sboBufferMemory;
427 vk::Move<vk::VkDeviceMemory> imageMemory;
429 Move<VkRenderPass> renderPass;
430 Move<VkImage> renderImage;
431 Move<VkImage> readImage;
433 Move<VkDescriptorSetLayout> descriptorSetLayout;
434 Move<VkDescriptorPool> descriptorPool;
435 Move<VkDescriptorSet> descriptorSet;
437 Move<VkBuffer> stagingVertexBuffer;
438 Move<VkBuffer> stagingUniformBuffer;
439 Move<VkBuffer> stagingIndexBuffer;
440 Move<VkBuffer> stagingSboBuffer;
442 Move<VkBuffer> vertexBuffer;
443 Move<VkBuffer> indexBuffer;
444 Move<VkBuffer> uniformBuffer;
445 Move<VkBuffer> sboBuffer;
447 Move<VkPipeline> pipeline;
448 Move<VkPipelineLayout> pipelineLayout;
450 Move<VkImageView> colorAttView;
451 Move<VkFramebuffer> framebuffer;
452 Move<VkCommandPool> cmdPool;
453 Move<VkCommandBuffer> cmdBuffer;
455 VkMemoryDedicatedAllocateInfo dedicatedAllocInfo =
457 VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, // sType
463 VkMemoryAllocateFlagsInfo allocDeviceMaskInfo =
465 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, // sType
466 m_useDedicated ? &dedicatedAllocInfo : DE_NULL, // pNext
467 VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, // flags
468 allocDeviceMask, // deviceMask
471 VkMemoryAllocateInfo allocInfo =
473 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
474 &allocDeviceMaskInfo, // pNext
475 0u, // allocationSize
476 0u, // memoryTypeIndex
479 // create vertex buffers
481 const VkBufferCreateInfo stagingVertexBufferParams =
483 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
486 (VkDeviceSize)verticesSize, // size
487 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
488 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
489 1u, // queueFamilyIndexCount
490 &queueFamilyIndex, // pQueueFamilyIndices
492 stagingVertexBuffer = createBuffer(vk, *m_deviceGroup, &stagingVertexBufferParams);
493 stagingVertexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingVertexBuffer), MemoryRequirement::HostVisible);
494 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingVertexBuffer, stagingVertexBufferMemory->getMemory(), stagingVertexBufferMemory->getOffset()));
496 const VkMappedMemoryRange range =
498 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
500 stagingVertexBufferMemory->getMemory(), // memory
502 (VkDeviceSize)verticesSize, // size
504 void* vertexBufPtr = stagingVertexBufferMemory->getHostPtr();
505 deMemcpy(vertexBufPtr, &vertices[0], verticesSize);
506 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
510 const VkBufferCreateInfo vertexBufferParams =
512 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
515 (VkDeviceSize)verticesSize, // size
516 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
517 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
518 1u, // queueFamilyIndexCount
519 &queueFamilyIndex, // pQueueFamilyIndices
521 vertexBuffer = createBuffer(vk, *m_deviceGroup, &vertexBufferParams);
523 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, vertexBuffer.get());
524 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
526 dedicatedAllocInfo.buffer = vertexBuffer.get();
527 allocInfo.allocationSize = memReqs.size;
528 allocInfo.memoryTypeIndex = memoryTypeNdx;
529 vertexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
531 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
532 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
534 // Bind vertex buffer
537 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
539 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
541 m_physicalDeviceCount, // deviceIndexCount
542 &deviceIndices[0], // pDeviceIndices
545 VkBindBufferMemoryInfo bindInfo =
547 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
548 &devGroupBindInfo, // pNext
549 vertexBuffer.get(), // buffer
550 vertexBufferMemory.get(), // memory
553 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
556 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *vertexBuffer, vertexBufferMemory.get(), 0));
559 // create index buffers
561 const VkBufferCreateInfo stagingIndexBufferParams =
563 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
566 (VkDeviceSize)indicesSize, // size
567 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
568 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
569 1u, // queueFamilyIndexCount
570 &queueFamilyIndex, // pQueueFamilyIndices
572 stagingIndexBuffer = createBuffer(vk, *m_deviceGroup, &stagingIndexBufferParams);
573 stagingIndexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingIndexBuffer), MemoryRequirement::HostVisible);
574 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingIndexBuffer, stagingIndexBufferMemory->getMemory(), stagingIndexBufferMemory->getOffset()));
576 const VkMappedMemoryRange range =
578 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
580 stagingIndexBufferMemory->getMemory(), // memory
582 (VkDeviceSize)indicesSize, // size
584 void* indexBufPtr = stagingIndexBufferMemory->getHostPtr();
585 deMemcpy(indexBufPtr, &indices[0], indicesSize);
586 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
590 const VkBufferCreateInfo indexBufferParams =
592 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
595 (VkDeviceSize)indicesSize, // size
596 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
597 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
598 1u, // queueFamilyIndexCount
599 &queueFamilyIndex, // pQueueFamilyIndices
601 indexBuffer = createBuffer(vk, *m_deviceGroup, &indexBufferParams);
603 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, indexBuffer.get());
604 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
606 dedicatedAllocInfo.buffer = indexBuffer.get();
607 allocInfo.allocationSize = memReqs.size;
608 allocInfo.memoryTypeIndex = memoryTypeNdx;
609 indexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
611 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
612 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
617 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
619 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
621 m_physicalDeviceCount, // deviceIndexCount
622 &deviceIndices[0], // pDeviceIndices
625 VkBindBufferMemoryInfo bindInfo =
627 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
628 &devGroupBindInfo, // pNext
629 indexBuffer.get(), // buffer
630 indexBufferMemory.get(), // memory
633 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
636 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *indexBuffer, indexBufferMemory.get(), 0));
639 // create uniform buffers
641 const VkBufferCreateInfo stagingUniformBufferParams =
643 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
646 (VkDeviceSize)sizeof(drawColor), // size
647 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
648 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
649 1u, // queueFamilyIndexCount
650 &queueFamilyIndex, // pQueueFamilyIndices
652 stagingUniformBuffer = createBuffer(vk, *m_deviceGroup, &stagingUniformBufferParams);
653 stagingUniformBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingUniformBuffer), MemoryRequirement::HostVisible);
654 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingUniformBuffer, stagingUniformBufferMemory->getMemory(), stagingUniformBufferMemory->getOffset()));
656 const VkMappedMemoryRange range =
658 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
660 stagingUniformBufferMemory->getMemory(),// memory
662 (VkDeviceSize)sizeof(drawColor), // size
664 void* uniformBufPtr = stagingUniformBufferMemory->getHostPtr();
665 deMemcpy(uniformBufPtr, &drawColor[0], sizeof(drawColor));
666 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
670 const VkBufferCreateInfo uniformBufferParams =
672 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
675 (VkDeviceSize)sizeof(drawColor), // size
676 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
677 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
678 1u, // queueFamilyIndexCount
679 &queueFamilyIndex, // pQueueFamilyIndices
681 uniformBuffer = createBuffer(vk, *m_deviceGroup, &uniformBufferParams);
683 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, uniformBuffer.get());
684 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
686 dedicatedAllocInfo.buffer = uniformBuffer.get();
687 allocInfo.allocationSize = memReqs.size;
688 allocInfo.memoryTypeIndex = memoryTypeNdx;
689 uniformBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
691 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
692 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
696 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
698 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
700 m_physicalDeviceCount, // deviceIndexCount
701 &deviceIndices[0], // pDeviceIndices
704 VkBindBufferMemoryInfo bindInfo =
706 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
707 &devGroupBindInfo, // pNext
708 uniformBuffer.get(), // buffer
709 uniformBufferMemory.get(), // memory
712 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
715 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, uniformBuffer.get(), uniformBufferMemory.get(), 0));
718 // create SBO buffers
720 const VkBufferCreateInfo stagingSboBufferParams =
722 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
725 (VkDeviceSize)sizeof(tessLevel), // size
726 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
727 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
728 1u, // queueFamilyIndexCount
729 &queueFamilyIndex, // pQueueFamilyIndices
731 stagingSboBuffer = createBuffer(vk, *m_deviceGroup, &stagingSboBufferParams);
732 stagingSboBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingSboBuffer), MemoryRequirement::HostVisible);
733 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingSboBuffer, stagingSboBufferMemory->getMemory(), stagingSboBufferMemory->getOffset()));
735 const VkMappedMemoryRange range =
737 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
739 stagingSboBufferMemory->getMemory(), // memory
741 (VkDeviceSize)sizeof(tessLevel), // size
743 void* sboBufPtr = stagingSboBufferMemory->getHostPtr();
744 deMemcpy(sboBufPtr, &tessLevel, sizeof(tessLevel));
745 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
749 const VkBufferCreateInfo sboBufferParams =
751 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
754 (VkDeviceSize)sizeof(tessLevel), // size
755 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
756 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
757 1u, // queueFamilyIndexCount
758 &queueFamilyIndex, // pQueueFamilyIndices
760 sboBuffer = createBuffer(vk, *m_deviceGroup, &sboBufferParams);
762 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, sboBuffer.get());
763 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
765 dedicatedAllocInfo.buffer = sboBuffer.get();
766 allocInfo.allocationSize = memReqs.size;
767 allocInfo.memoryTypeIndex = memoryTypeNdx;
768 sboBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
770 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
771 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
775 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
777 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
779 m_physicalDeviceCount, // deviceIndexCount
780 &deviceIndices[0], // pDeviceIndices
783 VkBindBufferMemoryInfo bindInfo =
785 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
786 &devGroupBindInfo, // pNext
787 sboBuffer.get(), // buffer
788 sboBufferMemory.get(), // memory
791 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
794 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, sboBuffer.get(), sboBufferMemory.get(), 0));
797 // Create image resources
798 // Use a consistent usage flag because of memory aliasing
799 VkImageUsageFlags imageUsageFlag = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
801 // Check for SFR support
802 VkImageFormatProperties properties;
803 if ((m_testMode & TEST_MODE_SFR) && vki.getPhysicalDeviceImageFormatProperties(m_context.getPhysicalDevice(),
804 colorFormat, // format
805 VK_IMAGE_TYPE_2D, // type
806 VK_IMAGE_TILING_OPTIMAL, // tiling
807 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // usage
808 VK_IMAGE_CREATE_BIND_SFR_BIT, // flags
809 &properties) != VK_SUCCESS) // properties
811 TCU_THROW(NotSupportedError, "Format not supported for SFR");
814 VkImageCreateFlags imageCreateFlags = VK_IMAGE_CREATE_ALIAS_BIT; // The image objects alias same memory
815 if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
817 imageCreateFlags |= VK_IMAGE_CREATE_BIND_SFR_BIT;
820 const VkImageCreateInfo imageParams =
822 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
824 imageCreateFlags, // flags
825 VK_IMAGE_TYPE_2D, // imageType
826 colorFormat, // format
827 { renderSize.x(), renderSize.y(), 1 }, // extent
830 VK_SAMPLE_COUNT_1_BIT, // samples
831 VK_IMAGE_TILING_OPTIMAL, // tiling
832 imageUsageFlag, // usage
833 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
834 1u, // queueFamilyIndexCount
835 &queueFamilyIndex, // pQueueFamilyIndices
836 VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
839 renderImage = createImage(vk, *m_deviceGroup, &imageParams);
840 readImage = createImage(vk, *m_deviceGroup, &imageParams);
842 dedicatedAllocInfo.image = *renderImage;
843 dedicatedAllocInfo.buffer = DE_NULL;
844 memReqs = getImageMemoryRequirements(vk, *m_deviceGroup, renderImage.get());
845 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, m_useHostMemory ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
846 allocInfo.allocationSize = memReqs.size;
847 allocInfo.memoryTypeIndex = memoryTypeNdx;
848 imageMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
851 if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
853 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
854 TCU_THROW(NotSupportedError, "Peer texture reads is not supported.");
856 // Check if peer memory can be used as source of a copy command in case of SFR bindings, always allowed in case of 1 device
857 VkPeerMemoryFeatureFlags peerMemFeatures;
858 const VkPhysicalDeviceMemoryProperties deviceMemProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[secondDeviceID]);
859 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps.memoryTypes[memoryTypeNdx].heapIndex, firstDeviceID, secondDeviceID, &peerMemFeatures);
860 isPeerMemAsCopySrcAllowed = (peerMemFeatures & VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT);
862 VkRect2D zeroRect = {
872 vector<VkRect2D> sfrRects;
873 for (deUint32 i = 0; i < m_physicalDeviceCount*m_physicalDeviceCount; i++)
874 sfrRects.push_back(zeroRect);
876 if (m_physicalDeviceCount == 1u)
878 sfrRects[0].extent.width = (deInt32)renderSize.x();
879 sfrRects[0].extent.height = (deInt32)renderSize.y();
883 // Split into 2 vertical halves
884 sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.width = (deInt32)renderSize.x() / 2;
885 sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.height = (deInt32)renderSize.y();
886 sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID] = sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
887 sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID].offset.x = (deInt32)renderSize.x() / 2;
888 sfrRects[secondDeviceID * m_physicalDeviceCount + firstDeviceID] = sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
889 sfrRects[secondDeviceID * m_physicalDeviceCount + secondDeviceID] = sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID];
892 VkBindImageMemoryDeviceGroupInfo devGroupBindInfo =
894 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, // sType
896 0u, // deviceIndexCount
897 DE_NULL, // pDeviceIndices
898 m_physicalDeviceCount*m_physicalDeviceCount, // SFRRectCount
899 &sfrRects[0], // pSFRRects
902 VkBindImageMemoryInfo bindInfo =
904 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, // sType
905 &devGroupBindInfo, // pNext
906 *renderImage, // image
907 imageMemory.get(), // memory
910 VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
913 VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *renderImage, imageMemory.get(), 0));
915 VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *readImage, imageMemory.get(), 0));
919 const VkAttachmentDescription colorAttDesc =
922 colorFormat, // format
923 VK_SAMPLE_COUNT_1_BIT, // samples
924 VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
925 VK_ATTACHMENT_STORE_OP_STORE, // storeOp
926 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
927 VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
928 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // initialLayout
929 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // finalLayout
931 const VkAttachmentReference colorAttRef =
934 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // layout
936 const VkSubpassDescription subpassDesc =
938 (VkSubpassDescriptionFlags)0u, // flags
939 VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
940 0u, // inputAttachmentCount
941 DE_NULL, // pInputAttachments
942 1u, // colorAttachmentCount
943 &colorAttRef, // pColorAttachments
944 DE_NULL, // pResolveAttachments
945 DE_NULL, // depthStencilAttachment
946 0u, // preserveAttachmentCount
947 DE_NULL, // pPreserveAttachments
949 const VkRenderPassCreateInfo renderPassParams =
951 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // sType
954 1u, // attachmentCount
955 &colorAttDesc, // pAttachments
957 &subpassDesc, // pSubpasses
958 0u, // dependencyCount
959 DE_NULL, // pDependencies
961 renderPass = createRenderPass(vk, *m_deviceGroup, &renderPassParams);
964 // Create descriptors
966 vector<VkDescriptorSetLayoutBinding> layoutBindings;
967 vector<VkDescriptorPoolSize> descriptorTypes;
968 vector<VkWriteDescriptorSet> writeDescritporSets;
970 const VkDescriptorSetLayoutBinding layoutBindingUBO =
972 0u, // deUint32 binding;
973 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType descriptorType;
974 1u, // deUint32 descriptorCount;
975 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlags stageFlags;
976 DE_NULL // const VkSampler* pImmutableSamplers;
978 const VkDescriptorSetLayoutBinding layoutBindingSBO =
980 1u, // deUint32 binding;
981 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType descriptorType;
982 1u, // deUint32 descriptorCount;
983 VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, // VkShaderStageFlags stageFlags;
984 DE_NULL // const VkSampler* pImmutableSamplers;
987 layoutBindings.push_back(layoutBindingUBO);
988 if (m_drawTessellatedSphere)
989 layoutBindings.push_back(layoutBindingSBO);
991 const VkDescriptorSetLayoutCreateInfo descriptorLayoutParams =
993 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
994 DE_NULL, // const void* pNext;
995 (VkDescriptorSetLayoutCreateFlags)0, // VkDescriptorSetLayoutCreateFlags flags
996 deUint32(layoutBindings.size()), // deUint32 count;
997 layoutBindings.data() // const VkDescriptorSetLayoutBinding pBinding;
999 descriptorSetLayout = createDescriptorSetLayout(vk, *m_deviceGroup, &descriptorLayoutParams);
1001 const VkDescriptorPoolSize descriptorTypeUBO =
1003 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType type;
1004 1 // deUint32 count;
1006 const VkDescriptorPoolSize descriptorTypeSBO =
1008 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType type;
1009 1 // deUint32 count;
1011 descriptorTypes.push_back(descriptorTypeUBO);
1012 if (m_drawTessellatedSphere)
1013 descriptorTypes.push_back(descriptorTypeSBO);
1015 const VkDescriptorPoolCreateInfo descriptorPoolParams =
1017 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, // VkStructureType sType;
1018 DE_NULL, // void* pNext;
1019 VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, // VkDescriptorPoolCreateFlags flags;
1020 1u, // deUint32 maxSets;
1021 deUint32(descriptorTypes.size()), // deUint32 count;
1022 descriptorTypes.data() // const VkDescriptorTypeCount* pTypeCount
1024 descriptorPool = createDescriptorPool(vk, *m_deviceGroup, &descriptorPoolParams);
1026 const VkDescriptorSetAllocateInfo descriptorSetParams =
1028 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1032 &descriptorSetLayout.get(),
1034 descriptorSet = allocateDescriptorSet(vk, *m_deviceGroup, &descriptorSetParams);
1036 const VkDescriptorBufferInfo uboDescriptorInfo =
1038 uniformBuffer.get(),
1040 (VkDeviceSize)sizeof(drawColor)
1042 const VkDescriptorBufferInfo sboDescriptorInfo =
1046 (VkDeviceSize)sizeof(tessLevel)
1048 const VkWriteDescriptorSet writeDescritporSetUBO =
1050 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
1051 DE_NULL, // const void* pNext;
1052 *descriptorSet, // VkDescriptorSet destSet;
1053 0, // deUint32 destBinding;
1054 0, // deUint32 destArrayElement;
1055 1u, // deUint32 count;
1056 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType descriptorType;
1057 (const VkDescriptorImageInfo*)DE_NULL, // VkDescriptorImageInfo* pImageInfo;
1058 &uboDescriptorInfo, // VkDescriptorBufferInfo* pBufferInfo;
1059 (const VkBufferView*)DE_NULL // VkBufferView* pTexelBufferView;
1062 const VkWriteDescriptorSet writeDescritporSetSBO =
1064 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
1065 DE_NULL, // const void* pNext;
1066 *descriptorSet, // VkDescriptorSet destSet;
1067 1, // deUint32 destBinding;
1068 0, // deUint32 destArrayElement;
1069 1u, // deUint32 count;
1070 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType descriptorType;
1071 (const VkDescriptorImageInfo*)DE_NULL, // VkDescriptorImageInfo* pImageInfo;
1072 &sboDescriptorInfo, // VkDescriptorBufferInfo* pBufferInfo;
1073 (const VkBufferView*)DE_NULL // VkBufferView* pTexelBufferView;
1075 writeDescritporSets.push_back(writeDescritporSetUBO);
1076 if (m_drawTessellatedSphere)
1077 writeDescritporSets.push_back(writeDescritporSetSBO);
1079 vk.updateDescriptorSets(*m_deviceGroup, deUint32(writeDescritporSets.size()), writeDescritporSets.data(), 0u, DE_NULL);
1084 vector<VkPipelineShaderStageCreateInfo> shaderStageParams;
1085 Move<VkShaderModule> vertShaderModule;
1086 Move<VkShaderModule> tcssShaderModule;
1087 Move<VkShaderModule> tessShaderModule;
1088 Move<VkShaderModule> fragShaderModule;
1090 const VkDescriptorSetLayout descset = descriptorSetLayout.get();
1091 const VkPipelineLayoutCreateInfo pipelineLayoutParams =
1093 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
1095 (vk::VkPipelineLayoutCreateFlags)0, // flags
1096 1u, // setLayoutCount
1097 &descset, // pSetLayouts
1098 0u, // pushConstantRangeCount
1099 DE_NULL, // pPushConstantRanges
1101 pipelineLayout = createPipelineLayout(vk, *m_deviceGroup, &pipelineLayoutParams);
1104 vertShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("vert"), 0);
1105 fragShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("frag"), 0);
1107 const VkSpecializationInfo emptyShaderSpecParams =
1109 0u, // mapEntryCount
1114 const VkPipelineShaderStageCreateInfo vertexShaderStageParams =
1116 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1119 VK_SHADER_STAGE_VERTEX_BIT, // stage
1120 *vertShaderModule, // module
1122 &emptyShaderSpecParams, // pSpecializationInfo
1124 shaderStageParams.push_back(vertexShaderStageParams);
1126 if (m_drawTessellatedSphere)
1128 tcssShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tesc"), 0);
1129 tessShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tese"), 0);
1131 const VkPipelineShaderStageCreateInfo tessControlShaderStageParams =
1133 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1136 VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, // stage
1137 *tcssShaderModule, // module
1139 &emptyShaderSpecParams, // pSpecializationInfo
1141 const VkPipelineShaderStageCreateInfo tessEvalShaderStageParams =
1143 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1146 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, // stage
1147 *tessShaderModule, // module
1149 &emptyShaderSpecParams, // pSpecializationInfo
1152 shaderStageParams.push_back(tessControlShaderStageParams);
1153 shaderStageParams.push_back(tessEvalShaderStageParams);
1156 const VkPipelineShaderStageCreateInfo fragmentShaderStageParams =
1158 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1161 VK_SHADER_STAGE_FRAGMENT_BIT, // stage
1162 *fragShaderModule, // module
1164 &emptyShaderSpecParams, // pSpecializationInfo
1166 shaderStageParams.push_back(fragmentShaderStageParams);
1168 const VkPipelineDepthStencilStateCreateInfo depthStencilParams =
1170 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // sType
1173 DE_FALSE, // depthTestEnable
1174 DE_FALSE, // depthWriteEnable
1175 VK_COMPARE_OP_ALWAYS, // depthCompareOp
1176 DE_FALSE, // depthBoundsTestEnable
1177 DE_FALSE, // stencilTestEnable
1179 VK_STENCIL_OP_KEEP, // failOp
1180 VK_STENCIL_OP_KEEP, // passOp
1181 VK_STENCIL_OP_KEEP, // depthFailOp
1182 VK_COMPARE_OP_ALWAYS, // compareOp
1188 VK_STENCIL_OP_KEEP, // failOp
1189 VK_STENCIL_OP_KEEP, // passOp
1190 VK_STENCIL_OP_KEEP, // depthFailOp
1191 VK_COMPARE_OP_ALWAYS, // compareOp
1196 0.0f, // minDepthBounds;
1197 1.0f, // maxDepthBounds;
1199 const VkViewport viewport0 =
1203 (float)renderSize.x(), // width
1204 (float)renderSize.y(), // height
1208 const VkRect2D scissor0 =
1215 renderSize.x(), // width
1216 renderSize.y(), // height
1219 const VkPipelineViewportStateCreateInfo viewportParams =
1221 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // sType
1224 1u, // viewportCount
1225 &viewport0, // pViewports
1227 &scissor0 // pScissors
1229 const VkSampleMask sampleMask = ~0u;
1230 const VkPipelineMultisampleStateCreateInfo multisampleParams =
1232 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // sType
1235 VK_SAMPLE_COUNT_1_BIT, // rasterizationSamples
1236 VK_FALSE, // sampleShadingEnable
1237 0.0f, // minSampleShading
1238 &sampleMask, // sampleMask
1239 VK_FALSE, // alphaToCoverageEnable
1240 VK_FALSE, // alphaToOneEnable
1242 const VkPipelineRasterizationStateCreateInfo rasterParams =
1244 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // sType
1247 VK_TRUE, // depthClampEnable
1248 VK_FALSE, // rasterizerDiscardEnable
1249 m_fillModeNonSolid ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL, // polygonMode
1250 VK_CULL_MODE_NONE, // cullMode
1251 VK_FRONT_FACE_COUNTER_CLOCKWISE, // frontFace
1252 VK_FALSE, // depthBiasEnable
1253 0.0f, // depthBiasConstantFactor
1254 0.0f, // depthBiasClamp
1255 0.0f, // depthBiasSlopeFactor
1258 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyParams =
1260 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // sType
1263 m_drawTessellatedSphere ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // topology
1264 DE_FALSE, // primitiveRestartEnable
1266 const VkVertexInputBindingDescription vertexBinding0 =
1269 (deUint32)sizeof(tcu::Vec4), // stride
1270 VK_VERTEX_INPUT_RATE_VERTEX, // inputRate
1272 const VkVertexInputAttributeDescription vertexAttrib0 =
1276 VK_FORMAT_R32G32B32A32_SFLOAT, // format
1279 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
1281 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // sType
1284 1u, // vertexBindingDescriptionCount
1285 &vertexBinding0, // pVertexBindingDescriptions
1286 1u, // vertexAttributeDescriptionCount
1287 &vertexAttrib0, // pVertexAttributeDescriptions
1289 const VkPipelineColorBlendAttachmentState attBlendParams =
1291 VK_FALSE, // blendEnable
1292 VK_BLEND_FACTOR_ONE, // srcColorBlendFactor
1293 VK_BLEND_FACTOR_ZERO, // dstColorBlendFactor
1294 VK_BLEND_OP_ADD, // colorBlendOp
1295 VK_BLEND_FACTOR_ONE, // srcAlphaBlendFactor
1296 VK_BLEND_FACTOR_ZERO, // dstAlphaBlendFactor
1297 VK_BLEND_OP_ADD, // alphaBlendOp
1298 (VK_COLOR_COMPONENT_R_BIT |
1299 VK_COLOR_COMPONENT_G_BIT |
1300 VK_COLOR_COMPONENT_B_BIT |
1301 VK_COLOR_COMPONENT_A_BIT), // colorWriteMask
1303 const VkPipelineColorBlendStateCreateInfo blendParams =
1305 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // sType
1308 DE_FALSE, // logicOpEnable
1309 VK_LOGIC_OP_COPY, // logicOp
1310 1u, // attachmentCount
1311 &attBlendParams, // pAttachments
1312 { 0.0f, 0.0f, 0.0f, 0.0f }, // blendConstants[4]
1315 const VkPipelineTessellationStateCreateInfo tessState =
1317 VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // sType
1320 3u, // patchControlPoints
1322 const VkGraphicsPipelineCreateInfo pipelineParams =
1324 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // sType
1327 deUint32(shaderStageParams.size()), // stageCount
1328 shaderStageParams.data(), // pStages
1329 &vertexInputStateParams, // pVertexInputState
1330 &inputAssemblyParams, // pInputAssemblyState
1331 m_drawTessellatedSphere ? &tessState : DE_NULL, // pTessellationState
1332 &viewportParams, // pViewportState
1333 &rasterParams, // pRasterizationState
1334 &multisampleParams, // pMultisampleState
1335 &depthStencilParams, // pDepthStencilState
1336 &blendParams, // pColorBlendState
1337 (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // pDynamicState
1338 *pipelineLayout, // layout
1339 *renderPass, // renderPass
1341 DE_NULL, // basePipelineHandle
1342 0u, // basePipelineIndex
1344 pipeline = createGraphicsPipeline(vk, *m_deviceGroup, DE_NULL, &pipelineParams);
1347 // Create Framebuffer
1349 const VkImageViewCreateInfo colorAttViewParams =
1351 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
1354 *renderImage, // image
1355 VK_IMAGE_VIEW_TYPE_2D, // viewType
1356 colorFormat, // format
1358 VK_COMPONENT_SWIZZLE_R,
1359 VK_COMPONENT_SWIZZLE_G,
1360 VK_COMPONENT_SWIZZLE_B,
1361 VK_COMPONENT_SWIZZLE_A
1364 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1367 0u, // baseArrayLayer
1369 }, // subresourceRange
1371 colorAttView = createImageView(vk, *m_deviceGroup, &colorAttViewParams);
1373 const VkFramebufferCreateInfo framebufferParams =
1375 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
1378 *renderPass, // renderPass
1379 1u, // attachmentCount
1380 &*colorAttView, // pAttachments
1381 renderSize.x(), // width
1382 renderSize.y(), // height
1385 framebuffer = createFramebuffer(vk, *m_deviceGroup, &framebufferParams);
1388 // Create Command buffer
1390 const VkCommandPoolCreateInfo cmdPoolParams =
1392 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
1394 VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // flags
1395 queueFamilyIndex, // queueFamilyIndex
1397 cmdPool = createCommandPool(vk, *m_deviceGroup, &cmdPoolParams);
1399 const VkCommandBufferAllocateInfo cmdBufParams =
1401 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
1404 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1407 cmdBuffer = allocateCommandBuffer(vk, *m_deviceGroup, &cmdBufParams);
1411 VkCommandBufferBeginInfo cmdBufBeginParams =
1413 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // sType
1415 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // flags
1416 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1418 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1420 // Prepare render target for rendering
1422 const VkMemoryBarrier vertFlushBarrier =
1424 VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
1426 VK_ACCESS_HOST_WRITE_BIT, // srcAccessMask
1427 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // dstAccessMask
1429 const VkImageMemoryBarrier colorAttBarrier =
1431 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1433 0u, // srcAccessMask
1434 (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
1435 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT), // dstAccessMask
1436 VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
1437 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
1438 queueFamilyIndex, // srcQueueFamilyIndex
1439 queueFamilyIndex, // dstQueueFamilyIndex
1440 *renderImage, // image
1442 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1445 0u, // baseArrayLayer
1447 } // subresourceRange
1449 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 1, &vertFlushBarrier, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &colorAttBarrier);
1454 const VkBufferMemoryBarrier stagingVertexBufferUpdateBarrier =
1456 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1457 DE_NULL, // const void* pNext;
1458 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1459 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1460 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1461 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1462 stagingVertexBuffer.get(), // VkBuffer buffer;
1463 0u, // VkDeviceSize offset;
1464 verticesSize // VkDeviceSize size;
1467 const VkBufferMemoryBarrier vertexBufferUpdateBarrier =
1469 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1470 DE_NULL, // const void* pNext;
1471 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1472 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // VkAccessFlags dstAccessMask;
1473 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1474 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1475 vertexBuffer.get(), // VkBuffer buffer;
1476 0u, // VkDeviceSize offset;
1477 verticesSize // VkDeviceSize size;
1480 const VkBufferMemoryBarrier stagingIndexBufferUpdateBarrier =
1482 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1483 DE_NULL, // const void* pNext;
1484 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1485 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1486 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1487 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1488 stagingIndexBuffer.get(), // VkBuffer buffer;
1489 0u, // VkDeviceSize offset;
1490 indicesSize // VkDeviceSize size;
1493 const VkBufferMemoryBarrier indexBufferUpdateBarrier =
1495 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1496 DE_NULL, // const void* pNext;
1497 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1498 VK_ACCESS_INDEX_READ_BIT, // VkAccessFlags dstAccessMask;
1499 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1500 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1501 indexBuffer.get(), // VkBuffer buffer;
1502 0u, // VkDeviceSize offset;
1503 indicesSize // VkDeviceSize size;
1506 const VkBufferMemoryBarrier stagingUboBufferUpdateBarrier =
1508 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1509 DE_NULL, // const void* pNext;
1510 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1511 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1512 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1513 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1514 stagingUniformBuffer.get(), // VkBuffer buffer;
1515 0u, // VkDeviceSize offset;
1516 indicesSize // VkDeviceSize size; -- NOTE(review): barrier on stagingUniformBuffer uses indicesSize, but the copy below transfers sizeof(drawColor); likely should be sizeof(drawColor) -- confirm against buffer creation size
1519 const VkBufferMemoryBarrier uboUpdateBarrier =
1521 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1522 DE_NULL, // const void* pNext;
1523 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1524 VK_ACCESS_UNIFORM_READ_BIT, // VkAccessFlags dstAccessMask;
1525 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1526 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1527 uniformBuffer.get(), // VkBuffer buffer;
1528 0u, // VkDeviceSize offset;
1529 sizeof(drawColor) // VkDeviceSize size;
1533 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingVertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1534 VkBufferCopy vertexBufferCopy = { 0u, 0u, verticesSize };
1535 vk.cmdCopyBuffer(*cmdBuffer, stagingVertexBuffer.get(), vertexBuffer.get(), 1u, &vertexBufferCopy);
1536 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &vertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1538 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingIndexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1539 VkBufferCopy indexBufferCopy = { 0u, 0u, indicesSize };
1540 vk.cmdCopyBuffer(*cmdBuffer, stagingIndexBuffer.get(), indexBuffer.get(), 1u, &indexBufferCopy);
1541 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &indexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1543 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingUboBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1544 VkBufferCopy uboBufferCopy = { 0u, 0u, sizeof(drawColor) };
1545 vk.cmdCopyBuffer(*cmdBuffer, stagingUniformBuffer.get(), uniformBuffer.get(), 1u, &uboBufferCopy);
1546 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &uboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1548 if (m_drawTessellatedSphere)
1550 const VkBufferMemoryBarrier stagingsboUpdateBarrier =
1552 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1553 DE_NULL, // const void* pNext;
1554 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1555 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1556 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1557 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1558 stagingSboBuffer.get(), // VkBuffer buffer;
1559 0u, // VkDeviceSize offset;
1560 sizeof(tessLevel) // VkDeviceSize size;
1563 const VkBufferMemoryBarrier sboUpdateBarrier =
1565 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1566 DE_NULL, // const void* pNext;
1567 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1568 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1569 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1570 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1571 sboBuffer.get(), // VkBuffer buffer;
1572 0u, // VkDeviceSize offset;
1573 sizeof(tessLevel) // VkDeviceSize size;
1576 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingsboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1577 VkBufferCopy sboBufferCopy = { 0u, 0u, sizeof(tessLevel) };
1578 vk.cmdCopyBuffer(*cmdBuffer, stagingSboBuffer.get(), sboBuffer.get(), 1u, &sboBufferCopy);
1579 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &sboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1582 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
1583 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
1585 const VkDeviceSize bindingOffset = 0;
1586 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &bindingOffset);
1587 vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT32);
1593 const VkClearValue clearValue = makeClearValueColorF32(
1599 VkRect2D zeroRect = { { 0, 0, },{ 0, 0, } };
1600 vector<VkRect2D> renderAreas;
1601 for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
1602 renderAreas.push_back(zeroRect);
1604 // Render completely if there is only 1 device
1605 if (m_physicalDeviceCount == 1u)
1607 renderAreas[0].extent.width = (deInt32)renderSize.x();
1608 renderAreas[0].extent.height = (deInt32)renderSize.y();
1612 // Split into 2 vertical halves
1613 renderAreas[firstDeviceID].extent.width = (deInt32)renderSize.x() / 2;
1614 renderAreas[firstDeviceID].extent.height = (deInt32)renderSize.y();
1615 renderAreas[secondDeviceID] = renderAreas[firstDeviceID];
1616 renderAreas[secondDeviceID].offset.x = (deInt32)renderSize.x() / 2;
1619 const VkDeviceGroupRenderPassBeginInfo deviceGroupRPBeginInfo =
1621 VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO,
1623 (deUint32)((1 << m_physicalDeviceCount) - 1),
1624 m_physicalDeviceCount,
1628 const VkRenderPassBeginInfo passBeginParams =
1630 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // sType
1631 (m_testMode & TEST_MODE_SFR) ? &deviceGroupRPBeginInfo : DE_NULL, // pNext
1632 *renderPass, // renderPass
1633 *framebuffer, // framebuffer
1636 { renderSize.x(), renderSize.y() }
1638 1u, // clearValueCount
1639 &clearValue, // pClearValues
1641 vk.cmdBeginRenderPass(*cmdBuffer, &passBeginParams, VK_SUBPASS_CONTENTS_INLINE);
1645 if (m_testMode & TEST_MODE_AFR)
1647 vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
1648 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);
1653 vk.cmdSetDeviceMask(*cmdBuffer, ((1 << firstDeviceID) | (1 << secondDeviceID)));
1654 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);
1656 vk.cmdEndRenderPass(*cmdBuffer);
1658 // Change image layout for copy
1660 const VkImageMemoryBarrier renderFinishBarrier =
1662 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1664 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // outputMask
1665 VK_ACCESS_TRANSFER_READ_BIT, // inputMask
1666 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1667 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1668 queueFamilyIndex, // srcQueueFamilyIndex
1669 queueFamilyIndex, // dstQueueFamilyIndex
1670 *renderImage, // image
1672 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1675 0u, // baseArraySlice
1677 } // subresourceRange
1679 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &renderFinishBarrier);
1682 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1684 // Submit & wait for completion
1686 const deUint32 deviceMask = (1 << firstDeviceID) | (1 << secondDeviceID);
1687 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1690 // Copy image from secondDeviceID in case of AFR and SFR (only if peer memory as copy source is not allowed)
1691 if ((m_physicalDeviceCount > 1) && ((m_testMode & TEST_MODE_AFR) || (!isPeerMemAsCopySrcAllowed)))
1693 Move<VkImage> peerImage;
1695 // Create and bind peer image
1697 const VkImageCreateInfo peerImageParams =
1699 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
1701 VK_IMAGE_CREATE_ALIAS_BIT, // flags
1702 VK_IMAGE_TYPE_2D, // imageType
1703 colorFormat, // format
1704 { renderSize.x(), renderSize.y(), 1 }, // extent
1707 VK_SAMPLE_COUNT_1_BIT, // samples
1708 VK_IMAGE_TILING_OPTIMAL, // tiling
1709 imageUsageFlag, // usage
1710 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
1711 1u, // queueFamilyIndexCount
1712 &queueFamilyIndex, // pQueueFamilyIndices
1713 VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
1715 peerImage = createImage(vk, *m_deviceGroup, &peerImageParams);
1717 VkBindImageMemoryDeviceGroupInfo devGroupBindInfo =
1719 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, // sType
1721 m_physicalDeviceCount, // deviceIndexCount
1722 &deviceIndices[0], // pDeviceIndices
1724 DE_NULL, // pSFRRects
1727 VkBindImageMemoryInfo bindInfo =
1729 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, // sType
1730 &devGroupBindInfo, // pNext
1731 peerImage.get(), // image
1732 imageMemory.get(), // memory
1735 VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
1738 // Copy peer image (only needed in SFR case when peer memory as copy source is not allowed)
1740 // Change layout on firstDeviceID
1742 const VkImageMemoryBarrier preCopyBarrier =
1744 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1745 DE_NULL, // const void* pNext;
1746 0, // VkAccessFlags srcAccessMask;
1747 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1748 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1749 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1750 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1751 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1752 *renderImage, // VkImage image;
1753 { // VkImageSubresourceRange subresourceRange;
1754 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1755 0u, // deUint32 baseMipLevel;
1756 1u, // deUint32 mipLevels;
1757 0u, // deUint32 baseArraySlice;
1758 1u // deUint32 arraySize;
1762 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1763 vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
// NOTE(review): this span is the tail of a larger member function (appears to
// be the device-group test instance's iterate/verification step) whose start
// lies outside this view. Only comments were added here; every code token is
// unchanged. Each code line also carries a stray leading number (stale
// line-number artifact from a copy/paste) — left untouched on purpose.

// Transition so the render image on firstDeviceID is usable as a transfer
// destination for the cross-device copy below (preCopyBarrier is built above
// this view — TODO confirm its exact contents there).
1764 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &preCopyBarrier);
1765 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
// Device mask restricts this submission to firstDeviceID only.
1767 const deUint32 deviceMask = 1 << firstDeviceID;
1768 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1771 // Copy Image from secondDeviceID to firstDeviceID
1773 // AFR: Copy entire image from secondDeviceID
1774 // SFR: Copy the right half of image from secondDeviceID to firstDeviceID, so that the copy
1775 // to a buffer below (for checking) does not require VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT
// Offset/extent pick either the whole image (AFR) or just the right half (SFR).
1776 deInt32 imageOffsetX = (m_testMode & TEST_MODE_AFR) ? 0 : renderSize.x() / 2;
1777 deUint32 imageExtentX = (m_testMode & TEST_MODE_AFR) ? (deUint32)renderSize.x() : (deUint32)renderSize.x() / 2;
// Same x-offset for src and dst, so the copied region lands in place.
// NOTE(review): several initializer lines (braces, mip/layer fields, the
// extent width using imageExtentX) are missing from this listing — the
// original VkImageCopy initializer is fuller; restore before compiling.
1779 const VkImageCopy imageCopy =
1782 VK_IMAGE_ASPECT_COLOR_BIT,
1787 { imageOffsetX, 0, 0 },
1789 VK_IMAGE_ASPECT_COLOR_BIT,
1794 { imageOffsetX, 0, 0 },
1797 (deUint32)renderSize.y(),
// Record and submit the copy on secondDeviceID (the source device), writing
// into peerImage (the peer-memory alias visible on firstDeviceID).
1802 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1803 vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
1804 vk.cmdCopyImage(*cmdBuffer, *renderImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *peerImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageCopy);
1805 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1807 const deUint32 deviceMask = 1 << secondDeviceID;
1808 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1811 // Change layout back on firstDeviceID
// Return the render image to TRANSFER_SRC so the readback below can use it
// as a copy source again.
1813 const VkImageMemoryBarrier postCopyBarrier =
1815 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1816 DE_NULL, // const void* pNext;
1817 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1818 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1819 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1820 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1821 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1822 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1823 *renderImage, // VkImage image;
1824 { // VkImageSubresourceRange subresourceRange;
1825 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1826 0u, // deUint32 baseMipLevel;
1827 1u, // deUint32 mipLevels;
1828 0u, // deUint32 baseArraySlice;
1829 1u // deUint32 arraySize;
// Record and submit the layout transition on firstDeviceID.
1833 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1834 vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
1835 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &postCopyBarrier);
1836 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1838 const deUint32 deviceMask = 1 << firstDeviceID;
1839 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1844 // copy image to read buffer for checking
// Host-visible staging buffer sized for one 32-bit texel per pixel.
1846 const VkDeviceSize imageSizeBytes = (VkDeviceSize)(sizeof(deUint32) * renderSize.x() * renderSize.y());
1847 const VkBufferCreateInfo readImageBufferParams =
1849 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
1851 (VkBufferCreateFlags)0u, // flags
1852 imageSizeBytes, // size
1853 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
1854 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
1855 1u, // queueFamilyIndexCount
1856 &queueFamilyIndex, // pQueueFamilyIndices
1858 const Unique<VkBuffer> readImageBuffer(createBuffer(vk, *m_deviceGroup, &readImageBufferParams));
1859 const UniquePtr<Allocation> readImageBufferMemory(memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *readImageBuffer), MemoryRequirement::HostVisible));
1860 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
1862 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1864 // Copy image to buffer
1866 const VkBufferImageCopy copyParams =
1868 (VkDeviceSize)0u, // bufferOffset
1869 renderSize.x(), // bufferRowLength
1870 renderSize.y(), // bufferImageHeight
1872 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1874 0u, // baseArrayLayer
1876 }, // imageSubresource
1877 { 0, 0, 0 }, // imageOffset
1885 // Use a different binding in SFR when peer memory as copy source is not allowed
// NOTE(review): "©Params" below is mojibake (an HTML "&copy;" decode) for
// "&copyParams" — must be restored when fixing this file.
1886 vk.cmdCopyImageToBuffer(*cmdBuffer, isPeerMemAsCopySrcAllowed ? *renderImage : *readImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, ©Params);
// Make the transfer write visible to host reads before mapping the buffer.
1888 const VkBufferMemoryBarrier copyFinishBarrier =
1890 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
1892 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1893 VK_ACCESS_HOST_READ_BIT, // dstAccessMask
1894 queueFamilyIndex, // srcQueueFamilyIndex
1895 queueFamilyIndex, // dstQueueFamilyIndex
1896 *readImageBuffer, // buffer
1898 imageSizeBytes // size
// NOTE(review): "©FinishBarrier" below is likewise mojibake for
// "&copyFinishBarrier".
1900 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©FinishBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1902 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1904 // Submit & wait for completion
1906 const deUint32 deviceMask = 1 << firstDeviceID;
1907 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1910 // Read results and check against reference image
// Tessellated-sphere tests compare against a stored PNG reference (fuzzy
// compare); the plain-triangle path renders its own reference instead.
1911 if (m_drawTessellatedSphere)
1913 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1914 const VkMappedMemoryRange range =
1916 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
1918 readImageBufferMemory->getMemory(), // memory
1920 imageSizeBytes, // size
// Invalidate before the pixels are actually read through resultAccess.
1922 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1923 VK_CHECK(vk.invalidateMappedMemoryRanges(*m_deviceGroup, 1u, &range));
// Wireframe (line-fill) and solid variants use different reference PNGs.
1925 tcu::TextureLevel referenceImage;
1926 string refImage = m_fillModeNonSolid ? "vulkan/data/device_group/sphere.png" : "vulkan/data/device_group/spherefilled.png";
1927 tcu::ImageIO::loadPNG(referenceImage, m_context.getTestContext().getArchive(), refImage.c_str());
1928 iterateResultSuccess = tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ImageComparison", "Image Comparison",
1929 referenceImage.getAccess(), resultAccess, 0.001f, tcu::COMPARE_LOG_RESULT);
// Non-tessellated path: rasterize the reference triangle on the CPU and do
// an exact comparison with a 1-pixel position deviation allowance.
1933 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1934 const VkMappedMemoryRange range =
1936 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
1938 readImageBufferMemory->getMemory(), // memory
1940 imageSizeBytes, // size
1942 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1943 VK_CHECK(vk.invalidateMappedMemoryRanges(*m_deviceGroup, 1u, &range));
1945 // Render reference and compare
1947 tcu::TextureLevel refImage(tcuFormat, (deInt32)renderSize.x(), (deInt32)renderSize.y());
// Zero color threshold: only position deviation (1px in x/y) is tolerated.
1948 const tcu::UVec4 threshold(0u);
1949 const tcu::IVec3 posDeviation(1, 1, 0);
1951 tcu::clear(refImage.getAccess(), clearColor);
1952 renderReferenceTriangle(refImage.getAccess(), triVertices);
1954 iterateResultSuccess = tcu::intThresholdPositionDeviationCompare(m_context.getTestContext().getLog(),
1956 "Image comparison result",
1957 refImage.getAccess(),
1962 tcu::COMPARE_LOG_RESULT);
// Fail fast on mismatch; presumably an outer loop re-runs per test phase —
// the surrounding control flow is outside this view, so confirm there.
1967 if (!iterateResultSuccess)
1968 return tcu::TestStatus::fail("Image comparison failed");
1971 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Device group verification passed");
// Test-case wrapper that creates a device-group test Instance configured by a
// TEST_MODE_* bitmask and registers the GLSL programs it needs.
// NOTE(review): this listing elides several lines of the class (constructor
// parameters/body, access specifiers, closing braces, parts of the shader
// strings); only comments were added — code lines are unchanged. Each code
// line also carries a stray leading number from a copy/paste artifact.
1974 template<class Instance>
1975 class DeviceGroupTestCase : public TestCase
1978 DeviceGroupTestCase (tcu::TestContext& context,
1980 const char* description,
1982 : TestCase(context, name, description)
// Bitwise OR of TEST_MODE_* flags (SFR/AFR, host memory, dedicated
// allocations, peer fetch, tessellation, line fill).
1988 deUint32 m_testMode;
// Factory hook: the template parameter supplies the concrete TestInstance.
1990 TestInstance* createInstance (Context& context) const
1992 return new Instance(context, m_testMode);
// Registers the shader sources; tessellation stages are added only when
// TEST_MODE_TESSELLATION is set in m_testMode.
1995 void initPrograms (vk::SourceCollections& programCollection) const
// Pass-through vertex shader: forwards position, fixed point size.
1997 programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
1998 "layout(location = 0) in vec4 in_Position;\n"
1999 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
2001 " gl_Position = in_Position;\n"
2002 " gl_PointSize = 1.0;\n"
2005 if (m_testMode & TEST_MODE_TESSELLATION)
// Tessellation control: 3-vertex patches, all inner/outer levels taken
// from a "tessLevel" storage buffer (set=0, binding=1).
2007 programCollection.glslSources.add("tesc") << glu::TessellationControlSource("#version 450\n"
2008 "#extension GL_EXT_tessellation_shader : require\n"
2009 "layout(vertices=3) out;\n"
2010 "layout(set=0, binding=1) buffer tessLevel { \n"
2015 " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
2016 " if (gl_InvocationID == 0) {\n"
2017 " for (int i = 0; i < 4; i++)\n"
2018 " gl_TessLevelOuter[i] = tessLvl;\n"
2019 " for (int i = 0; i < 2; i++)\n"
2020 " gl_TessLevelInner[i] = tessLvl;\n"
// Tessellation evaluation: interpolates the patch position from the
// barycentric gl_TessCoord, then warps it (tan-based remap + normalize)
// so the tessellated triangles approximate a sphere surface.
2024 programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource("#version 450\n"
2025 "#extension GL_EXT_tessellation_shader : require\n"
2026 "layout(triangles) in;\n"
2027 "layout(equal_spacing) in;\n"
2031 " vec4 pos = vec4(0, 0, 0, 0);\n"
2032 " vec3 tessCoord = gl_TessCoord.xyz;\n"
2033 " pos += tessCoord.z * gl_in[0].gl_Position;\n"
2034 " pos += tessCoord.x * gl_in[1].gl_Position;\n"
2035 " pos += tessCoord.y * gl_in[2].gl_Position;\n"
2036 " vec3 sign = sign(pos.xyz);\n"
2037 " pos.xyz = 0.785398 - abs(pos.xyz) * 1.5707963;\n"
2038 " pos.xyz = (1 - tan(pos.xyz))/2.0;\n"
2039 " pos.xyz = (sign * pos.xyz) / length(pos.xyz);\n"
2040 " gl_Position = pos;\n"
// Fragment shader: flat fill with a color from a uniform buffer
// (set=0, binding=0).
2044 programCollection.glslSources.add("frag") << glu::FragmentSource("#version 430\n"
2045 "layout(location = 0) out vec4 out_FragColor;\n"
2046 "layout(std140, set=0, binding=0) uniform bufferData { \n"
2051 " out_FragColor = color;\n"
// Root test group ("device_group") for the device-group rendering tests.
// Copy constructor and assignment are declared but not defined, making the
// group non-copyable (pre-C++11 idiom used throughout this file).
2058 class DeviceGroupTestRendering : public tcu::TestCaseGroup
2061 DeviceGroupTestRendering (tcu::TestContext& testCtx);
// Nothing to release here: child test cases are owned by the base class.
2062 ~DeviceGroupTestRendering (void) {}
2066 DeviceGroupTestRendering (const DeviceGroupTestRendering& other);
2067 DeviceGroupTestRendering& operator= (const DeviceGroupTestRendering& other);
// Constructs the "device_group" group; child test cases are added in init(),
// which the framework calls after construction.
2070 DeviceGroupTestRendering::DeviceGroupTestRendering (tcu::TestContext& testCtx)
2071 : TestCaseGroup (testCtx, "device_group", "Testing device group test cases")
2073 // Left blank on purpose
// Populates the group. Every entry uses DeviceGroupTestInstance; the mode
// bitmask picks SFR vs AFR plus host-memory / dedicated-allocation /
// peer-fetch / tessellation variations.
2076 void DeviceGroupTestRendering::init (void)
// Split-frame-rendering variants.
2078 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr", "Test split frame rendering", TEST_MODE_SFR));
2079 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_sys", "Test split frame rendering with render target in host memory", TEST_MODE_SFR | TEST_MODE_HOSTMEMORY));
2080 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated", "Test split frame rendering with dedicated memory allocations", TEST_MODE_SFR | TEST_MODE_DEDICATED));
2081 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_SFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
// Alternate-frame-rendering variants.
// NOTE(review): the descriptions for "afr_sys"/"afr_dedicated"/
// "afr_dedicated_peer" still say "split frame rendering" — looks like a
// copy-paste slip in the user-visible text; confirm before changing the
// string literals.
2083 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr", "Test alternate frame rendering", TEST_MODE_AFR));
2084 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_sys", "Test split frame rendering with render target in host memory", TEST_MODE_AFR | TEST_MODE_HOSTMEMORY));
2085 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated", "Test split frame rendering with dedicated memory allocations", TEST_MODE_AFR | TEST_MODE_DEDICATED));
2086 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_AFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
// Tessellated-sphere variants (solid and wireframe fill).
2088 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated", "Test split frame rendering with tessellated sphere", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2089 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated_linefill", "Test split frame rendering with tessellated sphere with line segments", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2090 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated", "Test alternate frame rendering with tesselated sphere", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2091 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated_linefill", "Test alternate frame rendering with tesselated sphere with line segments", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
// Framework entry point: creates the "device_group" test group. Ownership of
// the returned group passes to the caller (the CTS test hierarchy).
// NOTE(review): the function's closing brace lies beyond this view.
2094 tcu::TestCaseGroup* createTests(tcu::TestContext& testCtx)
2096 return new DeviceGroupTestRendering(testCtx);