1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2017 The Khronos Group Inc.
6 * Copyright (c) 2017 Nvidia Corporation
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Device Group Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktDeviceGroupTests.hpp"
28 #include "vkDeviceUtil.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkPlatform.hpp"
32 #include "vkPrograms.hpp"
33 #include "vkQueryUtil.hpp"
35 #include "vkRefUtil.hpp"
36 #include "vkStrUtil.hpp"
37 #include "vkTypeUtil.hpp"
38 #include "vktTestCase.hpp"
39 #include "vktTestCaseUtil.hpp"
40 #include "vktTestGroupUtil.hpp"
42 #include "tcuDefs.hpp"
43 #include "tcuFormatUtil.hpp"
44 #include "tcuImageCompare.hpp"
45 #include "tcuResource.hpp"
46 #include "tcuTestCase.hpp"
47 #include "tcuTestLog.hpp"
48 #include "tcuCommandLine.hpp"
49 #include "tcuTextureUtil.hpp"
50 #include "tcuImageIO.hpp"
52 #include "rrRenderer.hpp"
67 //Device group test modes
// NOTE(review): these are bit flags — modes may be combined with bitwise OR
// (the enum header line is not visible in this view).
70 TEST_MODE_SFR = 1 << 0, //!< Split frame rendering
71 TEST_MODE_AFR = 1 << 1, //!< Alternate frame rendering
72 TEST_MODE_HOSTMEMORY = 1 << 2, //!< Use host memory for rendertarget
73 TEST_MODE_DEDICATED = 1 << 3, //!< Use dedicated allocations
74 TEST_MODE_PEER_FETCH = 1 << 4, //!< Peer vertex attributes from peer memory
75 TEST_MODE_TESSELLATION = 1 << 5, //!< Generate a tessellated sphere instead of triangle
76 TEST_MODE_LINEFILL = 1 << 6, //!< Draw polygon edges as line segments
// Reference-rasterizer vertex shader: one vec4 attribute in, no varyings out.
// Passes attribute 0 straight through as the clip-space position.
79 class RefVertexShader : public rr::VertexShader
82 RefVertexShader (void)
83 : rr::VertexShader(1, 0)
85 m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
87 virtual ~RefVertexShader(void) {}
// For each vertex packet, read attribute 0 (per instance/vertex index)
// and write it to the packet's position.
89 void shadeVertices (const rr::VertexAttrib* inputs, rr::VertexPacket* const* packets, const int numPackets) const
91 for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
93 packets[packetNdx]->position = rr::readVertexAttribFloat(inputs[0],
94 packets[packetNdx]->instanceNdx,
95 packets[packetNdx]->vertexNdx);
// Reference-rasterizer fragment shader: no inputs, one float-vec output.
// Writes the constant color (1, 1, 0, 1) — opaque yellow — for every fragment.
100 class RefFragmentShader : public rr::FragmentShader
103 RefFragmentShader (void)
104 : rr::FragmentShader(0, 1)
106 m_outputs[0].type = rr::GENERICVECTYPE_FLOAT;
109 virtual ~RefFragmentShader(void) {}
// Write the constant color to output 0 of every fragment in every packet.
111 void shadeFragments (rr::FragmentPacket*, const int numPackets, const rr::FragmentShadingContext& context) const
113 for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
115 for (int fragNdx = 0; fragNdx < rr::NUM_FRAGMENTS_PER_PACKET; ++fragNdx)
117 rr::writeFragmentOutput(context, packetNdx, fragNdx, 0, tcu::Vec4(1.0f, 1.0f, 0.0f, 1.0f));
// Rasterizes a single triangle into 'dst' with the software reference
// renderer (rr), using the pass-through vertex shader and constant-color
// fragment shader above. Produces the expected image that the GPU-rendered
// result is compared against.
123 void renderReferenceTriangle (const tcu::PixelBufferAccess& dst, const tcu::Vec4(&vertices)[3])
125 const RefVertexShader vertShader;
126 const RefFragmentShader fragShader;
127 const rr::Program program(&vertShader, &fragShader);
// Wrap the single-sampled destination as a (1-sample) multisample access.
128 const rr::MultisamplePixelBufferAccess colorBuffer = rr::MultisamplePixelBufferAccess::fromSinglesampleAccess(dst);
129 const rr::RenderTarget renderTarget(colorBuffer);
// Extra parentheses avoid the most-vexing-parse on the single-argument ctor.
130 const rr::RenderState renderState((rr::ViewportState(colorBuffer)));
131 const rr::Renderer renderer;
// One vec4 position attribute, tightly packed.
132 const rr::VertexAttrib vertexAttribs[] =
134 rr::VertexAttrib(rr::VERTEXATTRIBTYPE_FLOAT, 4, sizeof(tcu::Vec4), 0, vertices[0].getPtr())
136 renderer.draw(rr::DrawCommand(renderState,
139 DE_LENGTH_OF_ARRAY(vertexAttribs),
141 rr::PrimitiveList(rr::PRIMITIVETYPE_TRIANGLES, DE_LENGTH_OF_ARRAY(vertices), 0)));
// Test instance that creates a logical device over a physical-device group
// and exercises device-group rendering paths (SFR/AFR, host memory,
// dedicated allocations, peer fetch, tessellation, line fill).
144 class DeviceGroupTestInstance : public TestInstance
147 DeviceGroupTestInstance(Context& context, deUint32 mode);
148 ~DeviceGroupTestInstance(void) {}
// Returns the index of the first memory type allowed by 'memoryTypeBits'
// whose property flags contain all bits of 'memoryPropertyFlag';
// throws NotSupportedError when none matches.
151 deUint32 getMemoryIndex (deUint32 memoryTypeBits, deUint32 memoryPropertyFlag);
// Appends device validation layers to 'enabledLayers' when validation is
// enabled on the command line; throws NotSupportedError if none are found.
152 void getDeviceLayers (vector<string>& enabledLayers);
// True when GENERIC_SRC peer-memory support is reported in both directions
// between the two devices for the given memory type.
153 bool isPeerFetchAllowed (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID);
// Submits 'cmdBuf' on the group queue with the given device-group submit
// info, then waits on a fence and for the device to go idle.
154 void SubmitBufferAndWaitForIdle (const DeviceDriver& vk, VkCommandBuffer cmdBuf, VkDeviceGroupSubmitInfo);
155 virtual tcu::TestStatus iterate (void);
157 Move<VkDevice> m_deviceGroup; // logical device created over the whole group
158 deUint32 m_physicalDeviceCount;
159 VkQueue m_deviceGroupQueue;
160 vector<VkPhysicalDevice> m_physicalDevices;
163 bool m_useHostMemory; // from TEST_MODE_HOSTMEMORY
166 bool m_subsetAllocation; // from VkPhysicalDeviceGroupProperties
167 bool m_fillModeNonSolid; // from TEST_MODE_LINEFILL
168 bool m_drawTessellatedSphere; // from TEST_MODE_TESSELLATION
// Decodes the combined 'mode' bitmask into individual feature booleans.
// NOTE(review): the initializers below read m_testMode, but the line that
// initializes m_testMode from 'mode' is not visible in this view — confirm
// it is declared (and thus initialized) before these members.
171 DeviceGroupTestInstance::DeviceGroupTestInstance (Context& context, const deUint32 mode)
172 : TestInstance (context)
173 , m_physicalDeviceCount (0)
174 , m_deviceGroupQueue (DE_NULL)
176 , m_useHostMemory (m_testMode & TEST_MODE_HOSTMEMORY)
177 , m_useDedicated (m_testMode & TEST_MODE_DEDICATED)
178 , m_usePeerFetch (m_testMode & TEST_MODE_PEER_FETCH)
// Assume subset allocation until the group properties are queried in init().
179 , m_subsetAllocation (true)
180 , m_fillModeNonSolid (m_testMode & TEST_MODE_LINEFILL)
181 , m_drawTessellatedSphere (m_testMode & TEST_MODE_TESSELLATION)
// Linear scan of the default physical device's memory types for the first
// one that is (a) allowed by 'memoryTypeBits' and (b) has every bit of
// 'memoryPropertyFlag' set. Throws NotSupportedError when none qualifies.
186 deUint32 DeviceGroupTestInstance::getMemoryIndex (const deUint32 memoryTypeBits, const deUint32 memoryPropertyFlag)
188 const VkPhysicalDeviceMemoryProperties deviceMemProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
189 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemProps.memoryTypeCount; memoryTypeNdx++)
// Require ALL requested property bits, not just any.
191 if ((memoryTypeBits & (1u << memoryTypeNdx)) != 0 &&
192 (deviceMemProps.memoryTypes[memoryTypeNdx].propertyFlags & memoryPropertyFlag) == memoryPropertyFlag)
193 return memoryTypeNdx;
195 TCU_THROW(NotSupportedError, "No compatible memory type found");
// Queries peer-memory features in both directions between the two group
// devices and requires VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT in each.
// NOTE(review): the heap index passed for each direction is taken from the
// *other* device's memory properties at the same memoryTypeIndex — this
// assumes memory type indices line up across devices in the group; confirm.
198 bool DeviceGroupTestInstance::isPeerFetchAllowed (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID)
200 VkPeerMemoryFeatureFlags peerMemFeatures1;
201 VkPeerMemoryFeatureFlags peerMemFeatures2;
202 const DeviceDriver vk (m_context.getInstanceInterface(), *m_deviceGroup);
203 const VkPhysicalDeviceMemoryProperties deviceMemProps1 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[firstdeviceID]);
204 const VkPhysicalDeviceMemoryProperties deviceMemProps2 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[seconddeviceID]);
205 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps2.memoryTypes[memoryTypeIndex].heapIndex, firstdeviceID, seconddeviceID, &peerMemFeatures1);
206 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps1.memoryTypes[memoryTypeIndex].heapIndex, seconddeviceID, firstdeviceID, &peerMemFeatures2);
207 return (peerMemFeatures1 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT) && (peerMemFeatures2 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT);
// When validation is requested on the command line, fills 'enabledLayers'
// with supported device validation layers: the standard-validation meta
// layer if available, otherwise the individual LunarG/Google layers.
// Throws NotSupportedError when validation is on but no layers are found.
210 void DeviceGroupTestInstance::getDeviceLayers (vector<string>& enabledLayers)
212 const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
213 if (cmdLine.isValidationEnabled())
215 const vector<VkLayerProperties> layerProperties = enumerateDeviceLayerProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
// Preferred meta layer that bundles the individual validation layers.
217 static const char* s_magicLayer = "VK_LAYER_LUNARG_standard_validation";
218 static const char* s_defaultLayers[] =
220 "VK_LAYER_GOOGLE_threading",
221 "VK_LAYER_LUNARG_parameter_validation",
222 "VK_LAYER_LUNARG_device_limits",
223 "VK_LAYER_LUNARG_object_tracker",
224 "VK_LAYER_LUNARG_image",
225 "VK_LAYER_LUNARG_core_validation",
226 "VK_LAYER_LUNARG_swapchain",
227 "VK_LAYER_GOOGLE_unique_objects",
230 if (isLayerSupported(layerProperties, RequiredLayer(s_magicLayer)))
231 enabledLayers.push_back(s_magicLayer)
234 for (deUint32 ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_defaultLayers); ++ndx)
236 if (isLayerSupported(layerProperties, RequiredLayer(s_defaultLayers[ndx])))
237 enabledLayers.push_back(s_defaultLayers[ndx]);
240 if (enabledLayers.empty())
241 TCU_THROW(NotSupportedError, "No device validation layers found");
// Creates the device-group logical device: checks required extensions,
// enumerates the physical-device group selected on the command line,
// builds VkDeviceGroupDeviceCreateInfo, and fetches the universal queue.
245 void DeviceGroupTestInstance::init (void)
247 if (!isInstanceExtensionSupported(m_context.getUsedApiVersion(), m_context.getInstanceExtensions(), "VK_KHR_device_group_creation"))
248 TCU_THROW(NotSupportedError, "Device Group tests are not supported, no device group extension present.");
250 const InstanceInterface& instanceInterface = m_context.getInstanceInterface();
251 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
252 const deUint32 queueIndex = 0;
253 const float queuePriority = 1.0f;
254 vector<const char*> extensionPtrs;
255 de::MovePtr<vk::DeviceDriver> deviceDriver;
256 vector<const char*> layerPtrs;
257 vector<string> deviceExtensions;
258 vector<string> enabledLayers;
260 if (!isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_KHR_device_group"))
261 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_device_group");
// Only enable explicitly when the extension is not already core in this API version.
263 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
264 deviceExtensions.push_back("VK_KHR_device_group");
268 if (!isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_KHR_dedicated_allocation"))
269 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_dedicated_allocation");
271 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_dedicated_allocation"))
272 deviceExtensions.push_back("VK_KHR_dedicated_allocation");
// Select the device group chosen on the command line (1-based id).
276 const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
277 const vector<VkPhysicalDeviceGroupProperties> properties = enumeratePhysicalDeviceGroups(instanceInterface, m_context.getInstance());
278 if ((size_t)cmdLine.getVKDeviceGroupId() > properties.size())
279 TCU_THROW(TestError, "Invalid device group index.");
281 m_physicalDeviceCount = properties[cmdLine.getVKDeviceGroupId() - 1].physicalDeviceCount;
282 for (deUint32 idx = 0; idx < m_physicalDeviceCount; idx++)
284 m_physicalDevices.push_back(properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices[idx]);
287 if (m_usePeerFetch && m_physicalDeviceCount < 2)
288 TCU_THROW(NotSupportedError, "Peer fetching needs more than 1 physical device.");
// VK_KHR_bind_memory2 is required except for the AFR-on-a-single-device case.
290 if (!(m_testMode & TEST_MODE_AFR) || (m_physicalDeviceCount > 1))
292 if (!de::contains(m_context.getDeviceExtensions().begin(), m_context.getDeviceExtensions().end(), std::string("VK_KHR_bind_memory2")))
293 TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_bind_memory2");
294 deviceExtensions.push_back("VK_KHR_bind_memory2");
297 const VkDeviceQueueCreateInfo deviceQueueCreateInfo =
299 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, //type
301 (VkDeviceQueueCreateFlags)0u, //flags
302 queueFamilyIndex, //queueFamilyIndex;
304 &queuePriority, //pQueuePriorities;
// Chain this into VkDeviceCreateInfo::pNext to create the device over the group.
306 const VkDeviceGroupDeviceCreateInfo deviceGroupInfo =
308 VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, //stype
310 properties[cmdLine.getVKDeviceGroupId() - 1].physicalDeviceCount, //physicalDeviceCount
311 properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices //physicalDevices
314 VkPhysicalDevice physicalDevice = properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices[(size_t)(cmdLine.getVKDeviceId() - 1)];
315 VkPhysicalDeviceFeatures enabledDeviceFeatures = getPhysicalDeviceFeatures(instanceInterface, physicalDevice);
316 m_subsetAllocation = properties[cmdLine.getVKDeviceGroupId() - 1].subsetAllocation;
318 if (m_drawTessellatedSphere & static_cast<bool>(!enabledDeviceFeatures.tessellationShader))
319 TCU_THROW(NotSupportedError, "Tessellation is not supported.");
321 if (m_fillModeNonSolid & static_cast<bool>(!enabledDeviceFeatures.fillModeNonSolid))
322 TCU_THROW(NotSupportedError, "Line polygon mode is not supported.");
// Flatten extension/layer name lists into the char* arrays Vulkan expects.
324 extensionPtrs.resize(deviceExtensions.size());
325 for (size_t ndx = 0; ndx < deviceExtensions.size(); ++ndx)
326 extensionPtrs[ndx] = deviceExtensions[ndx].c_str();
329 getDeviceLayers(enabledLayers);
330 layerPtrs.resize(enabledLayers.size());
331 for (size_t ndx = 0; ndx < enabledLayers.size(); ++ndx)
332 layerPtrs[ndx] = enabledLayers[ndx].c_str();
334 const VkDeviceCreateInfo deviceCreateInfo =
336 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, //sType;
337 &deviceGroupInfo, //pNext;
338 (VkDeviceCreateFlags)0u, //flags
339 1, //queueRecordCount;
340 &deviceQueueCreateInfo, //pRequestedQueues;
341 (deUint32)layerPtrs.size(), //layerCount;
342 (layerPtrs.empty() ? DE_NULL : &layerPtrs[0]), //ppEnabledLayerNames;
343 (deUint32)extensionPtrs.size(), //extensionCount;
344 (extensionPtrs.empty() ? DE_NULL : &extensionPtrs[0]), //ppEnabledExtensionNames;
345 &enabledDeviceFeatures, //pEnabledFeatures;
347 m_deviceGroup = createDevice(instanceInterface, physicalDevice, &deviceCreateInfo);
350 deviceDriver = de::MovePtr<vk::DeviceDriver>(new vk::DeviceDriver(instanceInterface, *m_deviceGroup));
351 m_deviceGroupQueue = getDeviceQueue(*deviceDriver, *m_deviceGroup, queueFamilyIndex, queueIndex);
// Submits a single command buffer on the device-group queue with the given
// VkDeviceGroupSubmitInfo chained in, then waits on a fence (no timeout)
// and finally for the whole device to go idle.
354 void DeviceGroupTestInstance::SubmitBufferAndWaitForIdle(const DeviceDriver& vk, VkCommandBuffer cmdBuf, VkDeviceGroupSubmitInfo deviceGroupSubmitInfo)
356 const VkFenceCreateInfo fenceParams =
358 VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // sType
362 const VkSubmitInfo submitInfo =
364 VK_STRUCTURE_TYPE_SUBMIT_INFO, // sType
365 &deviceGroupSubmitInfo, // pNext
366 0u, // waitSemaphoreCount
367 DE_NULL, // pWaitSemaphores
368 (const VkPipelineStageFlags*)DE_NULL, // pWaitDstStageMask
369 1u, // commandBufferCount
370 &cmdBuf, // pCommandBuffers
371 0u, // signalSemaphoreCount
372 DE_NULL, // pSignalSemaphores
374 const Unique<VkFence> fence(createFence(vk, *m_deviceGroup, &fenceParams));
376 VK_CHECK(vk.queueSubmit(m_deviceGroupQueue, 1u, &submitInfo, *fence));
// ~0ull == wait forever; the extra deviceWaitIdle covers all queues/devices.
377 VK_CHECK(vk.waitForFences(*m_deviceGroup, 1u, &fence.get(), DE_TRUE, ~0ull));
378 VK_CHECK(vk.deviceWaitIdle(*m_deviceGroup));
381 tcu::TestStatus DeviceGroupTestInstance::iterate (void)
383 const InstanceInterface& vki (m_context.getInstanceInterface());
384 const DeviceDriver vk (vki, *m_deviceGroup);
385 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
386 const tcu::UVec2 renderSize (256, 256);
387 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
388 const tcu::Vec4 clearColor (0.125f, 0.25f, 0.75f, 1.0f);
389 const tcu::Vec4 drawColor (1.0f, 1.0f, 0.0f, 1.0f);
390 const float tessLevel = 16.0f;
391 SimpleAllocator memAlloc (vk, *m_deviceGroup, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
392 bool iterateResultSuccess = false;
393 const tcu::Vec4 sphereVertices[] =
395 tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f),
396 tcu::Vec4(0.0f, 1.0f, 0.0f, 1.0f),
397 tcu::Vec4(1.0f, 0.0f, 0.0f, 1.0f),
398 tcu::Vec4(0.0f, 0.0f, -1.0f, 1.0f),
399 tcu::Vec4(0.0f, -1.0f, 0.0f, 1.0f),
400 tcu::Vec4(-1.0f, 0.0f, 0.0f, 1.0f),
402 const deUint32 sphereIndices[] = {0, 1, 2, 2, 1, 3, 3, 1, 5, 5, 1, 0, 0, 2, 4, 2, 3, 4, 3, 5, 4, 5, 0, 4};
403 const tcu::Vec4 triVertices[] =
405 tcu::Vec4(-0.5f, -0.5f, 0.0f, 1.0f),
406 tcu::Vec4(+0.5f, -0.5f, 0.0f, 1.0f),
407 tcu::Vec4(0.0f, +0.5f, 0.0f, 1.0f)
409 const deUint32 triIndices[] = {0, 1, 2};
410 const tcu::Vec4 * vertices = m_drawTessellatedSphere ? &sphereVertices[0] : &triVertices[0];
411 const deUint32 * indices = m_drawTessellatedSphere ? &sphereIndices[0] : &triIndices[0];
412 const deUint32 verticesSize = m_drawTessellatedSphere ? deUint32(sizeof(sphereVertices)) : deUint32(sizeof(triVertices));
413 const deUint32 numIndices = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)/sizeof(sphereIndices[0])) : deUint32(sizeof(triIndices)/sizeof(triIndices[0]));
414 const deUint32 indicesSize = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)) : deUint32(sizeof(triIndices));
416 // Loop through all physical devices in the device group
417 for (deUint32 physDevID = 0; physDevID < m_physicalDeviceCount; physDevID++)
419 const deUint32 firstDeviceID = physDevID;
420 const deUint32 secondDeviceID = (firstDeviceID + 1 ) % m_physicalDeviceCount;
421 vector<deUint32> deviceIndices (m_physicalDeviceCount);
422 bool isPeerMemAsCopySrcAllowed = true;
423 // Set broadcast on memory allocation
424 const deUint32 allocDeviceMask = m_subsetAllocation ? (1 << firstDeviceID) | (1 << secondDeviceID) : (1 << m_physicalDeviceCount) - 1;
426 for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
427 deviceIndices[i] = i;
428 deviceIndices[firstDeviceID] = secondDeviceID;
429 deviceIndices[secondDeviceID] = firstDeviceID;
431 VkMemoryRequirements memReqs =
433 0, // VkDeviceSize size
434 0, // VkDeviceSize alignment
435 0, // uint32_t memoryTypeBits
437 deUint32 memoryTypeNdx = 0;
438 de::MovePtr<Allocation> stagingVertexBufferMemory;
439 de::MovePtr<Allocation> stagingIndexBufferMemory;
440 de::MovePtr<Allocation> stagingUniformBufferMemory;
441 de::MovePtr<Allocation> stagingSboBufferMemory;
443 vk::Move<vk::VkDeviceMemory> vertexBufferMemory;
444 vk::Move<vk::VkDeviceMemory> indexBufferMemory;
445 vk::Move<vk::VkDeviceMemory> uniformBufferMemory;
446 vk::Move<vk::VkDeviceMemory> sboBufferMemory;
447 vk::Move<vk::VkDeviceMemory> imageMemory;
449 Move<VkRenderPass> renderPass;
450 Move<VkImage> renderImage;
451 Move<VkImage> readImage;
453 Move<VkDescriptorSetLayout> descriptorSetLayout;
454 Move<VkDescriptorPool> descriptorPool;
455 Move<VkDescriptorSet> descriptorSet;
457 Move<VkBuffer> stagingVertexBuffer;
458 Move<VkBuffer> stagingUniformBuffer;
459 Move<VkBuffer> stagingIndexBuffer;
460 Move<VkBuffer> stagingSboBuffer;
462 Move<VkBuffer> vertexBuffer;
463 Move<VkBuffer> indexBuffer;
464 Move<VkBuffer> uniformBuffer;
465 Move<VkBuffer> sboBuffer;
467 Move<VkPipeline> pipeline;
468 Move<VkPipelineLayout> pipelineLayout;
470 Move<VkImageView> colorAttView;
471 Move<VkFramebuffer> framebuffer;
472 Move<VkCommandPool> cmdPool;
473 Move<VkCommandBuffer> cmdBuffer;
475 VkMemoryDedicatedAllocateInfo dedicatedAllocInfo =
477 VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO, // sType
483 VkMemoryAllocateFlagsInfo allocDeviceMaskInfo =
485 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, // sType
486 m_useDedicated ? &dedicatedAllocInfo : DE_NULL, // pNext
487 VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT, // flags
488 allocDeviceMask, // deviceMask
491 VkMemoryAllocateInfo allocInfo =
493 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
494 &allocDeviceMaskInfo, // pNext
495 0u, // allocationSize
496 0u, // memoryTypeIndex
499 VkDeviceGroupSubmitInfo deviceGroupSubmitInfo =
501 VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, // sType
503 0u, // waitSemaphoreCount
504 DE_NULL, // pWaitSemaphoreDeviceIndices
505 0u, // commandBufferCount
506 DE_NULL, // pCommandBufferDeviceMasks
507 0u, // signalSemaphoreCount
508 DE_NULL, // pSignalSemaphoreDeviceIndices
511 // create vertex buffers
513 const VkBufferCreateInfo stagingVertexBufferParams =
515 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
518 (VkDeviceSize)verticesSize, // size
519 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
520 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
521 1u, // queueFamilyIndexCount
522 &queueFamilyIndex, // pQueueFamilyIndices
524 stagingVertexBuffer = createBuffer(vk, *m_deviceGroup, &stagingVertexBufferParams);
525 stagingVertexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingVertexBuffer), MemoryRequirement::HostVisible);
526 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingVertexBuffer, stagingVertexBufferMemory->getMemory(), stagingVertexBufferMemory->getOffset()));
528 const VkMappedMemoryRange range =
530 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
532 stagingVertexBufferMemory->getMemory(), // memory
534 (VkDeviceSize)verticesSize, // size
536 void* vertexBufPtr = stagingVertexBufferMemory->getHostPtr();
537 deMemcpy(vertexBufPtr, &vertices[0], verticesSize);
538 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
542 const VkBufferCreateInfo vertexBufferParams =
544 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
547 (VkDeviceSize)verticesSize, // size
548 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
549 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
550 1u, // queueFamilyIndexCount
551 &queueFamilyIndex, // pQueueFamilyIndices
553 vertexBuffer = createBuffer(vk, *m_deviceGroup, &vertexBufferParams);
555 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, vertexBuffer.get());
556 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
558 dedicatedAllocInfo.buffer = vertexBuffer.get();
559 allocInfo.allocationSize = memReqs.size;
560 allocInfo.memoryTypeIndex = memoryTypeNdx;
561 vertexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
563 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
564 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
566 // Bind vertex buffer
569 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
571 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
573 m_physicalDeviceCount, // deviceIndexCount
574 &deviceIndices[0], // pDeviceIndices
577 VkBindBufferMemoryInfo bindInfo =
579 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
580 &devGroupBindInfo, // pNext
581 vertexBuffer.get(), // buffer
582 vertexBufferMemory.get(), // memory
585 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
588 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *vertexBuffer, vertexBufferMemory.get(), 0));
591 // create index buffers
593 const VkBufferCreateInfo stagingIndexBufferParams =
595 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
598 (VkDeviceSize)indicesSize, // size
599 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
600 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
601 1u, // queueFamilyIndexCount
602 &queueFamilyIndex, // pQueueFamilyIndices
604 stagingIndexBuffer = createBuffer(vk, *m_deviceGroup, &stagingIndexBufferParams);
605 stagingIndexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingIndexBuffer), MemoryRequirement::HostVisible);
606 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingIndexBuffer, stagingIndexBufferMemory->getMemory(), stagingIndexBufferMemory->getOffset()));
608 const VkMappedMemoryRange range =
610 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
612 stagingIndexBufferMemory->getMemory(), // memory
614 (VkDeviceSize)indicesSize, // size
616 void* indexBufPtr = stagingIndexBufferMemory->getHostPtr();
617 deMemcpy(indexBufPtr, &indices[0], indicesSize);
618 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
622 const VkBufferCreateInfo indexBufferParams =
624 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
627 (VkDeviceSize)indicesSize, // size
628 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
629 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
630 1u, // queueFamilyIndexCount
631 &queueFamilyIndex, // pQueueFamilyIndices
633 indexBuffer = createBuffer(vk, *m_deviceGroup, &indexBufferParams);
635 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, indexBuffer.get());
636 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
638 dedicatedAllocInfo.buffer = indexBuffer.get();
639 allocInfo.allocationSize = memReqs.size;
640 allocInfo.memoryTypeIndex = memoryTypeNdx;
641 indexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
643 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
644 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
649 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
651 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
653 m_physicalDeviceCount, // deviceIndexCount
654 &deviceIndices[0], // pDeviceIndices
657 VkBindBufferMemoryInfo bindInfo =
659 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
660 &devGroupBindInfo, // pNext
661 indexBuffer.get(), // buffer
662 indexBufferMemory.get(), // memory
665 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
668 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *indexBuffer, indexBufferMemory.get(), 0));
671 // create uniform buffers
673 const VkBufferCreateInfo stagingUniformBufferParams =
675 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
678 (VkDeviceSize)sizeof(drawColor), // size
679 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
680 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
681 1u, // queueFamilyIndexCount
682 &queueFamilyIndex, // pQueueFamilyIndices
684 stagingUniformBuffer = createBuffer(vk, *m_deviceGroup, &stagingUniformBufferParams);
685 stagingUniformBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingUniformBuffer), MemoryRequirement::HostVisible);
686 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingUniformBuffer, stagingUniformBufferMemory->getMemory(), stagingUniformBufferMemory->getOffset()));
688 const VkMappedMemoryRange range =
690 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
692 stagingUniformBufferMemory->getMemory(),// memory
694 (VkDeviceSize)sizeof(drawColor), // size
696 void* uniformBufPtr = stagingUniformBufferMemory->getHostPtr();
697 deMemcpy(uniformBufPtr, &drawColor[0], sizeof(drawColor));
698 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
702 const VkBufferCreateInfo uniformBufferParams =
704 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
707 (VkDeviceSize)sizeof(drawColor), // size
708 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
709 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
710 1u, // queueFamilyIndexCount
711 &queueFamilyIndex, // pQueueFamilyIndices
713 uniformBuffer = createBuffer(vk, *m_deviceGroup, &uniformBufferParams);
715 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, uniformBuffer.get());
716 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
718 dedicatedAllocInfo.buffer = uniformBuffer.get();
719 allocInfo.allocationSize = memReqs.size;
720 allocInfo.memoryTypeIndex = memoryTypeNdx;
721 uniformBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
723 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
724 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
728 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
730 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
732 m_physicalDeviceCount, // deviceIndexCount
733 &deviceIndices[0], // pDeviceIndices
736 VkBindBufferMemoryInfo bindInfo =
738 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
739 &devGroupBindInfo, // pNext
740 uniformBuffer.get(), // buffer
741 uniformBufferMemory.get(), // memory
744 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
747 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, uniformBuffer.get(), uniformBufferMemory.get(), 0));
750 // create SBO buffers
752 const VkBufferCreateInfo stagingSboBufferParams =
754 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
757 (VkDeviceSize)sizeof(tessLevel), // size
758 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // usage
759 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
760 1u, // queueFamilyIndexCount
761 &queueFamilyIndex, // pQueueFamilyIndices
763 stagingSboBuffer = createBuffer(vk, *m_deviceGroup, &stagingSboBufferParams);
764 stagingSboBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingSboBuffer), MemoryRequirement::HostVisible);
765 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingSboBuffer, stagingSboBufferMemory->getMemory(), stagingSboBufferMemory->getOffset()));
767 const VkMappedMemoryRange range =
769 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
771 stagingSboBufferMemory->getMemory(), // memory
773 (VkDeviceSize)sizeof(tessLevel), // size
775 void* sboBufPtr = stagingSboBufferMemory->getHostPtr();
776 deMemcpy(sboBufPtr, &tessLevel, sizeof(tessLevel));
777 VK_CHECK(vk.flushMappedMemoryRanges(*m_deviceGroup, 1u, &range));
781 const VkBufferCreateInfo sboBufferParams =
783 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
786 (VkDeviceSize)sizeof(tessLevel), // size
787 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
788 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
789 1u, // queueFamilyIndexCount
790 &queueFamilyIndex, // pQueueFamilyIndices
792 sboBuffer = createBuffer(vk, *m_deviceGroup, &sboBufferParams);
794 memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, sboBuffer.get());
795 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
797 dedicatedAllocInfo.buffer = sboBuffer.get();
798 allocInfo.allocationSize = memReqs.size;
799 allocInfo.memoryTypeIndex = memoryTypeNdx;
800 sboBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
802 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
803 TCU_THROW(NotSupportedError, "Peer fetch is not supported.");
807 VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
809 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, // sType
811 m_physicalDeviceCount, // deviceIndexCount
812 &deviceIndices[0], // pDeviceIndices
815 VkBindBufferMemoryInfo bindInfo =
817 VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, // sType
818 &devGroupBindInfo, // pNext
819 sboBuffer.get(), // buffer
820 sboBufferMemory.get(), // memory
823 VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
826 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, sboBuffer.get(), sboBufferMemory.get(), 0));
829 // Create image resources
830 // Use a consistent usage flag because of memory aliasing
831 VkImageUsageFlags imageUsageFlag = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
833 // Check for SFR support
834 VkImageFormatProperties properties;
835 if ((m_testMode & TEST_MODE_SFR) && vki.getPhysicalDeviceImageFormatProperties(m_context.getPhysicalDevice(),
836 colorFormat, // format
837 VK_IMAGE_TYPE_2D, // type
838 VK_IMAGE_TILING_OPTIMAL, // tiling
839 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // usage
840 VK_IMAGE_CREATE_BIND_SFR_BIT, // flags
841 &properties) != VK_SUCCESS) // properties
843 TCU_THROW(NotSupportedError, "Format not supported for SFR");
846 VkImageCreateFlags imageCreateFlags = VK_IMAGE_CREATE_ALIAS_BIT; // The image objects alias same memory
847 if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
849 imageCreateFlags |= VK_IMAGE_CREATE_BIND_SFR_BIT;
852 const VkImageCreateInfo imageParams =
854 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
856 imageCreateFlags, // flags
857 VK_IMAGE_TYPE_2D, // imageType
858 colorFormat, // format
859 { renderSize.x(), renderSize.y(), 1 }, // extent
862 VK_SAMPLE_COUNT_1_BIT, // samples
863 VK_IMAGE_TILING_OPTIMAL, // tiling
864 imageUsageFlag, // usage
865 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
866 1u, // queueFamilyIndexCount
867 &queueFamilyIndex, // pQueueFamilyIndices
868 VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
871 renderImage = createImage(vk, *m_deviceGroup, &imageParams);
872 readImage = createImage(vk, *m_deviceGroup, &imageParams);
874 dedicatedAllocInfo.image = *renderImage;
875 dedicatedAllocInfo.buffer = DE_NULL;
876 memReqs = getImageMemoryRequirements(vk, *m_deviceGroup, renderImage.get());
877 memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, m_useHostMemory ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
878 allocInfo.allocationSize = memReqs.size;
879 allocInfo.memoryTypeIndex = memoryTypeNdx;
880 imageMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
883 if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
885 if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
886 TCU_THROW(NotSupportedError, "Peer texture reads is not supported.");
888 // Check if peer memory can be used as source of a copy command in case of SFR bindings, always allowed in case of 1 device
889 VkPeerMemoryFeatureFlags peerMemFeatures;
890 const VkPhysicalDeviceMemoryProperties deviceMemProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[secondDeviceID]);
891 vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps.memoryTypes[memoryTypeNdx].heapIndex, firstDeviceID, secondDeviceID, &peerMemFeatures);
892 isPeerMemAsCopySrcAllowed = (peerMemFeatures & VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT);
894 VkRect2D zeroRect = {
904 vector<VkRect2D> sfrRects;
905 for (deUint32 i = 0; i < m_physicalDeviceCount*m_physicalDeviceCount; i++)
906 sfrRects.push_back(zeroRect);
908 if (m_physicalDeviceCount == 1u)
910 sfrRects[0].extent.width = (deInt32)renderSize.x();
911 sfrRects[0].extent.height = (deInt32)renderSize.y();
915 // Split into 2 vertical halves
916 sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.width = (deInt32)renderSize.x() / 2;
917 sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.height = (deInt32)renderSize.y();
918 sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID] = sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
919 sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID].offset.x = (deInt32)renderSize.x() / 2;
920 sfrRects[secondDeviceID * m_physicalDeviceCount + firstDeviceID] = sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
921 sfrRects[secondDeviceID * m_physicalDeviceCount + secondDeviceID] = sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID];
924 VkBindImageMemoryDeviceGroupInfo devGroupBindInfo =
926 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, // sType
928 0u, // deviceIndexCount
929 DE_NULL, // pDeviceIndices
930 m_physicalDeviceCount*m_physicalDeviceCount, // SFRRectCount
931 &sfrRects[0], // pSFRRects
934 VkBindImageMemoryInfo bindInfo =
936 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, // sType
937 &devGroupBindInfo, // pNext
938 *renderImage, // image
939 imageMemory.get(), // memory
942 VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
945 VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *renderImage, imageMemory.get(), 0));
947 VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *readImage, imageMemory.get(), 0));
951 const VkAttachmentDescription colorAttDesc =
954 colorFormat, // format
955 VK_SAMPLE_COUNT_1_BIT, // samples
956 VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
957 VK_ATTACHMENT_STORE_OP_STORE, // storeOp
958 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
959 VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
960 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // initialLayout
961 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // finalLayout
963 const VkAttachmentReference colorAttRef =
966 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // layout
968 const VkSubpassDescription subpassDesc =
970 (VkSubpassDescriptionFlags)0u, // flags
971 VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
972 0u, // inputAttachmentCount
973 DE_NULL, // pInputAttachments
974 1u, // colorAttachmentCount
975 &colorAttRef, // pColorAttachments
976 DE_NULL, // pResolveAttachments
977 DE_NULL, // depthStencilAttachment
978 0u, // preserveAttachmentCount
979 DE_NULL, // pPreserveAttachments
981 const VkRenderPassCreateInfo renderPassParams =
983 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // sType
986 1u, // attachmentCount
987 &colorAttDesc, // pAttachments
989 &subpassDesc, // pSubpasses
990 0u, // dependencyCount
991 DE_NULL, // pDependencies
993 renderPass = createRenderPass(vk, *m_deviceGroup, &renderPassParams);
996 // Create descriptors
998 vector<VkDescriptorSetLayoutBinding> layoutBindings;
999 vector<VkDescriptorPoolSize> descriptorTypes;
1000 vector<VkWriteDescriptorSet> writeDescritporSets;
1002 const VkDescriptorSetLayoutBinding layoutBindingUBO =
1004 0u, // deUint32 binding;
1005 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType descriptorType;
1006 1u, // deUint32 descriptorCount;
1007 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlags stageFlags;
1008 DE_NULL // const VkSampler* pImmutableSamplers;
1010 const VkDescriptorSetLayoutBinding layoutBindingSBO =
1012 1u, // deUint32 binding;
1013 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType descriptorType;
1014 1u, // deUint32 descriptorCount;
1015 VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, // VkShaderStageFlags stageFlags;
1016 DE_NULL // const VkSampler* pImmutableSamplers;
1019 layoutBindings.push_back(layoutBindingUBO);
1020 if (m_drawTessellatedSphere)
1021 layoutBindings.push_back(layoutBindingSBO);
// Descriptor set layout: binding 0 = UBO (fragment stage); binding 1 = SBO
// (tessellation control stage) is present only when m_drawTessellatedSphere
// added it to layoutBindings above.
1023 const VkDescriptorSetLayoutCreateInfo descriptorLayoutParams =
1025 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, // VkStructureType sType;
1026 DE_NULL, // const void* pNext;
1027 (VkDescriptorSetLayoutCreateFlags)0, // VkDescriptorSetLayoutCreateFlags flags
1028 deUint32(layoutBindings.size()), // deUint32 bindingCount;
1029 layoutBindings.data() // const VkDescriptorSetLayoutBinding* pBindings;
1031 descriptorSetLayout = createDescriptorSetLayout(vk, *m_deviceGroup, &descriptorLayoutParams);
1033 const VkDescriptorPoolSize descriptorTypeUBO =
1035 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType type;
1036 1 // deUint32 count;
1038 const VkDescriptorPoolSize descriptorTypeSBO =
1040 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType type;
1041 1 // deUint32 count;
1043 descriptorTypes.push_back(descriptorTypeUBO);
1044 if (m_drawTessellatedSphere)
1045 descriptorTypes.push_back(descriptorTypeSBO);
1047 const VkDescriptorPoolCreateInfo descriptorPoolParams =
1049 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, // VkStructureType sType;
1050 DE_NULL, // void* pNext;
1051 VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, // VkDescriptorPoolCreateFlags flags;
1052 1u, // deUint32 maxSets;
1053 deUint32(descriptorTypes.size()), // deUint32 count;
1054 descriptorTypes.data() // const VkDescriptorTypeCount* pTypeCount
1056 descriptorPool = createDescriptorPool(vk, *m_deviceGroup, &descriptorPoolParams);
1058 const VkDescriptorSetAllocateInfo descriptorSetParams =
1060 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1064 &descriptorSetLayout.get(),
1066 descriptorSet = allocateDescriptorSet(vk, *m_deviceGroup, &descriptorSetParams);
1068 const VkDescriptorBufferInfo uboDescriptorInfo =
1070 uniformBuffer.get(),
1072 (VkDeviceSize)sizeof(drawColor)
1074 const VkDescriptorBufferInfo sboDescriptorInfo =
1078 (VkDeviceSize)sizeof(tessLevel)
1080 const VkWriteDescriptorSet writeDescritporSetUBO =
1082 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
1083 DE_NULL, // const void* pNext;
1084 *descriptorSet, // VkDescriptorSet destSet;
1085 0, // deUint32 destBinding;
1086 0, // deUint32 destArrayElement;
1087 1u, // deUint32 count;
1088 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, // VkDescriptorType descriptorType;
1089 (const VkDescriptorImageInfo*)DE_NULL, // VkDescriptorImageInfo* pImageInfo;
1090 &uboDescriptorInfo, // VkDescriptorBufferInfo* pBufferInfo;
1091 (const VkBufferView*)DE_NULL // VkBufferView* pTexelBufferView;
1094 const VkWriteDescriptorSet writeDescritporSetSBO =
1096 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // VkStructureType sType;
1097 DE_NULL, // const void* pNext;
1098 *descriptorSet, // VkDescriptorSet destSet;
1099 1, // deUint32 destBinding;
1100 0, // deUint32 destArrayElement;
1101 1u, // deUint32 count;
1102 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, // VkDescriptorType descriptorType;
1103 (const VkDescriptorImageInfo*)DE_NULL, // VkDescriptorImageInfo* pImageInfo;
1104 &sboDescriptorInfo, // VkDescriptorBufferInfo* pBufferInfo;
1105 (const VkBufferView*)DE_NULL // VkBufferView* pTexelBufferView;
1107 writeDescritporSets.push_back(writeDescritporSetUBO);
1108 if (m_drawTessellatedSphere)
1109 writeDescritporSets.push_back(writeDescritporSetSBO);
1111 vk.updateDescriptorSets(*m_deviceGroup, deUint32(writeDescritporSets.size()), writeDescritporSets.data(), 0u, DE_NULL);
1116 vector<VkPipelineShaderStageCreateInfo> shaderStageParams;
1117 Move<VkShaderModule> vertShaderModule;
1118 Move<VkShaderModule> tcssShaderModule;
1119 Move<VkShaderModule> tessShaderModule;
1120 Move<VkShaderModule> fragShaderModule;
1122 const VkDescriptorSetLayout descset = descriptorSetLayout.get();
1123 const VkPipelineLayoutCreateInfo pipelineLayoutParams =
1125 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
1127 (vk::VkPipelineLayoutCreateFlags)0, // flags
1128 1u, // setLayoutCount
1129 &descset, // pSetLayouts
1130 0u, // pushConstantRangeCount
1131 DE_NULL, // pPushConstantRanges
1133 pipelineLayout = createPipelineLayout(vk, *m_deviceGroup, &pipelineLayoutParams);
1136 vertShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("vert"), 0);
1137 fragShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("frag"), 0);
1139 const VkSpecializationInfo emptyShaderSpecParams =
1141 0u, // mapEntryCount
1146 const VkPipelineShaderStageCreateInfo vertexShaderStageParams =
1148 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1151 VK_SHADER_STAGE_VERTEX_BIT, // stage
1152 *vertShaderModule, // module
1154 &emptyShaderSpecParams, // pSpecializationInfo
1156 shaderStageParams.push_back(vertexShaderStageParams);
1158 if (m_drawTessellatedSphere)
1160 tcssShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tesc"), 0);
1161 tessShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tese"), 0);
1163 const VkPipelineShaderStageCreateInfo tessControlShaderStageParams =
1165 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1168 VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, // stage
1169 *tcssShaderModule, // module
1171 &emptyShaderSpecParams, // pSpecializationInfo
1173 const VkPipelineShaderStageCreateInfo tessEvalShaderStageParams =
1175 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1178 VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, // stage
1179 *tessShaderModule, // module
1181 &emptyShaderSpecParams, // pSpecializationInfo
1184 shaderStageParams.push_back(tessControlShaderStageParams);
1185 shaderStageParams.push_back(tessEvalShaderStageParams);
1188 const VkPipelineShaderStageCreateInfo fragmentShaderStageParams =
1190 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
1193 VK_SHADER_STAGE_FRAGMENT_BIT, // stage
1194 *fragShaderModule, // module
1196 &emptyShaderSpecParams, // pSpecializationInfo
1198 shaderStageParams.push_back(fragmentShaderStageParams);
1200 const VkPipelineDepthStencilStateCreateInfo depthStencilParams =
1202 VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // sType
1205 DE_FALSE, // depthTestEnable
1206 DE_FALSE, // depthWriteEnable
1207 VK_COMPARE_OP_ALWAYS, // depthCompareOp
1208 DE_FALSE, // depthBoundsTestEnable
1209 DE_FALSE, // stencilTestEnable
1211 VK_STENCIL_OP_KEEP, // failOp
1212 VK_STENCIL_OP_KEEP, // passOp
1213 VK_STENCIL_OP_KEEP, // depthFailOp
1214 VK_COMPARE_OP_ALWAYS, // compareOp
1220 VK_STENCIL_OP_KEEP, // failOp
1221 VK_STENCIL_OP_KEEP, // passOp
1222 VK_STENCIL_OP_KEEP, // depthFailOp
1223 VK_COMPARE_OP_ALWAYS, // compareOp
1228 0.0f, // minDepthBounds;
1229 1.0f, // maxDepthBounds;
1231 const VkViewport viewport0 =
1235 (float)renderSize.x(), // width
1236 (float)renderSize.y(), // height
1240 const VkRect2D scissor0 =
1247 renderSize.x(), // width
1248 renderSize.y(), // height
1251 const VkPipelineViewportStateCreateInfo viewportParams =
1253 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // sType
1256 1u, // viewportCount
1257 &viewport0, // pViewports
1259 &scissor0 // pScissors
1261 const VkSampleMask sampleMask = ~0u;
1262 const VkPipelineMultisampleStateCreateInfo multisampleParams =
1264 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // sType
1267 VK_SAMPLE_COUNT_1_BIT, // rasterizationSamples
1268 VK_FALSE, // sampleShadingEnable
1269 0.0f, // minSampleShading
1270 &sampleMask, // sampleMask
1271 VK_FALSE, // alphaToCoverageEnable
1272 VK_FALSE, // alphaToOneEnable
1274 const VkPipelineRasterizationStateCreateInfo rasterParams =
1276 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // sType
1279 VK_TRUE, // depthClampEnable
1280 VK_FALSE, // rasterizerDiscardEnable
1281 m_fillModeNonSolid ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL, // polygonMode
1282 VK_CULL_MODE_NONE, // cullMode
1283 VK_FRONT_FACE_COUNTER_CLOCKWISE, // frontFace
1284 VK_FALSE, // depthBiasEnable
1285 0.0f, // depthBiasConstantFactor
1286 0.0f, // depthBiasClamp
1287 0.0f, // depthBiasSlopeFactor
1290 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyParams =
1292 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // sType
1295 m_drawTessellatedSphere ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // topology
1296 DE_FALSE, // primitiveRestartEnable
1298 const VkVertexInputBindingDescription vertexBinding0 =
1301 (deUint32)sizeof(tcu::Vec4), // stride
1302 VK_VERTEX_INPUT_RATE_VERTEX, // inputRate
1304 const VkVertexInputAttributeDescription vertexAttrib0 =
1308 VK_FORMAT_R32G32B32A32_SFLOAT, // format
1311 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
1313 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // sType
1316 1u, // vertexBindingDescriptionCount
1317 &vertexBinding0, // pVertexBindingDescriptions
1318 1u, // vertexAttributeDescriptionCount
1319 &vertexAttrib0, // pVertexAttributeDescriptions
1321 const VkPipelineColorBlendAttachmentState attBlendParams =
1323 VK_FALSE, // blendEnable
1324 VK_BLEND_FACTOR_ONE, // srcColorBlendFactor
1325 VK_BLEND_FACTOR_ZERO, // dstColorBlendFactor
1326 VK_BLEND_OP_ADD, // colorBlendOp
1327 VK_BLEND_FACTOR_ONE, // srcAlphaBlendFactor
1328 VK_BLEND_FACTOR_ZERO, // dstAlphaBlendFactor
1329 VK_BLEND_OP_ADD, // alphaBlendOp
1330 (VK_COLOR_COMPONENT_R_BIT |
1331 VK_COLOR_COMPONENT_G_BIT |
1332 VK_COLOR_COMPONENT_B_BIT |
1333 VK_COLOR_COMPONENT_A_BIT), // colorWriteMask
1335 const VkPipelineColorBlendStateCreateInfo blendParams =
1337 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // sType
1340 DE_FALSE, // logicOpEnable
1341 VK_LOGIC_OP_COPY, // logicOp
1342 1u, // attachmentCount
1343 &attBlendParams, // pAttachments
1344 { 0.0f, 0.0f, 0.0f, 0.0f }, // blendConstants[4]
1347 const VkPipelineTessellationStateCreateInfo tessState =
1349 VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // sType
1352 3u, // patchControlPoints
1354 const VkGraphicsPipelineCreateInfo pipelineParams =
1356 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // sType
1359 deUint32(shaderStageParams.size()), // stageCount
1360 shaderStageParams.data(), // pStages
1361 &vertexInputStateParams, // pVertexInputState
1362 &inputAssemblyParams, // pInputAssemblyState
1363 m_drawTessellatedSphere ? &tessState : DE_NULL, // pTessellationState
1364 &viewportParams, // pViewportState
1365 &rasterParams, // pRasterizationState
1366 &multisampleParams, // pMultisampleState
1367 &depthStencilParams, // pDepthStencilState
1368 &blendParams, // pColorBlendState
1369 (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // pDynamicState
1370 *pipelineLayout, // layout
1371 *renderPass, // renderPass
1373 DE_NULL, // basePipelineHandle
1374 0u, // basePipelineIndex
1376 pipeline = createGraphicsPipeline(vk, *m_deviceGroup, DE_NULL, &pipelineParams);
1379 // Create Framebuffer
1381 const VkImageViewCreateInfo colorAttViewParams =
1383 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
1386 *renderImage, // image
1387 VK_IMAGE_VIEW_TYPE_2D, // viewType
1388 colorFormat, // format
1390 VK_COMPONENT_SWIZZLE_R,
1391 VK_COMPONENT_SWIZZLE_G,
1392 VK_COMPONENT_SWIZZLE_B,
1393 VK_COMPONENT_SWIZZLE_A
1396 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1399 0u, // baseArrayLayer
1401 }, // subresourceRange
1403 colorAttView = createImageView(vk, *m_deviceGroup, &colorAttViewParams);
1405 const VkFramebufferCreateInfo framebufferParams =
1407 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
1410 *renderPass, // renderPass
1411 1u, // attachmentCount
1412 &*colorAttView, // pAttachments
1413 renderSize.x(), // width
1414 renderSize.y(), // height
1417 framebuffer = createFramebuffer(vk, *m_deviceGroup, &framebufferParams);
1420 // Create Command buffer
1422 const VkCommandPoolCreateInfo cmdPoolParams =
1424 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
1426 VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // flags
1427 queueFamilyIndex, // queueFamilyIndex
1429 cmdPool = createCommandPool(vk, *m_deviceGroup, &cmdPoolParams);
1431 const VkCommandBufferAllocateInfo cmdBufParams =
1433 VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
1436 VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1439 cmdBuffer = allocateCommandBuffer(vk, *m_deviceGroup, &cmdBufParams);
1443 VkCommandBufferBeginInfo cmdBufBeginParams =
1445 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // sType
1447 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // flags
1448 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1450 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1452 // Prepare render target for rendering
1454 const VkMemoryBarrier vertFlushBarrier =
1456 VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
1458 VK_ACCESS_HOST_WRITE_BIT, // srcAccessMask
1459 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // dstAccessMask
1461 const VkImageMemoryBarrier colorAttBarrier =
1463 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1465 0u, // srcAccessMask
1466 (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
1467 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT), // dstAccessMask
1468 VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
1469 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
1470 queueFamilyIndex, // srcQueueFamilyIndex
1471 queueFamilyIndex, // dstQueueFamilyIndex
1472 *renderImage, // image
1474 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1477 0u, // baseArrayLayer
1479 } // subresourceRange
1481 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 1, &vertFlushBarrier, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &colorAttBarrier);
1486 const VkBufferMemoryBarrier stagingVertexBufferUpdateBarrier =
1488 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1489 DE_NULL, // const void* pNext;
1490 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1491 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1492 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1493 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1494 stagingVertexBuffer.get(), // VkBuffer buffer;
1495 0u, // VkDeviceSize offset;
1496 verticesSize // VkDeviceSize size;
1499 const VkBufferMemoryBarrier vertexBufferUpdateBarrier =
1501 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1502 DE_NULL, // const void* pNext;
1503 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1504 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // VkAccessFlags dstAccessMask;
1505 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1506 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1507 vertexBuffer.get(), // VkBuffer buffer;
1508 0u, // VkDeviceSize offset;
1509 verticesSize // VkDeviceSize size;
1512 const VkBufferMemoryBarrier stagingIndexBufferUpdateBarrier =
1514 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1515 DE_NULL, // const void* pNext;
1516 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1517 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1518 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1519 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1520 stagingIndexBuffer.get(), // VkBuffer buffer;
1521 0u, // VkDeviceSize offset;
1522 indicesSize // VkDeviceSize size;
1525 const VkBufferMemoryBarrier indexBufferUpdateBarrier =
1527 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1528 DE_NULL, // const void* pNext;
1529 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1530 VK_ACCESS_INDEX_READ_BIT, // VkAccessFlags dstAccessMask;
1531 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1532 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1533 indexBuffer.get(), // VkBuffer buffer;
1534 0u, // VkDeviceSize offset;
1535 indicesSize // VkDeviceSize size;
// Host-write -> transfer-read barrier covering the drawColor data staged in
// stagingUniformBuffer, executed before it is copied into uniformBuffer
// (see uboBufferCopy below, which transfers exactly sizeof(drawColor) bytes).
1538 const VkBufferMemoryBarrier stagingUboBufferUpdateBarrier =
1540 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1541 DE_NULL, // const void* pNext;
1542 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1543 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1544 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1545 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1546 stagingUniformBuffer.get(), // VkBuffer buffer;
1547 0u, // VkDeviceSize offset;
1548 sizeof(drawColor) // VkDeviceSize size; (was indicesSize — copy/paste slip; a range larger than the staging UBO is invalid usage, and the copy below uses sizeof(drawColor))
1551 const VkBufferMemoryBarrier uboUpdateBarrier =
1553 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1554 DE_NULL, // const void* pNext;
1555 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1556 VK_ACCESS_UNIFORM_READ_BIT, // VkAccessFlags dstAccessMask;
1557 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1558 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1559 uniformBuffer.get(), // VkBuffer buffer;
1560 0u, // VkDeviceSize offset;
1561 sizeof(drawColor) // VkDeviceSize size;
1565 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingVertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1566 VkBufferCopy vertexBufferCopy = { 0u, 0u, verticesSize };
1567 vk.cmdCopyBuffer(*cmdBuffer, stagingVertexBuffer.get(), vertexBuffer.get(), 1u, &vertexBufferCopy);
1568 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &vertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1570 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingIndexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1571 VkBufferCopy indexBufferCopy = { 0u, 0u, indicesSize };
1572 vk.cmdCopyBuffer(*cmdBuffer, stagingIndexBuffer.get(), indexBuffer.get(), 1u, &indexBufferCopy);
1573 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &indexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1575 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingUboBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1576 VkBufferCopy uboBufferCopy = { 0u, 0u, sizeof(drawColor) };
1577 vk.cmdCopyBuffer(*cmdBuffer, stagingUniformBuffer.get(), uniformBuffer.get(), 1u, &uboBufferCopy);
1578 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &uboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1580 if (m_drawTessellatedSphere)
1582 const VkBufferMemoryBarrier stagingsboUpdateBarrier =
1584 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1585 DE_NULL, // const void* pNext;
1586 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1587 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1588 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1589 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1590 stagingSboBuffer.get(), // VkBuffer buffer;
1591 0u, // VkDeviceSize offset;
1592 sizeof(tessLevel) // VkDeviceSize size;
1595 const VkBufferMemoryBarrier sboUpdateBarrier =
1597 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1598 DE_NULL, // const void* pNext;
1599 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1600 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1601 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1602 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1603 sboBuffer.get(), // VkBuffer buffer;
1604 0u, // VkDeviceSize offset;
1605 sizeof(tessLevel) // VkDeviceSize size;
1608 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingsboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1609 VkBufferCopy sboBufferCopy = { 0u, 0u, sizeof(tessLevel) };
1610 vk.cmdCopyBuffer(*cmdBuffer, stagingSboBuffer.get(), sboBuffer.get(), 1u, &sboBufferCopy);
1611 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &sboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1614 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
1615 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
1617 const VkDeviceSize bindingOffset = 0;
1618 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &bindingOffset);
1619 vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT32);
1625 const VkClearValue clearValue = makeClearValueColorF32(
1631 VkRect2D zeroRect = { { 0, 0, },{ 0, 0, } };
1632 vector<VkRect2D> renderAreas;
1633 for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
1634 renderAreas.push_back(zeroRect);
1636 // Render completely if there is only 1 device
1637 if (m_physicalDeviceCount == 1u)
1639 renderAreas[0].extent.width = (deInt32)renderSize.x();
1640 renderAreas[0].extent.height = (deInt32)renderSize.y();
1644 // Split into 2 vertical halves
1645 renderAreas[firstDeviceID].extent.width = (deInt32)renderSize.x() / 2;
1646 renderAreas[firstDeviceID].extent.height = (deInt32)renderSize.y();
1647 renderAreas[secondDeviceID] = renderAreas[firstDeviceID];
1648 renderAreas[secondDeviceID].offset.x = (deInt32)renderSize.x() / 2;
1651 const VkDeviceGroupRenderPassBeginInfo deviceGroupRPBeginInfo =
1653 VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO,
1655 (deUint32)((1 << m_physicalDeviceCount) - 1),
1656 m_physicalDeviceCount,
1660 const VkRenderPassBeginInfo passBeginParams =
1662 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // sType
1663 (m_testMode & TEST_MODE_SFR) ? &deviceGroupRPBeginInfo : DE_NULL, // pNext
1664 *renderPass, // renderPass
1665 *framebuffer, // framebuffer
1668 { renderSize.x(), renderSize.y() }
1670 1u, // clearValueCount
1671 &clearValue, // pClearValues
1673 vk.cmdBeginRenderPass(*cmdBuffer, &passBeginParams, VK_SUBPASS_CONTENTS_INLINE);
1677 if (m_testMode & TEST_MODE_AFR)
1679 vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
1680 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);
1685 vk.cmdSetDeviceMask(*cmdBuffer, ((1 << firstDeviceID) | (1 << secondDeviceID)));
1686 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);
1688 vk.cmdEndRenderPass(*cmdBuffer);
1690 // Change image layout for copy
1692 const VkImageMemoryBarrier renderFinishBarrier =
1694 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1696 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // outputMask
1697 VK_ACCESS_TRANSFER_READ_BIT, // inputMask
1698 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1699 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1700 queueFamilyIndex, // srcQueueFamilyIndex
1701 queueFamilyIndex, // dstQueueFamilyIndex
1702 *renderImage, // image
1704 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1707 0u, // baseArraySlice
1709 } // subresourceRange
1711 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &renderFinishBarrier);
1714 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1716 // Submit & wait for completion
1718 const deUint32 deviceMask = (1 << firstDeviceID) | (1 << secondDeviceID);
1719 deviceGroupSubmitInfo.commandBufferCount = 1;
1720 deviceGroupSubmitInfo.pCommandBufferDeviceMasks = &deviceMask;
1721 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceGroupSubmitInfo);
1724 // Copy image from secondDeviceID in case of AFR and SFR(only if Peer memory as copy source is not allowed)
1725 if ((m_physicalDeviceCount > 1) && ((m_testMode & TEST_MODE_AFR) || (!isPeerMemAsCopySrcAllowed)))
1727 Move<VkImage> peerImage;
1729 // Create and bind peer image
1731 const VkImageCreateInfo peerImageParams =
1733 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
1735 VK_IMAGE_CREATE_ALIAS_BIT, // flags
1736 VK_IMAGE_TYPE_2D, // imageType
1737 colorFormat, // format
1738 { renderSize.x(), renderSize.y(), 1 }, // extent
1741 VK_SAMPLE_COUNT_1_BIT, // samples
1742 VK_IMAGE_TILING_OPTIMAL, // tiling
1743 imageUsageFlag, // usage
1744 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
1745 1u, // queueFamilyIndexCount
1746 &queueFamilyIndex, // pQueueFamilyIndices
1747 VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
1749 peerImage = createImage(vk, *m_deviceGroup, &peerImageParams);
1751 VkBindImageMemoryDeviceGroupInfo devGroupBindInfo =
1753 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, // sType
1755 m_physicalDeviceCount, // deviceIndexCount
1756 &deviceIndices[0], // pDeviceIndices
1758 DE_NULL, // pSFRRects
1761 VkBindImageMemoryInfo bindInfo =
1763 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, // sType
1764 &devGroupBindInfo, // pNext
1765 peerImage.get(), // image
1766 imageMemory.get(), // memory
1769 VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
1772 // Copy peer image (only needed in SFR case when peer memory as copy source is not allowed)
1774 // Change layout on firstDeviceID
1776 const VkImageMemoryBarrier preCopyBarrier =
1778 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1779 DE_NULL, // const void* pNext;
1780 0, // VkAccessFlags srcAccessMask;
1781 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1782 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1783 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1784 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1785 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1786 *renderImage, // VkImage image;
1787 { // VkImageSubresourceRange subresourceRange;
1788 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1789 0u, // deUint32 baseMipLevel;
1790 1u, // deUint32 mipLevels;
1791 0u, // deUint32 baseArraySlice;
1792 1u // deUint32 arraySize;
1796 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1797 vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
1798 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &preCopyBarrier);
1799 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1801 const deUint32 deviceMask = 1 << firstDeviceID;
1802 deviceGroupSubmitInfo.pCommandBufferDeviceMasks = &deviceMask;
1803 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceGroupSubmitInfo);
1806 // Copy Image from secondDeviceID to firstDeviceID
1808 // AFR: Copy entire image from secondDeviceID
1809 // SFR: Copy the right half of image from secondDeviceID to firstDeviceID, so that the copy
1810 // to a buffer below (for checking) does not require VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT
1811 deInt32 imageOffsetX = (m_testMode & TEST_MODE_AFR) ? 0 : renderSize.x() / 2;
1812 deUint32 imageExtentX = (m_testMode & TEST_MODE_AFR) ? (deUint32)renderSize.x() : (deUint32)renderSize.x() / 2;
1814 const VkImageCopy imageCopy =
1817 VK_IMAGE_ASPECT_COLOR_BIT,
1822 { imageOffsetX, 0, 0 },
1824 VK_IMAGE_ASPECT_COLOR_BIT,
1829 { imageOffsetX, 0, 0 },
1832 (deUint32)renderSize.y(),
1837 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1838 vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
1839 vk.cmdCopyImage(*cmdBuffer, *renderImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *peerImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageCopy);
1840 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1842 const deUint32 deviceMask = 1 << secondDeviceID;
1843 deviceGroupSubmitInfo.pCommandBufferDeviceMasks = &deviceMask;
1844 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceGroupSubmitInfo);
1847 // Change layout back on firstDeviceID
1849 const VkImageMemoryBarrier postCopyBarrier =
1851 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1852 DE_NULL, // const void* pNext;
1853 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1854 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1855 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1856 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1857 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1858 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1859 *renderImage, // VkImage image;
1860 { // VkImageSubresourceRange subresourceRange;
1861 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1862 0u, // deUint32 baseMipLevel;
1863 1u, // deUint32 mipLevels;
1864 0u, // deUint32 baseArraySlice;
1865 1u // deUint32 arraySize;
1869 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1870 vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
1871 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &postCopyBarrier);
1872 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1874 const deUint32 deviceMask = 1 << firstDeviceID;
1875 deviceGroupSubmitInfo.pCommandBufferDeviceMasks = &deviceMask;
1876 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceGroupSubmitInfo);
1881 // copy image to read buffer for checking
1883 const VkDeviceSize imageSizeBytes = (VkDeviceSize)(sizeof(deUint32) * renderSize.x() * renderSize.y());
1884 const VkBufferCreateInfo readImageBufferParams =
1886 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
1888 (VkBufferCreateFlags)0u, // flags
1889 imageSizeBytes, // size
1890 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
1891 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
1892 1u, // queueFamilyIndexCount
1893 &queueFamilyIndex, // pQueueFamilyIndices
1895 const Unique<VkBuffer> readImageBuffer(createBuffer(vk, *m_deviceGroup, &readImageBufferParams));
1896 const UniquePtr<Allocation> readImageBufferMemory(memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *readImageBuffer), MemoryRequirement::HostVisible));
1897 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
1899 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufBeginParams));
1901 // Copy image to buffer
1903 const VkBufferImageCopy copyParams =
1905 (VkDeviceSize)0u, // bufferOffset
1906 renderSize.x(), // bufferRowLength
1907 renderSize.y(), // bufferImageHeight
1909 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1911 0u, // baseArrayLayer
1913 }, // imageSubresource
1914 { 0, 0, 0 }, // imageOffset
1922 // Use a different binding in SFR when peer memory as copy source is not allowed
1923 vk.cmdCopyImageToBuffer(*cmdBuffer, isPeerMemAsCopySrcAllowed ? *renderImage : *readImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, &copyParams);
1925 const VkBufferMemoryBarrier copyFinishBarrier =
1927 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
1929 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1930 VK_ACCESS_HOST_READ_BIT, // dstAccessMask
1931 queueFamilyIndex, // srcQueueFamilyIndex
1932 queueFamilyIndex, // dstQueueFamilyIndex
1933 *readImageBuffer, // buffer
1935 imageSizeBytes // size
1937 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &copyFinishBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1939 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1941 // Submit & wait for completion
1943 const deUint32 deviceMask = 1 << firstDeviceID;
1944 deviceGroupSubmitInfo.pCommandBufferDeviceMasks = &deviceMask;
1945 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceGroupSubmitInfo);
1948 // Read results and check against reference image
1949 if (m_drawTessellatedSphere)
1951 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1952 const VkMappedMemoryRange range =
1954 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
1956 readImageBufferMemory->getMemory(), // memory
1958 imageSizeBytes, // size
1960 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1961 VK_CHECK(vk.invalidateMappedMemoryRanges(*m_deviceGroup, 1u, &range));
1963 tcu::TextureLevel referenceImage;
1964 string refImage = m_fillModeNonSolid ? "vulkan/data/device_group/sphere.png" : "vulkan/data/device_group/spherefilled.png";
1965 tcu::ImageIO::loadPNG(referenceImage, m_context.getTestContext().getArchive(), refImage.c_str());
1966 iterateResultSuccess = tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ImageComparison", "Image Comparison",
1967 referenceImage.getAccess(), resultAccess, 0.001f, tcu::COMPARE_LOG_RESULT);
1971 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1972 const VkMappedMemoryRange range =
1974 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, // sType
1976 readImageBufferMemory->getMemory(), // memory
1978 imageSizeBytes, // size
1980 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1981 VK_CHECK(vk.invalidateMappedMemoryRanges(*m_deviceGroup, 1u, &range));
1983 // Render reference and compare
1985 tcu::TextureLevel refImage(tcuFormat, (deInt32)renderSize.x(), (deInt32)renderSize.y());
1986 const tcu::UVec4 threshold(0u);
1987 const tcu::IVec3 posDeviation(1, 1, 0);
1989 tcu::clear(refImage.getAccess(), clearColor);
1990 renderReferenceTriangle(refImage.getAccess(), triVertices);
1992 iterateResultSuccess = tcu::intThresholdPositionDeviationCompare(m_context.getTestContext().getLog(),
1994 "Image comparison result",
1995 refImage.getAccess(),
2000 tcu::COMPARE_LOG_RESULT);
2005 if (!iterateResultSuccess)
2006 return tcu::TestStatus::fail("Image comparison failed");
2009 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Device group verification passed");
2012 template<class Instance>
2013 class DeviceGroupTestCase : public TestCase
2016 DeviceGroupTestCase (tcu::TestContext& context,
2018 const char* description,
2020 : TestCase(context, name, description)
2026 deUint32 m_testMode;
2028 TestInstance* createInstance (Context& context) const
2030 return new Instance(context, m_testMode);
2033 void initPrograms (vk::SourceCollections& programCollection) const
2035 programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
2036 "layout(location = 0) in vec4 in_Position;\n"
2037 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
2039 " gl_Position = in_Position;\n"
2040 " gl_PointSize = 1.0;\n"
2043 if (m_testMode & TEST_MODE_TESSELLATION)
2045 programCollection.glslSources.add("tesc") << glu::TessellationControlSource("#version 450\n"
2046 "#extension GL_EXT_tessellation_shader : require\n"
2047 "layout(vertices=3) out;\n"
2048 "layout(set=0, binding=1) buffer tessLevel { \n"
2053 " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
2054 " if (gl_InvocationID == 0) {\n"
2055 " for (int i = 0; i < 4; i++)\n"
2056 " gl_TessLevelOuter[i] = tessLvl;\n"
2057 " for (int i = 0; i < 2; i++)\n"
2058 " gl_TessLevelInner[i] = tessLvl;\n"
2062 programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource("#version 450\n"
2063 "#extension GL_EXT_tessellation_shader : require\n"
2064 "layout(triangles) in;\n"
2065 "layout(equal_spacing) in;\n"
2069 " vec4 pos = vec4(0, 0, 0, 0);\n"
2070 " vec3 tessCoord = gl_TessCoord.xyz;\n"
2071 " pos += tessCoord.z * gl_in[0].gl_Position;\n"
2072 " pos += tessCoord.x * gl_in[1].gl_Position;\n"
2073 " pos += tessCoord.y * gl_in[2].gl_Position;\n"
2074 " vec3 sign = sign(pos.xyz);\n"
2075 " pos.xyz = 0.785398 - abs(pos.xyz) * 1.5707963;\n"
2076 " pos.xyz = (1 - tan(pos.xyz))/2.0;\n"
2077 " pos.xyz = (sign * pos.xyz) / length(pos.xyz);\n"
2078 " gl_Position = pos;\n"
2082 programCollection.glslSources.add("frag") << glu::FragmentSource("#version 430\n"
2083 "layout(location = 0) out vec4 out_FragColor;\n"
2084 "layout(std140, set=0, binding=0) uniform bufferData { \n"
2089 " out_FragColor = color;\n"
2096 class DeviceGroupTestRendering : public tcu::TestCaseGroup
2099 DeviceGroupTestRendering (tcu::TestContext& testCtx);
2100 ~DeviceGroupTestRendering (void) {}
2104 DeviceGroupTestRendering (const DeviceGroupTestRendering& other);
2105 DeviceGroupTestRendering& operator= (const DeviceGroupTestRendering& other);
2108 DeviceGroupTestRendering::DeviceGroupTestRendering (tcu::TestContext& testCtx)
2109 : TestCaseGroup (testCtx, "device_group", "Testing device group test cases")
2111 // Left blank on purpose
2114 void DeviceGroupTestRendering::init (void)
2116 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr", "Test split frame rendering", TEST_MODE_SFR));
2117 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_sys", "Test split frame rendering with render target in host memory", TEST_MODE_SFR | TEST_MODE_HOSTMEMORY));
2118 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated", "Test split frame rendering with dedicated memory allocations", TEST_MODE_SFR | TEST_MODE_DEDICATED));
2119 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_SFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2121 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr", "Test alternate frame rendering", TEST_MODE_AFR));
2122 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_sys", "Test split frame rendering with render target in host memory", TEST_MODE_AFR | TEST_MODE_HOSTMEMORY));
2123 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated", "Test split frame rendering with dedicated memory allocations", TEST_MODE_AFR | TEST_MODE_DEDICATED));
2124 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_AFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2126 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated", "Test split frame rendering with tessellated sphere", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2127 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated_linefill", "Test split frame rendering with tessellated sphere with line segments", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2128 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated", "Test alternate frame rendering with tesselated sphere", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2129 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated_linefill", "Test alternate frame rendering with tesselated sphere with line segments", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
2132 tcu::TestCaseGroup* createTests(tcu::TestContext& testCtx)
2134 return new DeviceGroupTestRendering(testCtx);