2 /*------------------------------------------------------------------------
3 * Vulkan Conformance Tests
4 * ------------------------
6 * Copyright (c) 2019 The Khronos Group Inc.
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Signal ordering tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktSynchronizationSignalOrderTests.hpp"
26 #include "vktSynchronizationOperation.hpp"
27 #include "vktSynchronizationOperationTestData.hpp"
28 #include "vktSynchronizationOperationResources.hpp"
29 #include "vktTestCaseUtil.hpp"
30 #include "vktSynchronizationUtil.hpp"
31 #include "vktExternalMemoryUtil.hpp"
32 #include "vktCustomInstancesDevices.hpp"
33 #include "vkBarrierUtil.hpp"
36 #include "vkPlatform.hpp"
37 #include "vkQueryUtil.hpp"
38 #include "vkCmdUtil.hpp"
39 #include "vkImageUtil.hpp"
41 #include "vkTypeUtil.hpp"
43 #include "tcuTestLog.hpp"
44 #include "tcuCommandLine.hpp"
46 #include "deRandom.hpp"
47 #include "deThread.hpp"
48 #include "deUniquePtr.hpp"
55 namespace synchronization
61 using namespace vkt::ExternalMemoryUtil;
67 inline SharedPtr<Move<T> > makeVkSharedPtr (Move<T> move)
69 return SharedPtr<Move<T> >(new Move<T>(move));
73 inline SharedPtr<T> makeSharedPtr (de::MovePtr<T> move)
75 return SharedPtr<T>(move.release());
79 inline SharedPtr<T> makeSharedPtr (T* ptr)
81 return SharedPtr<T>(ptr);
84 void hostSignal (const DeviceInterface& vk, const VkDevice& device, VkSemaphore semaphore, const deUint64 timelineValue)
86 VkSemaphoreSignalInfoKHR ssi =
88 VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO, // VkStructureType sType;
89 DE_NULL, // const void* pNext;
90 semaphore, // VkSemaphore semaphore;
91 timelineValue, // deUint64 value;
94 VK_CHECK(vk.signalSemaphore(device, &ssi));
97 Move<VkDevice> createTestDevice (const Context& context)
99 const float priority = 0.0f;
100 const std::vector<VkQueueFamilyProperties> queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(), context.getPhysicalDevice());
101 std::vector<deUint32> queueFamilyIndices (queueFamilyProperties.size(), 0xFFFFFFFFu);
102 std::vector<const char*> extensions;
104 VkPhysicalDeviceFeatures2 createPhysicalFeature { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, DE_NULL, context.getDeviceFeatures() };
105 VkPhysicalDeviceTimelineSemaphoreFeatures timelineSemaphoreFeatures { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES, DE_NULL, DE_TRUE };
106 VkPhysicalDeviceSynchronization2FeaturesKHR synchronization2Features { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SYNCHRONIZATION_2_FEATURES_KHR, DE_NULL, DE_TRUE };
107 void** nextPtr = &createPhysicalFeature.pNext;
109 if (context.isDeviceFunctionalitySupported("VK_KHR_timeline_semaphore"))
111 extensions.push_back("VK_KHR_timeline_semaphore");
112 addToChainVulkanStructure(&nextPtr, timelineSemaphoreFeatures);
115 if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_semaphore"))
116 extensions.push_back("VK_KHR_external_semaphore");
117 if (!isCoreDeviceExtension(context.getUsedApiVersion(), "VK_KHR_external_memory"))
118 extensions.push_back("VK_KHR_external_memory");
120 if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
121 extensions.push_back("VK_KHR_external_semaphore_fd");
123 if (context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
124 extensions.push_back("VK_KHR_external_semaphore_win32");
126 if (context.isDeviceFunctionalitySupported("VK_KHR_synchronization2"))
128 extensions.push_back("VK_KHR_synchronization2");
129 addToChainVulkanStructure(&nextPtr, synchronization2Features);
134 deUint32 maxQueueCount = 1;
135 for (const VkQueueFamilyProperties& qfp : queueFamilyProperties)
136 maxQueueCount = deMaxu32(qfp.queueCount, maxQueueCount);
138 std::vector<float> queuePriorities(maxQueueCount, priority);
139 std::vector<VkDeviceQueueCreateInfo> queues;
141 for (size_t ndx = 0; ndx < queueFamilyProperties.size(); ndx++)
143 const VkDeviceQueueCreateInfo createInfo =
145 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
150 queueFamilyProperties[ndx].queueCount,
151 queuePriorities.data()
154 queues.push_back(createInfo);
157 const VkDeviceCreateInfo createInfo =
159 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
160 &createPhysicalFeature,
163 (deUint32)queues.size(),
169 (deUint32)extensions.size(),
170 extensions.empty() ? DE_NULL : &extensions[0],
174 const auto validation = context.getTestContext().getCommandLine().isValidationEnabled();
175 return createCustomDevice(validation, context.getPlatformInterface(), context.getInstance(), context.getInstanceInterface(), context.getPhysicalDevice(), &createInfo);
177 catch (const vk::Error& error)
179 if (error.getError() == VK_ERROR_EXTENSION_NOT_PRESENT)
180 TCU_THROW(NotSupportedError, "Required extensions not supported");
186 // Class to wrap a singleton instance and device
187 class SingletonDevice
189 SingletonDevice (const Context& context)
190 : m_logicalDevice (createTestDevice(context))
196 static const Unique<vk::VkDevice>& getDevice(const Context& context)
198 if (!m_singletonDevice)
199 m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
201 DE_ASSERT(m_singletonDevice);
202 return m_singletonDevice->m_logicalDevice;
205 static void destroy()
207 m_singletonDevice.clear();
211 const Unique<vk::VkDevice> m_logicalDevice;
213 static SharedPtr<SingletonDevice> m_singletonDevice;
215 SharedPtr<SingletonDevice> SingletonDevice::m_singletonDevice;
217 static void cleanupGroup ()
219 // Destroy singleton object
220 SingletonDevice::destroy();
223 class SimpleAllocation : public Allocation
226 SimpleAllocation (const DeviceInterface& vkd,
228 const VkDeviceMemory memory);
229 ~SimpleAllocation (void);
232 const DeviceInterface& m_vkd;
233 const VkDevice m_device;
236 SimpleAllocation::SimpleAllocation (const DeviceInterface& vkd,
238 const VkDeviceMemory memory)
239 : Allocation (memory, 0, DE_NULL)
245 SimpleAllocation::~SimpleAllocation (void)
247 m_vkd.freeMemory(m_device, getMemory(), DE_NULL);
250 vk::VkMemoryRequirements getMemoryRequirements (const DeviceInterface& vkd,
254 const VkBufferMemoryRequirementsInfo2 requirementInfo =
256 VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
260 VkMemoryRequirements2 requirements =
262 VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
266 vkd.getBufferMemoryRequirements2(device, &requirementInfo, &requirements);
267 return requirements.memoryRequirements;
270 vk::VkMemoryRequirements getMemoryRequirements(const DeviceInterface& vkd,
274 const VkImageMemoryRequirementsInfo2 requirementInfo =
276 VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
280 VkMemoryRequirements2 requirements =
282 VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
286 vkd.getImageMemoryRequirements2(device, &requirementInfo, &requirements);
288 return requirements.memoryRequirements;
291 MovePtr<Allocation> importAndBindMemory (const DeviceInterface& vkd,
294 NativeHandle& nativeHandle,
295 VkExternalMemoryHandleTypeFlagBits externalType,
296 const deUint32 exportedMemoryTypeIndex)
298 const VkMemoryRequirements requirements = getBufferMemoryRequirements(vkd, device, buffer);
299 Move<VkDeviceMemory> memory;
302 memory = importDedicatedMemory(vkd, device, buffer, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
304 memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
306 VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0u));
308 return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
311 MovePtr<Allocation> importAndBindMemory (const DeviceInterface& vkd,
314 NativeHandle& nativeHandle,
315 VkExternalMemoryHandleTypeFlagBits externalType,
316 deUint32 exportedMemoryTypeIndex)
318 const VkMemoryRequirements requirements = getImageMemoryRequirements(vkd, device, image);
319 Move<VkDeviceMemory> memory;
322 memory = importDedicatedMemory(vkd, device, image, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
324 memory = importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
326 VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0u));
328 return MovePtr<Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
331 struct QueueTimelineIteration
333 QueueTimelineIteration(const SharedPtr<OperationSupport>& _opSupport,
336 deUint32 _queueFamilyIdx,
338 : opSupport(_opSupport)
340 , queueFamilyIdx(_queueFamilyIdx)
342 timelineValue = lastValue + rng.getInt(1, 100);
344 ~QueueTimelineIteration() {}
346 SharedPtr<OperationSupport> opSupport;
348 deUint32 queueFamilyIdx;
349 deUint64 timelineValue;
350 SharedPtr<Operation> op;
353 de::MovePtr<Resource> importResource (const DeviceInterface& vkd,
355 const ResourceDescription& resourceDesc,
356 const deUint32 queueFamilyIndex,
357 const OperationSupport& readOp,
358 const OperationSupport& writeOp,
359 NativeHandle& nativeHandle,
360 VkExternalMemoryHandleTypeFlagBits externalType,
361 deUint32 exportedMemoryTypeIndex)
363 if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
365 const VkExtent3D extent =
367 (deUint32)resourceDesc.size.x(),
368 de::max(1u, (deUint32)resourceDesc.size.y()),
369 de::max(1u, (deUint32)resourceDesc.size.z())
371 const VkImageSubresourceRange subresourceRange =
373 resourceDesc.imageAspect,
379 const VkImageSubresourceLayers subresourceLayers =
381 resourceDesc.imageAspect,
386 const VkExternalMemoryImageCreateInfo externalInfo =
388 VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
390 (VkExternalMemoryHandleTypeFlags)externalType
392 const VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;
393 const VkImageCreateInfo createInfo =
395 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
399 resourceDesc.imageType,
400 resourceDesc.imageFormat,
404 resourceDesc.imageSamples,
406 readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags(),
407 VK_SHARING_MODE_EXCLUSIVE,
411 VK_IMAGE_LAYOUT_UNDEFINED
414 Move<VkImage> image = createImage(vkd, device, &createInfo);
415 MovePtr<Allocation> allocation = importAndBindMemory(vkd, device, *image, nativeHandle, externalType, exportedMemoryTypeIndex);
417 return MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType, resourceDesc.imageFormat, subresourceRange, subresourceLayers, tiling));
421 const VkDeviceSize offset = 0u;
422 const VkDeviceSize size = static_cast<VkDeviceSize>(resourceDesc.size.x());
423 const VkBufferUsageFlags usage = readOp.getInResourceUsageFlags() | writeOp.getOutResourceUsageFlags();
424 const VkExternalMemoryBufferCreateInfo externalInfo =
426 VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
428 (VkExternalMemoryHandleTypeFlags)externalType
430 const VkBufferCreateInfo createInfo =
432 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
438 VK_SHARING_MODE_EXCLUSIVE,
442 Move<VkBuffer> buffer = createBuffer(vkd, device, &createInfo);
443 MovePtr<Allocation> allocation = importAndBindMemory(vkd,
448 exportedMemoryTypeIndex);
450 return MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
454 struct QueueSubmitOrderSharedIteration
456 QueueSubmitOrderSharedIteration() {}
457 ~QueueSubmitOrderSharedIteration() {}
459 SharedPtr<Resource> resourceA;
460 SharedPtr<Resource> resourceB;
462 SharedPtr<Operation> writeOp;
463 SharedPtr<Operation> readOp;
466 // Verifies the signaling order of the semaphores in multiple
467 // VkSubmitInfo given to vkQueueSubmit() with queueA & queueB from a
468 // different VkDevice.
470 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
471 // vkQueueSubmit(queueB, [read0-6])
473 // With read0-6 waiting on write6, all the data should be available
474 // for reading given that signal operations are supposed to happen in
476 class QueueSubmitSignalOrderSharedTestInstance : public TestInstance
479 QueueSubmitSignalOrderSharedTestInstance (Context& context,
480 SynchronizationType type,
481 const SharedPtr<OperationSupport> writeOpSupport,
482 const SharedPtr<OperationSupport> readOpSupport,
483 const ResourceDescription& resourceDesc,
484 VkExternalMemoryHandleTypeFlagBits memoryHandleType,
485 VkSemaphoreType semaphoreType,
486 VkExternalSemaphoreHandleTypeFlagBits semaphoreHandleType,
487 PipelineCacheData& pipelineCacheData)
488 : TestInstance (context)
490 , m_writeOpSupport (writeOpSupport)
491 , m_readOpSupport (readOpSupport)
492 , m_resourceDesc (resourceDesc)
493 , m_memoryHandleType (memoryHandleType)
494 , m_semaphoreType (semaphoreType)
495 , m_semaphoreHandleType (semaphoreHandleType)
496 , m_pipelineCacheData (pipelineCacheData)
500 const InstanceInterface& vki = context.getInstanceInterface();
501 const VkSemaphoreTypeCreateInfoKHR semaphoreTypeInfo =
503 VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
508 const VkPhysicalDeviceExternalSemaphoreInfo info =
510 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
514 VkExternalSemaphoreProperties properties =
516 VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
523 vki.getPhysicalDeviceExternalSemaphoreProperties(context.getPhysicalDevice(), &info, &properties);
525 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
526 !context.getTimelineSemaphoreFeatures().timelineSemaphore)
527 TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
529 if ((properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) == 0
530 || (properties.externalSemaphoreFeatures & vk::VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR) == 0)
531 TCU_THROW(NotSupportedError, "Exporting and importing semaphore type not supported");
533 if (!isResourceExportable())
534 TCU_THROW(NotSupportedError, "Resource not exportable");
538 Move<VkImage> createImage (const vk::DeviceInterface& vkd,
540 const vk::VkExtent3D& extent,
541 deUint32 queueFamilyIndex,
542 vk::VkImageTiling tiling)
544 const VkExternalMemoryImageCreateInfo externalInfo =
546 VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
548 (VkExternalMemoryHandleTypeFlags)m_memoryHandleType
550 const VkImageCreateInfo createInfo =
552 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
556 m_resourceDesc.imageType,
557 m_resourceDesc.imageFormat,
561 m_resourceDesc.imageSamples,
563 m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
564 VK_SHARING_MODE_EXCLUSIVE,
568 VK_IMAGE_LAYOUT_UNDEFINED
571 return vk::createImage(vkd, device, &createInfo);
574 Move<VkBuffer> createBuffer (const vk::DeviceInterface& vkd,
576 const vk::VkDeviceSize& size,
577 deUint32 queueFamilyIndex)
579 const VkExternalMemoryBufferCreateInfo externalInfo =
581 VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
583 (VkExternalMemoryHandleTypeFlags)m_memoryHandleType
585 const VkBufferCreateInfo createInfo =
587 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
592 m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
593 VK_SHARING_MODE_EXCLUSIVE,
597 return vk::createBuffer(vkd, device, &createInfo);
600 tcu::TestStatus iterate (void)
602 // We're using 2 devices to make sure we have 2 queues even on
603 // implementations that only have a single queue.
604 const bool isTimelineSemaphore (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
605 const VkDevice& deviceA = m_context.getDevice();
606 const Unique<VkDevice>& deviceB (SingletonDevice::getDevice(m_context));
607 const DeviceInterface& vkA = m_context.getDeviceInterface();
608 const DeviceDriver vkB (m_context.getPlatformInterface(), m_context.getInstance(), *deviceB);
609 UniquePtr<SimpleAllocator> allocatorA (new SimpleAllocator(vkA, deviceA, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
610 m_context.getPhysicalDevice())));
611 UniquePtr<SimpleAllocator> allocatorB (new SimpleAllocator(vkB, *deviceB, vk::getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(),
612 m_context.getPhysicalDevice())));
613 UniquePtr<OperationContext> operationContextA (new OperationContext(m_context, m_type, vkA, deviceA, *allocatorA, m_pipelineCacheData));
614 UniquePtr<OperationContext> operationContextB (new OperationContext(m_context, m_type, vkB, *deviceB, *allocatorB, m_pipelineCacheData));
615 const deUint32 universalQueueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
616 const VkQueue queueA = m_context.getUniversalQueue();
617 const VkQueue queueB = getDeviceQueue(vkB, *deviceB, m_context.getUniversalQueueFamilyIndex(), 0);
618 Unique<VkFence> fenceA (createFence(vkA, deviceA));
619 Unique<VkFence> fenceB (createFence(vkB, *deviceB));
620 const Unique<VkCommandPool> cmdPoolA (createCommandPool(vkA, deviceA, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
621 const Unique<VkCommandPool> cmdPoolB (createCommandPool(vkB, *deviceB, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, universalQueueFamilyIndex));
622 std::vector<SharedPtr<Move<VkCommandBuffer> > > ptrCmdBuffersA;
623 SharedPtr<Move<VkCommandBuffer> > ptrCmdBufferB;
624 std::vector<VkCommandBuffer> cmdBuffersA;
625 VkCommandBuffer cmdBufferB;
626 std::vector<Move<VkSemaphore> > semaphoresA;
627 std::vector<Move<VkSemaphore> > semaphoresB;
628 std::vector<VkSemaphore> semaphoreHandlesA;
629 std::vector<VkSemaphore> semaphoreHandlesB;
630 std::vector<deUint64> timelineValuesA;
631 std::vector<deUint64> timelineValuesB;
632 std::vector<QueueSubmitOrderSharedIteration> iterations(12);
633 std::vector<VkPipelineStageFlags2KHR> stageBits;
635 // Create a dozen of set of write/read operations.
636 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
638 QueueSubmitOrderSharedIteration& iter = iterations[iterIdx];
639 deUint32 memoryTypeIndex;
640 NativeHandle nativeMemoryHandle;
642 if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
644 const VkExtent3D extent =
646 (deUint32)m_resourceDesc.size.x(),
647 de::max(1u, (deUint32)m_resourceDesc.size.y()),
648 de::max(1u, (deUint32)m_resourceDesc.size.z())
650 const VkImageSubresourceRange subresourceRange =
652 m_resourceDesc.imageAspect,
658 const VkImageSubresourceLayers subresourceLayers =
660 m_resourceDesc.imageAspect,
666 const vk::VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;
667 Move<VkImage> image = createImage(vkA, deviceA, extent, universalQueueFamilyIndex, tiling);
668 const vk::VkMemoryRequirements requirements = getMemoryRequirements(vkA, deviceA, *image);
669 memoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
670 vk::Move<vk::VkDeviceMemory> memory = allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *image);
672 VK_CHECK(vkA.bindImageMemory(deviceA, *image, *memory, 0u));
674 MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
675 iter.resourceA = makeSharedPtr(new Resource(image, allocation, extent, m_resourceDesc.imageType, m_resourceDesc.imageFormat, subresourceRange, subresourceLayers, tiling));
679 const VkDeviceSize offset = 0u;
680 const VkDeviceSize size = static_cast<VkDeviceSize>(m_resourceDesc.size.x());
681 Move<VkBuffer> buffer = createBuffer(vkA, deviceA, size, universalQueueFamilyIndex);
682 const vk::VkMemoryRequirements requirements = getMemoryRequirements(vkA, deviceA, *buffer);
683 memoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
684 vk::Move<vk::VkDeviceMemory> memory = allocateExportableMemory(vkA, deviceA, requirements.size, memoryTypeIndex, m_memoryHandleType, *buffer);
686 VK_CHECK(vkA.bindBufferMemory(deviceA, *buffer, *memory, 0u));
688 MovePtr<Allocation> allocation(new SimpleAllocation(vkA, deviceA, memory.disown()));
689 iter.resourceA = makeSharedPtr(new Resource(m_resourceDesc.type, buffer, allocation, offset, size));
692 getMemoryNative(vkA, deviceA, iter.resourceA->getMemory(), m_memoryHandleType, nativeMemoryHandle);
693 iter.resourceB = makeSharedPtr(importResource(vkB, *deviceB,
695 universalQueueFamilyIndex,
702 iter.writeOp = makeSharedPtr(m_writeOpSupport->build(*operationContextA,
704 iter.readOp = makeSharedPtr(m_readOpSupport->build(*operationContextB,
708 // Record each write operation into its own command buffer.
709 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
711 QueueSubmitOrderSharedIteration& iter = iterations[iterIdx];
712 const Resource& resource = *iter.resourceA;
713 const SyncInfo writeSync = iter.writeOp->getOutSyncInfo();
714 const SyncInfo readSync = iter.readOp->getInSyncInfo();
716 ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vkA, deviceA, *cmdPoolA)));
718 cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));
720 beginCommandBuffer(vkA, cmdBuffersA.back());
722 iter.writeOp->recordCommands(cmdBuffersA.back());
725 SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore);
727 if (resource.getType() == RESOURCE_TYPE_IMAGE)
729 DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
730 DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
732 const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
733 writeSync.stageMask, // VkPipelineStageFlags2KHR srcStageMask
734 writeSync.accessMask, // VkAccessFlags2KHR srcAccessMask
735 readSync.stageMask, // VkPipelineStageFlags2KHR dstStageMask
736 readSync.accessMask, // VkAccessFlags2KHR dstAccessMask
737 writeSync.imageLayout, // VkImageLayout oldLayout
738 readSync.imageLayout, // VkImageLayout newLayout
739 resource.getImage().handle, // VkImage image
740 resource.getImage().subresourceRange // VkImageSubresourceRange subresourceRange
742 VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
743 synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
747 const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
748 writeSync.stageMask, // VkPipelineStageFlags2KHR srcStageMask
749 writeSync.accessMask, // VkAccessFlags2KHR srcAccessMask
750 readSync.stageMask, // VkPipelineStageFlags2KHR dstStageMask
751 readSync.accessMask, // VkAccessFlags2KHR dstAccessMask
752 resource.getBuffer().handle, // VkBuffer buffer
753 0, // VkDeviceSize offset
754 VK_WHOLE_SIZE // VkDeviceSize size
756 VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
757 synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
760 stageBits.push_back(writeSync.stageMask);
763 endCommandBuffer(vkA, cmdBuffersA.back());
765 addSemaphore(vkA, deviceA, semaphoresA, semaphoreHandlesA, timelineValuesA, iterIdx == (iterations.size() - 1), 2u);
768 DE_ASSERT(stageBits.size() == iterations.size());
769 DE_ASSERT(semaphoreHandlesA.size() == iterations.size());
771 // Record all read operations into a single command buffer and record the union of their stage masks.
772 VkPipelineStageFlags2KHR readStages = 0;
773 ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vkB, *deviceB, *cmdPoolB));
774 cmdBufferB = **(ptrCmdBufferB);
775 beginCommandBuffer(vkB, cmdBufferB);
776 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
778 QueueSubmitOrderSharedIteration& iter = iterations[iterIdx];
779 readStages |= iter.readOp->getInSyncInfo().stageMask;
780 iter.readOp->recordCommands(cmdBufferB);
782 endCommandBuffer(vkB, cmdBufferB);
784 // Export the last semaphore for use on deviceB and create another semaphore to signal on deviceB.
786 VkSemaphore lastSemaphoreA = semaphoreHandlesA.back();
787 NativeHandle nativeSemaphoreHandle;
789 addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, true, timelineValuesA.back());
791 getSemaphoreNative(vkA, deviceA, lastSemaphoreA, m_semaphoreHandleType, nativeSemaphoreHandle);
792 importSemaphore(vkB, *deviceB, semaphoreHandlesB.back(), m_semaphoreHandleType, nativeSemaphoreHandle, 0u);
794 addSemaphore(vkB, *deviceB, semaphoresB, semaphoreHandlesB, timelineValuesB, false, timelineValuesA.back());
797 // Submit writes, each in its own VkSubmitInfo. With binary
798 // semaphores, submission don't wait on anything, with
799 // timeline semaphores, submissions wait on a host signal
800 // operation done below.
802 std::vector<VkCommandBufferSubmitInfoKHR> cmdBuffersInfo (iterations.size(), makeCommonCommandBufferSubmitInfo(0u));
803 std::vector<VkSemaphoreSubmitInfoKHR> waitSemaphoreSubmitInfos (iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR));
804 std::vector<VkSemaphoreSubmitInfoKHR> signalSemaphoreSubmitInfos (iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
805 SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vkA, isTimelineSemaphore, static_cast<deUint32>(iterations.size()));
807 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
809 waitSemaphoreSubmitInfos[iterIdx].semaphore = semaphoreHandlesA.front();
810 waitSemaphoreSubmitInfos[iterIdx].stageMask = stageBits[iterIdx];
811 signalSemaphoreSubmitInfos[iterIdx].semaphore = semaphoreHandlesA[iterIdx];
812 signalSemaphoreSubmitInfos[iterIdx].value = timelineValuesA[iterIdx];
813 cmdBuffersInfo[iterIdx].commandBuffer = cmdBuffersA[iterIdx];
815 synchronizationWrapper->addSubmitInfo(
817 isTimelineSemaphore ? &waitSemaphoreSubmitInfos[iterIdx] : DE_NULL,
819 &cmdBuffersInfo[iterIdx],
821 &signalSemaphoreSubmitInfos[iterIdx],
827 VK_CHECK(synchronizationWrapper->queueSubmit(queueA, *fenceA));
830 // Submit reads, only waiting waiting on the last write
831 // operations, ordering of signaling should guarantee that
832 // when read operations kick in all writes have completed.
834 VkCommandBufferSubmitInfoKHR cmdBuffersInfo = makeCommonCommandBufferSubmitInfo(cmdBufferB);
835 VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.front(), timelineValuesA.back(), readStages);
836 VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
837 SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vkB, isTimelineSemaphore);
839 synchronizationWrapper->addSubmitInfo(
841 &waitSemaphoreSubmitInfo,
845 &signalSemaphoreSubmitInfo,
850 VK_CHECK(synchronizationWrapper->queueSubmit(queueB, *fenceB));
852 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
854 const VkSemaphoreWaitInfo waitInfo =
856 VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, // VkStructureType sType;
857 DE_NULL, // const void* pNext;
858 0u, // VkSemaphoreWaitFlagsKHR flags;
859 1u, // deUint32 semaphoreCount;
860 &semaphoreHandlesB.back(), // const VkSemaphore* pSemaphores;
861 &timelineValuesB.back(), // const deUint64* pValues;
864 // Unblock the whole lot.
865 hostSignal(vkA, deviceA, semaphoreHandlesA.front(), 2);
867 VK_CHECK(vkB.waitSemaphores(*deviceB, &waitInfo, ~0ull));
871 VK_CHECK(vkB.waitForFences(*deviceB, 1, &fenceB.get(), VK_TRUE, ~0ull));
875 // Verify the result of the operations.
876 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
878 QueueSubmitOrderSharedIteration& iter = iterations[iterIdx];
879 const Data expected = iter.writeOp->getData();
880 const Data actual = iter.readOp->getData();
882 if (isIndirectBuffer(iter.resourceA->getType()))
884 const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
885 const deUint32 actualValue = reinterpret_cast<const deUint32*>(actual.data)[0];
887 if (actualValue < expectedValue)
888 return tcu::TestStatus::fail("Counter value is smaller than expected");
892 if (0 != deMemCmp(expected.data, actual.data, expected.size))
893 return tcu::TestStatus::fail("Memory contents don't match");
897 VK_CHECK(vkA.deviceWaitIdle(deviceA));
898 VK_CHECK(vkB.deviceWaitIdle(*deviceB));
900 return tcu::TestStatus::pass("Success");
904 void addSemaphore (const DeviceInterface& vk,
906 std::vector<Move<VkSemaphore> >& semaphores,
907 std::vector<VkSemaphore>& semaphoreHandles,
908 std::vector<deUint64>& timelineValues,
910 deUint64 firstTimelineValue)
912 Move<VkSemaphore> semaphore;
914 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
916 // Only allocate a single exportable semaphore.
917 if (semaphores.empty())
919 semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
925 semaphores.push_back(createExportableSemaphoreType(vk, device, m_semaphoreType, m_semaphoreHandleType));
927 semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
930 semaphoreHandles.push_back(*semaphores.back());
931 timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
934 bool isResourceExportable ()
936 const InstanceInterface& vki = m_context.getInstanceInterface();
937 VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
939 if (m_resourceDesc.type == RESOURCE_TYPE_IMAGE)
941 const VkPhysicalDeviceExternalImageFormatInfo externalInfo =
943 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO,
947 const VkPhysicalDeviceImageFormatInfo2 imageFormatInfo =
949 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2,
951 m_resourceDesc.imageFormat,
952 m_resourceDesc.imageType,
953 VK_IMAGE_TILING_OPTIMAL,
954 m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
957 VkExternalImageFormatProperties externalProperties =
959 VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES,
963 VkImageFormatProperties2 formatProperties =
965 VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2,
977 const VkResult res = vki.getPhysicalDeviceImageFormatProperties2(physicalDevice, &imageFormatInfo, &formatProperties);
979 if (res == VK_ERROR_FORMAT_NOT_SUPPORTED)
982 VK_CHECK(res); // Check other errors
985 if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0)
988 if ((externalProperties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
995 const VkPhysicalDeviceExternalBufferInfo info =
997 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO,
1001 m_readOpSupport->getInResourceUsageFlags() | m_writeOpSupport->getOutResourceUsageFlags(),
1004 VkExternalBufferProperties properties =
1006 VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES,
1010 vki.getPhysicalDeviceExternalBufferProperties(physicalDevice, &info, &properties);
1012 if ((properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) == 0
1013 || (properties.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR) == 0)
1020 SynchronizationType m_type;
1021 SharedPtr<OperationSupport> m_writeOpSupport;
1022 SharedPtr<OperationSupport> m_readOpSupport;
1023 const ResourceDescription& m_resourceDesc;
1024 VkExternalMemoryHandleTypeFlagBits m_memoryHandleType;
1025 VkSemaphoreType m_semaphoreType;
1026 VkExternalSemaphoreHandleTypeFlagBits m_semaphoreHandleType;
1027 PipelineCacheData& m_pipelineCacheData;
1031 class QueueSubmitSignalOrderSharedTestCase : public TestCase
1034 QueueSubmitSignalOrderSharedTestCase (tcu::TestContext& testCtx,
1035 SynchronizationType type,
1036 const std::string& name,
1037 OperationName writeOp,
1038 OperationName readOp,
1039 const ResourceDescription& resourceDesc,
1040 VkExternalMemoryHandleTypeFlagBits memoryHandleType,
1041 VkSemaphoreType semaphoreType,
1042 VkExternalSemaphoreHandleTypeFlagBits semaphoreHandleType,
1043 PipelineCacheData& pipelineCacheData)
1044 : TestCase (testCtx, name.c_str(), "")
1046 , m_writeOpSupport (makeOperationSupport(writeOp, resourceDesc).release())
1047 , m_readOpSupport (makeOperationSupport(readOp, resourceDesc).release())
1048 , m_resourceDesc (resourceDesc)
1049 , m_memoryHandleType (memoryHandleType)
1050 , m_semaphoreType (semaphoreType)
1051 , m_semaphoreHandleType (semaphoreHandleType)
1052 , m_pipelineCacheData (pipelineCacheData)
1056 virtual void checkSupport(Context& context) const
1058 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1059 !context.getTimelineSemaphoreFeatures().timelineSemaphore)
1060 TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1062 if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT ||
1063 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) &&
1064 !context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_fd"))
1065 TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_fd not supported");
1067 if ((m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT ||
1068 m_semaphoreHandleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT) &&
1069 !context.isDeviceFunctionalitySupported("VK_KHR_external_semaphore_win32"))
1070 TCU_THROW(NotSupportedError, "VK_KHR_external_semaphore_win32 not supported");
1072 if (m_type == SynchronizationType::SYNCHRONIZATION2)
1073 context.requireDeviceFunctionality("VK_KHR_synchronization2");
1076 TestInstance* createInstance (Context& context) const
1078 return new QueueSubmitSignalOrderSharedTestInstance(context,
1085 m_semaphoreHandleType,
1086 m_pipelineCacheData);
1089 void initPrograms (SourceCollections& programCollection) const
1091 m_writeOpSupport->initPrograms(programCollection);
1092 m_readOpSupport->initPrograms(programCollection);
1096 SynchronizationType m_type;
1097 SharedPtr<OperationSupport> m_writeOpSupport;
1098 SharedPtr<OperationSupport> m_readOpSupport;
1099 const ResourceDescription& m_resourceDesc;
1100 VkExternalMemoryHandleTypeFlagBits m_memoryHandleType;
1101 VkSemaphoreType m_semaphoreType;
1102 VkExternalSemaphoreHandleTypeFlagBits m_semaphoreHandleType;
1103 PipelineCacheData& m_pipelineCacheData;
1106 class QueueSubmitSignalOrderSharedTests : public tcu::TestCaseGroup
1109 QueueSubmitSignalOrderSharedTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char *name)
1110 : tcu::TestCaseGroup (testCtx, name, "Signal ordering of semaphores")
1112 , m_semaphoreType (semaphoreType)
1118 static const OperationName writeOps[] =
1120 OPERATION_NAME_WRITE_COPY_BUFFER,
1121 OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
1122 OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
1123 OPERATION_NAME_WRITE_COPY_IMAGE,
1124 OPERATION_NAME_WRITE_BLIT_IMAGE,
1125 OPERATION_NAME_WRITE_SSBO_VERTEX,
1126 OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
1127 OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
1128 OPERATION_NAME_WRITE_SSBO_GEOMETRY,
1129 OPERATION_NAME_WRITE_SSBO_FRAGMENT,
1130 OPERATION_NAME_WRITE_SSBO_COMPUTE,
1131 OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
1132 OPERATION_NAME_WRITE_IMAGE_VERTEX,
1133 OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
1134 OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
1135 OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
1136 OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
1137 OPERATION_NAME_WRITE_IMAGE_COMPUTE,
1138 OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
1140 static const OperationName readOps[] =
1142 OPERATION_NAME_READ_COPY_BUFFER,
1143 OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
1144 OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
1145 OPERATION_NAME_READ_COPY_IMAGE,
1146 OPERATION_NAME_READ_BLIT_IMAGE,
1147 OPERATION_NAME_READ_UBO_VERTEX,
1148 OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
1149 OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
1150 OPERATION_NAME_READ_UBO_GEOMETRY,
1151 OPERATION_NAME_READ_UBO_FRAGMENT,
1152 OPERATION_NAME_READ_UBO_COMPUTE,
1153 OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
1154 OPERATION_NAME_READ_SSBO_VERTEX,
1155 OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
1156 OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
1157 OPERATION_NAME_READ_SSBO_GEOMETRY,
1158 OPERATION_NAME_READ_SSBO_FRAGMENT,
1159 OPERATION_NAME_READ_SSBO_COMPUTE,
1160 OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
1161 OPERATION_NAME_READ_IMAGE_VERTEX,
1162 OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
1163 OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
1164 OPERATION_NAME_READ_IMAGE_GEOMETRY,
1165 OPERATION_NAME_READ_IMAGE_FRAGMENT,
1166 OPERATION_NAME_READ_IMAGE_COMPUTE,
1167 OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
1168 OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
1169 OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
1170 OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
1171 OPERATION_NAME_READ_VERTEX_INPUT,
1175 VkExternalMemoryHandleTypeFlagBits memoryType;
1176 VkExternalSemaphoreHandleTypeFlagBits semaphoreType;
1179 // Only semaphore handle types having reference semantic
1180 // are valid for this test.
1182 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
1183 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
1186 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
1187 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
1190 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
1191 VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
1195 for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
1196 for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
1198 const OperationName writeOp = writeOps[writeOpIdx];
1199 const OperationName readOp = readOps[readOpIdx];
1200 const std::string opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
1203 de::MovePtr<tcu::TestCaseGroup> opGroup (new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str(), ""));
1205 for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
1207 const ResourceDescription& resource = s_resources[resourceNdx];
1209 if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
1211 for (deUint32 exportIdx = 0; exportIdx < DE_LENGTH_OF_ARRAY(exportCases); exportIdx++)
1213 std::string caseName = getResourceName(resource) + "_" +
1214 externalSemaphoreTypeToName(exportCases[exportIdx].semaphoreType);
1216 opGroup->addChild(new QueueSubmitSignalOrderSharedTestCase(m_testCtx,
1222 exportCases[exportIdx].memoryType,
1224 exportCases[exportIdx].semaphoreType,
1225 m_pipelineCacheData));
1231 addChild(opGroup.release());
1241 SynchronizationType m_type;
1242 VkSemaphoreType m_semaphoreType;
1243 // synchronization.op tests share pipeline cache data to speed up test
1245 PipelineCacheData m_pipelineCacheData;
1248 struct QueueSubmitOrderIteration
1250 QueueSubmitOrderIteration() {}
1251 ~QueueSubmitOrderIteration() {}
1253 SharedPtr<Resource> resource;
1255 SharedPtr<Operation> writeOp;
1256 SharedPtr<Operation> readOp;
1259 // Verifies the signaling order of the semaphores in multiple
1260 // VkSubmitInfo given to vkQueueSubmit() with queueA & queueB from the
1263 // vkQueueSubmit(queueA, [write0, write1, write2, ..., write6])
1264 // vkQueueSubmit(queueB, [read0-6])
1266 // With read0-6 waiting on write6, all the data should be available
1267 // for reading given that signal operations are supposed to happen in
1269 class QueueSubmitSignalOrderTestInstance : public TestInstance
1272 QueueSubmitSignalOrderTestInstance (Context& context,
1273 SynchronizationType type,
1274 const SharedPtr<OperationSupport> writeOpSupport,
1275 const SharedPtr<OperationSupport> readOpSupport,
1276 const ResourceDescription& resourceDesc,
1277 VkSemaphoreType semaphoreType,
1278 PipelineCacheData& pipelineCacheData)
1279 : TestInstance (context)
1281 , m_writeOpSupport (writeOpSupport)
1282 , m_readOpSupport (readOpSupport)
1283 , m_resourceDesc (resourceDesc)
1284 , m_semaphoreType (semaphoreType)
1285 , m_device (SingletonDevice::getDevice(context))
1286 , m_deviceInterface (context.getPlatformInterface(), context.getInstance(), *m_device)
1287 , m_allocator (new SimpleAllocator(m_deviceInterface,
1289 getPhysicalDeviceMemoryProperties(context.getInstanceInterface(),
1290 context.getPhysicalDevice())))
1291 , m_operationContext (new OperationContext(context, type, m_deviceInterface, *m_device, *m_allocator, pipelineCacheData))
1292 , m_queueA (DE_NULL)
1293 , m_queueB (DE_NULL)
1297 const std::vector<VkQueueFamilyProperties> queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(context.getInstanceInterface(),
1298 context.getPhysicalDevice());
1300 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1301 !context.getTimelineSemaphoreFeatures().timelineSemaphore)
1302 TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1304 VkQueueFlags writeOpQueueFlags = m_writeOpSupport->getQueueFlags(*m_operationContext);
1305 for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++) {
1306 if (((queueFamilyProperties[familyIdx].queueFlags & writeOpQueueFlags) == writeOpQueueFlags) ||
1307 ((writeOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
1308 (((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
1309 ((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT)))) {
1310 m_queueA = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, 0);
1311 m_queueFamilyIndexA = familyIdx;
1315 if (m_queueA == DE_NULL)
1316 TCU_THROW(NotSupportedError, "No queue supporting write operation");
1318 VkQueueFlags readOpQueueFlags = m_readOpSupport->getQueueFlags(*m_operationContext);
1319 for (deUint32 familyIdx = 0; familyIdx < queueFamilyProperties.size(); familyIdx++) {
1320 if (((queueFamilyProperties[familyIdx].queueFlags & readOpQueueFlags) == readOpQueueFlags) ||
1321 ((readOpQueueFlags == VK_QUEUE_TRANSFER_BIT) &&
1322 (((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT) ||
1323 ((queueFamilyProperties[familyIdx].queueFlags & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT)))) {
1324 for (deUint32 queueIdx = 0; queueIdx < queueFamilyProperties[familyIdx].queueCount; queueIdx++) {
1325 VkQueue queue = getDeviceQueue(m_deviceInterface, *m_device, familyIdx, queueIdx);
1327 if (queue == m_queueA)
1331 m_queueFamilyIndexB = familyIdx;
1335 if (m_queueB != DE_NULL)
1339 if (m_queueB == DE_NULL)
1340 TCU_THROW(NotSupportedError, "No queue supporting read operation");
1343 tcu::TestStatus iterate (void)
1345 const bool isTimelineSemaphore = (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR);
1346 const VkDevice& device = *m_device;
1347 const DeviceInterface& vk = m_deviceInterface;
1348 Unique<VkFence> fence (createFence(vk, device));
1349 const Unique<VkCommandPool> cmdPoolA (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexA));
1350 const Unique<VkCommandPool> cmdPoolB (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, m_queueFamilyIndexB));
1351 std::vector<SharedPtr<Move<VkCommandBuffer> > > ptrCmdBuffersA;
1352 SharedPtr<Move<VkCommandBuffer> > ptrCmdBufferB;
1353 std::vector<VkCommandBuffer> cmdBuffersA;
1354 VkCommandBuffer cmdBufferB;
1355 std::vector<Move<VkSemaphore> > semaphoresA;
1356 std::vector<Move<VkSemaphore> > semaphoresB;
1357 std::vector<VkSemaphore> semaphoreHandlesA;
1358 std::vector<VkSemaphore> semaphoreHandlesB;
1359 std::vector<deUint64> timelineValuesA;
1360 std::vector<deUint64> timelineValuesB;
1361 std::vector<QueueSubmitOrderIteration> iterations;
1362 std::vector<VkPipelineStageFlags2KHR> stageBits;
1363 std::vector<deUint32> queueFamilies;
1364 SynchronizationWrapperPtr syncWrapper = getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);
1366 queueFamilies.push_back(m_queueFamilyIndexA);
1367 queueFamilies.push_back(m_queueFamilyIndexB);
1369 // Create a dozen of set of write/read operations.
1370 iterations.resize(12);
1371 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1373 QueueSubmitOrderIteration& iter = iterations[iterIdx];
1375 iter.resource = makeSharedPtr(new Resource(*m_operationContext,
1377 m_writeOpSupport->getOutResourceUsageFlags() |
1378 m_readOpSupport->getInResourceUsageFlags(),
1379 VK_SHARING_MODE_EXCLUSIVE,
1382 iter.writeOp = makeSharedPtr(m_writeOpSupport->build(*m_operationContext,
1384 iter.readOp = makeSharedPtr(m_readOpSupport->build(*m_operationContext,
1388 // Record each write operation into its own command buffer.
1389 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1391 QueueSubmitOrderIteration& iter = iterations[iterIdx];
1393 ptrCmdBuffersA.push_back(makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolA)));
1394 cmdBuffersA.push_back(**(ptrCmdBuffersA.back()));
1396 beginCommandBuffer(vk, cmdBuffersA.back());
1397 iter.writeOp->recordCommands(cmdBuffersA.back());
1400 SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, DE_FALSE);
1401 const SyncInfo writeSync = iter.writeOp->getOutSyncInfo();
1402 const SyncInfo readSync = iter.readOp->getInSyncInfo();
1403 const Resource& resource = *iter.resource;
1405 if (resource.getType() == RESOURCE_TYPE_IMAGE)
1407 DE_ASSERT(writeSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
1408 DE_ASSERT(readSync.imageLayout != VK_IMAGE_LAYOUT_UNDEFINED);
1410 const VkImageMemoryBarrier2KHR imageMemoryBarrier2 = makeImageMemoryBarrier2(
1411 writeSync.stageMask, // VkPipelineStageFlags2KHR srcStageMask
1412 writeSync.accessMask, // VkAccessFlags2KHR srcAccessMask
1413 readSync.stageMask, // VkPipelineStageFlags2KHR dstStageMask
1414 readSync.accessMask, // VkAccessFlags2KHR dstAccessMask
1415 writeSync.imageLayout, // VkImageLayout oldLayout
1416 readSync.imageLayout, // VkImageLayout newLayout
1417 resource.getImage().handle, // VkImage image
1418 resource.getImage().subresourceRange // VkImageSubresourceRange subresourceRange
1420 VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, DE_NULL, &imageMemoryBarrier2);
1421 synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
1425 const VkBufferMemoryBarrier2KHR bufferMemoryBarrier2 = makeBufferMemoryBarrier2(
1426 writeSync.stageMask, // VkPipelineStageFlags2KHR srcStageMask
1427 writeSync.accessMask, // VkAccessFlags2KHR srcAccessMask
1428 readSync.stageMask, // VkPipelineStageFlags2KHR dstStageMask
1429 readSync.accessMask, // VkAccessFlags2KHR dstAccessMask
1430 resource.getBuffer().handle, // VkBuffer buffer
1431 0, // VkDeviceSize offset
1432 VK_WHOLE_SIZE // VkDeviceSize size
1434 VkDependencyInfoKHR dependencyInfo = makeCommonDependencyInfo(DE_NULL, &bufferMemoryBarrier2);
1435 synchronizationWrapper->cmdPipelineBarrier(cmdBuffersA.back(), &dependencyInfo);
1438 stageBits.push_back(writeSync.stageMask);
1441 endCommandBuffer(vk, cmdBuffersA.back());
1443 addSemaphore(vk, device, semaphoresA, semaphoreHandlesA, timelineValuesA, 2u);
1446 DE_ASSERT(stageBits.size() == iterations.size());
1447 DE_ASSERT(semaphoreHandlesA.size() == iterations.size());
1449 // Record all read operations into a single command buffer and track the union of their execution stages.
1450 ptrCmdBufferB = makeVkSharedPtr(makeCommandBuffer(vk, device, *cmdPoolB));
1451 cmdBufferB = **(ptrCmdBufferB);
1452 beginCommandBuffer(vk, cmdBufferB);
1453 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1455 QueueSubmitOrderIteration& iter = iterations[iterIdx];
1456 iter.readOp->recordCommands(cmdBufferB);
1458 endCommandBuffer(vk, cmdBufferB);
1460 addSemaphore(vk, device, semaphoresB, semaphoreHandlesB, timelineValuesB, timelineValuesA.back());
1462 // Submit writes, each in its own VkSubmitInfo. With binary
1463 // semaphores, submission don't wait on anything, with
1464 // timeline semaphores, submissions wait on a host signal
1465 // operation done below.
1467 VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.front(), 1u, VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
1468 std::vector<VkSemaphoreSubmitInfoKHR> signalSemaphoreSubmitInfo (iterations.size(), makeCommonSemaphoreSubmitInfo(0u, 0u, VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR));
1469 std::vector<VkCommandBufferSubmitInfoKHR> commandBufferSubmitInfos (iterations.size(), makeCommonCommandBufferSubmitInfo(0));
1470 SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, isTimelineSemaphore, (deUint32)iterations.size());
1472 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1474 commandBufferSubmitInfos[iterIdx].commandBuffer = cmdBuffersA[iterIdx];
1475 signalSemaphoreSubmitInfo[iterIdx].semaphore = semaphoreHandlesA[iterIdx];
1476 signalSemaphoreSubmitInfo[iterIdx].value = timelineValuesA[iterIdx];
1478 synchronizationWrapper->addSubmitInfo(
1479 isTimelineSemaphore,
1480 isTimelineSemaphore ? &waitSemaphoreSubmitInfo : DE_NULL,
1482 &commandBufferSubmitInfos[iterIdx],
1484 &signalSemaphoreSubmitInfo[iterIdx],
1485 isTimelineSemaphore,
1490 VK_CHECK(synchronizationWrapper->queueSubmit(m_queueA, DE_NULL));
1493 // Submit reads, only waiting waiting on the last write
1494 // operations, ordering of signaling should guarantee that
1495 // when read operations kick in all writes have completed.
1497 VkCommandBufferSubmitInfoKHR commandBufferSubmitInfos = makeCommonCommandBufferSubmitInfo(cmdBufferB);
1498 VkSemaphoreSubmitInfoKHR waitSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(semaphoreHandlesA.back(), timelineValuesA.back(), VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR);
1499 VkSemaphoreSubmitInfoKHR signalSemaphoreSubmitInfo = makeCommonSemaphoreSubmitInfo(semaphoreHandlesB.back(), timelineValuesB.back(), VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR);
1500 SynchronizationWrapperPtr synchronizationWrapper = getSynchronizationWrapper(m_type, vk, isTimelineSemaphore);
1502 synchronizationWrapper->addSubmitInfo(
1503 1u, // deUint32 waitSemaphoreInfoCount
1504 &waitSemaphoreSubmitInfo, // const VkSemaphoreSubmitInfoKHR* pWaitSemaphoreInfos
1505 1u, // deUint32 commandBufferInfoCount
1506 &commandBufferSubmitInfos, // const VkCommandBufferSubmitInfoKHR* pCommandBufferInfos
1507 1u, // deUint32 signalSemaphoreInfoCount
1508 &signalSemaphoreSubmitInfo, // const VkSemaphoreSubmitInfoKHR* pSignalSemaphoreInfos
1509 isTimelineSemaphore,
1513 VK_CHECK(synchronizationWrapper->queueSubmit(m_queueB, *fence));
1515 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
1517 const VkSemaphoreWaitInfo waitInfo =
1519 VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO, // VkStructureType sType;
1520 DE_NULL, // const void* pNext;
1521 0u, // VkSemaphoreWaitFlagsKHR flags;
1522 1u, // deUint32 semaphoreCount;
1523 &semaphoreHandlesB.back(), // const VkSemaphore* pSemaphores;
1524 &timelineValuesB.back(), // const deUint64* pValues;
1527 // Unblock the whole lot.
1528 hostSignal(vk, device, semaphoreHandlesA.front(), 1);
1530 VK_CHECK(vk.waitSemaphores(device, &waitInfo, ~0ull));
1534 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), VK_TRUE, ~0ull));
1538 // Verify the result of the operations.
1539 for (deUint32 iterIdx = 0; iterIdx < iterations.size(); iterIdx++)
1541 QueueSubmitOrderIteration& iter = iterations[iterIdx];
1542 const Data expected = iter.writeOp->getData();
1543 const Data actual = iter.readOp->getData();
1545 if (isIndirectBuffer(iter.resource->getType()))
1547 const deUint32 expectedValue = reinterpret_cast<const deUint32*>(expected.data)[0];
1548 const deUint32 actualValue = reinterpret_cast<const deUint32*>(actual.data)[0];
1550 if (actualValue < expectedValue)
1551 return tcu::TestStatus::fail("Counter value is smaller than expected");
1555 if (0 != deMemCmp(expected.data, actual.data, expected.size))
1556 return tcu::TestStatus::fail("Memory contents don't match");
1560 VK_CHECK(vk.deviceWaitIdle(device));
1562 return tcu::TestStatus::pass("Success");
1566 void addSemaphore (const DeviceInterface& vk,
1568 std::vector<Move<VkSemaphore> >& semaphores,
1569 std::vector<VkSemaphore>& semaphoreHandles,
1570 std::vector<deUint64>& timelineValues,
1571 deUint64 firstTimelineValue)
1573 Move<VkSemaphore> semaphore;
1575 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR)
1577 // Only allocate a single exportable semaphore.
1578 if (semaphores.empty())
1580 semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
1585 semaphores.push_back(createSemaphoreType(vk, device, m_semaphoreType));
1588 semaphoreHandles.push_back(*semaphores.back());
1589 timelineValues.push_back((timelineValues.empty() ? firstTimelineValue : timelineValues.back()) + m_rng.getInt(1, 100));
1592 SynchronizationType m_type;
1593 SharedPtr<OperationSupport> m_writeOpSupport;
1594 SharedPtr<OperationSupport> m_readOpSupport;
1595 const ResourceDescription& m_resourceDesc;
1596 VkSemaphoreType m_semaphoreType;
1597 const Unique<VkDevice>& m_device;
1598 const DeviceDriver m_deviceInterface;
1599 UniquePtr<SimpleAllocator> m_allocator;
1600 UniquePtr<OperationContext> m_operationContext;
1603 deUint32 m_queueFamilyIndexA;
1604 deUint32 m_queueFamilyIndexB;
1608 class QueueSubmitSignalOrderTestCase : public TestCase
1611 QueueSubmitSignalOrderTestCase (tcu::TestContext& testCtx,
1612 SynchronizationType type,
1613 const std::string& name,
1614 OperationName writeOp,
1615 OperationName readOp,
1616 const ResourceDescription& resourceDesc,
1617 VkSemaphoreType semaphoreType,
1618 PipelineCacheData& pipelineCacheData)
1619 : TestCase (testCtx, name.c_str(), "")
1621 , m_writeOpSupport (makeOperationSupport(writeOp, resourceDesc).release())
1622 , m_readOpSupport (makeOperationSupport(readOp, resourceDesc).release())
1623 , m_resourceDesc (resourceDesc)
1624 , m_semaphoreType (semaphoreType)
1625 , m_pipelineCacheData (pipelineCacheData)
1629 virtual void checkSupport(Context& context) const
1631 if (m_semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
1632 !context.getTimelineSemaphoreFeatures().timelineSemaphore)
1633 TCU_THROW(NotSupportedError, "Timeline semaphore not supported");
1634 if (m_type == SynchronizationType::SYNCHRONIZATION2)
1635 context.requireDeviceFunctionality("VK_KHR_synchronization2");
1638 TestInstance* createInstance (Context& context) const
1640 return new QueueSubmitSignalOrderTestInstance(context,
1646 m_pipelineCacheData);
1649 void initPrograms (SourceCollections& programCollection) const
1651 m_writeOpSupport->initPrograms(programCollection);
1652 m_readOpSupport->initPrograms(programCollection);
1656 SynchronizationType m_type;
1657 SharedPtr<OperationSupport> m_writeOpSupport;
1658 SharedPtr<OperationSupport> m_readOpSupport;
1659 const ResourceDescription& m_resourceDesc;
1660 VkSemaphoreType m_semaphoreType;
1661 PipelineCacheData& m_pipelineCacheData;
1664 class QueueSubmitSignalOrderTests : public tcu::TestCaseGroup
1667 QueueSubmitSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type, VkSemaphoreType semaphoreType, const char *name)
1668 : tcu::TestCaseGroup (testCtx, name, "Signal ordering of semaphores")
1670 , m_semaphoreType (semaphoreType)
1676 static const OperationName writeOps[] =
1678 OPERATION_NAME_WRITE_COPY_BUFFER,
1679 OPERATION_NAME_WRITE_COPY_BUFFER_TO_IMAGE,
1680 OPERATION_NAME_WRITE_COPY_IMAGE_TO_BUFFER,
1681 OPERATION_NAME_WRITE_COPY_IMAGE,
1682 OPERATION_NAME_WRITE_BLIT_IMAGE,
1683 OPERATION_NAME_WRITE_SSBO_VERTEX,
1684 OPERATION_NAME_WRITE_SSBO_TESSELLATION_CONTROL,
1685 OPERATION_NAME_WRITE_SSBO_TESSELLATION_EVALUATION,
1686 OPERATION_NAME_WRITE_SSBO_GEOMETRY,
1687 OPERATION_NAME_WRITE_SSBO_FRAGMENT,
1688 OPERATION_NAME_WRITE_SSBO_COMPUTE,
1689 OPERATION_NAME_WRITE_SSBO_COMPUTE_INDIRECT,
1690 OPERATION_NAME_WRITE_IMAGE_VERTEX,
1691 OPERATION_NAME_WRITE_IMAGE_TESSELLATION_CONTROL,
1692 OPERATION_NAME_WRITE_IMAGE_TESSELLATION_EVALUATION,
1693 OPERATION_NAME_WRITE_IMAGE_GEOMETRY,
1694 OPERATION_NAME_WRITE_IMAGE_FRAGMENT,
1695 OPERATION_NAME_WRITE_IMAGE_COMPUTE,
1696 OPERATION_NAME_WRITE_IMAGE_COMPUTE_INDIRECT,
1698 static const OperationName readOps[] =
1700 OPERATION_NAME_READ_COPY_BUFFER,
1701 OPERATION_NAME_READ_COPY_BUFFER_TO_IMAGE,
1702 OPERATION_NAME_READ_COPY_IMAGE_TO_BUFFER,
1703 OPERATION_NAME_READ_COPY_IMAGE,
1704 OPERATION_NAME_READ_BLIT_IMAGE,
1705 OPERATION_NAME_READ_UBO_VERTEX,
1706 OPERATION_NAME_READ_UBO_TESSELLATION_CONTROL,
1707 OPERATION_NAME_READ_UBO_TESSELLATION_EVALUATION,
1708 OPERATION_NAME_READ_UBO_GEOMETRY,
1709 OPERATION_NAME_READ_UBO_FRAGMENT,
1710 OPERATION_NAME_READ_UBO_COMPUTE,
1711 OPERATION_NAME_READ_UBO_COMPUTE_INDIRECT,
1712 OPERATION_NAME_READ_SSBO_VERTEX,
1713 OPERATION_NAME_READ_SSBO_TESSELLATION_CONTROL,
1714 OPERATION_NAME_READ_SSBO_TESSELLATION_EVALUATION,
1715 OPERATION_NAME_READ_SSBO_GEOMETRY,
1716 OPERATION_NAME_READ_SSBO_FRAGMENT,
1717 OPERATION_NAME_READ_SSBO_COMPUTE,
1718 OPERATION_NAME_READ_SSBO_COMPUTE_INDIRECT,
1719 OPERATION_NAME_READ_IMAGE_VERTEX,
1720 OPERATION_NAME_READ_IMAGE_TESSELLATION_CONTROL,
1721 OPERATION_NAME_READ_IMAGE_TESSELLATION_EVALUATION,
1722 OPERATION_NAME_READ_IMAGE_GEOMETRY,
1723 OPERATION_NAME_READ_IMAGE_FRAGMENT,
1724 OPERATION_NAME_READ_IMAGE_COMPUTE,
1725 OPERATION_NAME_READ_IMAGE_COMPUTE_INDIRECT,
1726 OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW,
1727 OPERATION_NAME_READ_INDIRECT_BUFFER_DRAW_INDEXED,
1728 OPERATION_NAME_READ_INDIRECT_BUFFER_DISPATCH,
1729 OPERATION_NAME_READ_VERTEX_INPUT,
1732 for (deUint32 writeOpIdx = 0; writeOpIdx < DE_LENGTH_OF_ARRAY(writeOps); writeOpIdx++)
1733 for (deUint32 readOpIdx = 0; readOpIdx < DE_LENGTH_OF_ARRAY(readOps); readOpIdx++)
1735 const OperationName writeOp = writeOps[writeOpIdx];
1736 const OperationName readOp = readOps[readOpIdx];
1737 const std::string opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
1740 de::MovePtr<tcu::TestCaseGroup> opGroup (new tcu::TestCaseGroup(m_testCtx, opGroupName.c_str(), ""));
1742 for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
1744 const ResourceDescription& resource = s_resources[resourceNdx];
1746 if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
1748 opGroup->addChild(new QueueSubmitSignalOrderTestCase(m_testCtx,
1750 getResourceName(resource),
1755 m_pipelineCacheData));
1760 addChild(opGroup.release());
1770 SynchronizationType m_type;
1771 VkSemaphoreType m_semaphoreType;
1772 // synchronization.op tests share pipeline cache data to speed up test
1774 PipelineCacheData m_pipelineCacheData;
1779 tcu::TestCaseGroup* createSignalOrderTests (tcu::TestContext& testCtx, SynchronizationType type)
1781 de::MovePtr<tcu::TestCaseGroup> orderingTests(new tcu::TestCaseGroup(testCtx, "signal_order", "Signal ordering tests"));
1783 orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "binary_semaphore"));
1784 orderingTests->addChild(new QueueSubmitSignalOrderTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "timeline_semaphore"));
1785 orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_BINARY_KHR, "shared_binary_semaphore"));
1786 orderingTests->addChild(new QueueSubmitSignalOrderSharedTests(testCtx, type, VK_SEMAPHORE_TYPE_TIMELINE_KHR, "shared_timeline_semaphore"));
1788 return orderingTests.release();
1791 } // synchronization