1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Synchronization internally synchronized objects tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktSynchronizationInternallySynchronizedObjectsTests.hpp"
25 #include "vktTestCaseUtil.hpp"
26 #include "vktSynchronizationUtil.hpp"
29 #include "tcuDefs.hpp"
30 #include "vkTypeUtil.hpp"
31 #include "vkPlatform.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkImageUtil.hpp"
35 #include "tcuResultCollector.hpp"
37 #include "deThread.hpp"
38 #include "deMutex.hpp"
39 #include "deSharedPtr.hpp"
47 namespace synchronization
57 using std::ostringstream;
59 using tcu::TestStatus;
60 using tcu::TestContext;
61 using tcu::ResultCollector;
62 using tcu::TestException;
// Test sizing constants: each worker thread performs EXECUTION_PER_THREAD
// pipeline-create + execute iterations; each execution writes into a result
// buffer of BUFFER_ELEMENT_COUNT 32-bit elements (BUFFER_SIZE is in bytes).
71 enum {EXECUTION_PER_THREAD = 100, BUFFER_ELEMENT_COUNT = 16, BUFFER_SIZE = BUFFER_ELEMENT_COUNT*4 }
// Per-family queue bookkeeping: parallel vectors of queue handles and their
// free/busy flags (true = free to be claimed by a worker thread).
75 typedef struct QueueType
77 vector<VkQueue> queues;
78 vector<bool> available;
// Registers a queue family: inserts 'count' availability flags, all initially
// false (queues become available only after createQueues() fetches the device
// queue handles), and sizes the handle vector to match.
// NOTE(review): the declaration of the local 'temp' falls on a line not
// visible in this listing -- confirm against the original file.
83 inline void addQueueFamilyIndex (const deUint32& queueFamilyIndex, const deUint32& count)
86 vector<bool>::iterator it;
87 it = temp.available.begin();
88 temp.available.insert(it, count, false);
90 temp.queues.resize(count);
91 m_queues[queueFamilyIndex] = temp;
// Maps a dense position [0..countQueueFamilyIndex()) onto the actual Vulkan
// queue-family index (the map key). Body is truncated here; presumably the
// iterator is advanced by 'index' and it->first returned -- verify.
94 const deUint32& getQueueFamilyIndex (const int index)
96 map<deUint32,Queues>::iterator it = m_queues.begin();
// Number of distinct queue families registered.
101 inline size_t countQueueFamilyIndex (void)
103 return m_queues.size();
// Access to the Queues record at dense position 'index'; body truncated in
// this listing (see getQueueFamilyIndex above).
106 Queues & getQueues (const int index)
108 map<deUint32,Queues>::iterator it = m_queues.begin();
// Attempts to claim any currently-free queue. On success the queue is marked
// busy and its family index, handle and in-family index are written to the
// out parameters. Callers busy-wait on this until it returns true.
// NOTE(review): no lock is visible in this listing around 'available';
// confirm the original guards this with a mutex, since multiple worker
// threads race through here.
113 bool getFreeQueue (deUint32& returnQueueFamilyIndex, VkQueue& returnQueues, int& returnQueueIndex)
115 for (int queueFamilyIndexNdx = 0 ; queueFamilyIndexNdx < static_cast<int>(m_queues.size()); ++queueFamilyIndexNdx)
117 Queues& queue = m_queues[getQueueFamilyIndex(queueFamilyIndexNdx)];
118 for (int queueNdx = 0; queueNdx < static_cast<int>(queue.queues.size()); ++queueNdx)
121 if (queue.available[queueNdx])
123 queue.available[queueNdx] = false;
124 returnQueueFamilyIndex = getQueueFamilyIndex(queueFamilyIndexNdx);
125 returnQueues = queue.queues[queueNdx];
126 returnQueueIndex = queueNdx;
// Returns a previously claimed queue to the free pool.
136 void releaseQueue (const deUint32& queueFamilyIndex, const int& queueIndex)
139 m_queues[queueFamilyIndex].available[queueIndex] = true;
// Takes ownership of the logical device created in createQueues().
143 inline void setDevice (Move<VkDevice> device)
145 m_logicalDevice = device;
148 inline VkDevice getDevice (void)
150 return *m_logicalDevice;
153 MovePtr<Allocator> m_allocator;
155 Move<VkDevice> m_logicalDevice;
156 map<deUint32,Queues> m_queues;
// Builds a SimpleAllocator for 'device' from the physical device's memory
// properties, so buffers/images created by the test can be bound to memory.
161 MovePtr<Allocator> createAllocator (const Context& context, const VkDevice& device)
163 const DeviceInterface& deviceInterface = context.getDeviceInterface();
164 const InstanceInterface& instance = context.getInstanceInterface();
165 const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
166 const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice);
168 // Create memory allocator for device
169 return MovePtr<Allocator> (new SimpleAllocator(deviceInterface, device, deviceMemoryProperties));
// True when a family advertising 'availableFlag' can service 'neededFlag'.
// Transfer work is implicitly supported by graphics and compute families,
// so for a transfer request any of the three capability bits suffices.
// NOTE(review): the return statements fall on lines not visible in this
// listing -- confirm both branches return true and the fallthrough false.
172 bool checkQueueFlags (const VkQueueFlags& availableFlag, const VkQueueFlags& neededFlag)
174 if (VK_QUEUE_TRANSFER_BIT == neededFlag)
176 if ( (availableFlag & VK_QUEUE_GRAPHICS_BIT) == VK_QUEUE_GRAPHICS_BIT ||
177 (availableFlag & VK_QUEUE_COMPUTE_BIT) == VK_QUEUE_COMPUTE_BIT ||
178 (availableFlag & VK_QUEUE_TRANSFER_BIT) == VK_QUEUE_TRANSFER_BIT
182 else if ((availableFlag & neededFlag) == neededFlag)
// Creates a logical device exposing every queue of every family that can
// service 'queueFlag', fetches all of its queue handles into a MultiQueues,
// marks them available, and attaches a device-memory allocator.
// Throws NotSupportedError when no matching family exists.
189 MovePtr<MultiQueues> createQueues (const Context& context, const VkQueueFlags& queueFlag)
191 const DeviceInterface& vk = context.getDeviceInterface();
192 const InstanceInterface& instance = context.getInstanceInterface();
193 const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
194 MovePtr<MultiQueues> moveQueues (new MultiQueues());
195 MultiQueues& queues = *moveQueues;
196 VkDeviceCreateInfo deviceInfo;
197 VkPhysicalDeviceFeatures deviceFeatures;
198 vector<VkQueueFamilyProperties> queueFamilyProperties;
199 vector<float> queuePriorities;
200 vector<VkDeviceQueueCreateInfo> queueInfos;
202 queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);
// Register every family whose capabilities cover the requested flag,
// reserving one slot per queue the family exposes.
204 for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
206 if (checkQueueFlags(queueFamilyProperties[queuePropertiesNdx].queueFlags, queueFlag))
208 queues.addQueueFamilyIndex(queuePropertiesNdx, queueFamilyProperties[queuePropertiesNdx].queueCount);
212 if (queues.countQueueFamilyIndex() == 0)
214 TCU_THROW(NotSupportedError, "Queue not found");
// One shared priority array sized for the largest family; every
// VkDeviceQueueCreateInfo below points into it (all priorities 1.0).
218 vector<float>::iterator it = queuePriorities.begin();
219 unsigned int maxQueueCount = 0;
220 for (int queueFamilyIndexNdx = 0; queueFamilyIndexNdx < static_cast<int>(queues.countQueueFamilyIndex()); ++queueFamilyIndexNdx)
222 if (queues.getQueues(queueFamilyIndexNdx).queues.size() > maxQueueCount)
223 maxQueueCount = static_cast<unsigned int>(queues.getQueues(queueFamilyIndexNdx).queues.size());
225 queuePriorities.insert(it, maxQueueCount, 1.0);
// Build one queue-create info per selected family, requesting all queues.
228 for (int queueFamilyIndexNdx = 0; queueFamilyIndexNdx < static_cast<int>(queues.countQueueFamilyIndex()); ++queueFamilyIndexNdx)
230 VkDeviceQueueCreateInfo queueInfo;
231 const deUint32 queueCount = static_cast<deUint32>(queues.getQueues(queueFamilyIndexNdx).queues.size());
233 deMemset(&queueInfo, 0, sizeof(queueInfo));
235 queueInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
236 queueInfo.pNext = DE_NULL;
237 queueInfo.flags = (VkDeviceQueueCreateFlags)0u;
238 queueInfo.queueFamilyIndex = queues.getQueueFamilyIndex(queueFamilyIndexNdx);
239 queueInfo.queueCount = queueCount;
240 queueInfo.pQueuePriorities = &queuePriorities[0];
242 queueInfos.push_back(queueInfo);
// Create the device with all supported features enabled, no extensions.
245 deMemset(&deviceInfo, 0, sizeof(deviceInfo));
246 instance.getPhysicalDeviceFeatures(physicalDevice, &deviceFeatures);
248 deviceInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
249 deviceInfo.pNext = DE_NULL;
250 deviceInfo.enabledExtensionCount = 0u;
251 deviceInfo.ppEnabledExtensionNames = DE_NULL;
252 deviceInfo.enabledLayerCount = 0u;
253 deviceInfo.ppEnabledLayerNames = DE_NULL;
254 deviceInfo.pEnabledFeatures = &deviceFeatures;
255 deviceInfo.queueCreateInfoCount = static_cast<deUint32>(queues.countQueueFamilyIndex());
256 deviceInfo.pQueueCreateInfos = &queueInfos[0];
258 queues.setDevice(createDevice(instance, physicalDevice, &deviceInfo));
// Fetch every created queue handle and mark it free for the worker threads.
260 for (deUint32 queueFamilyIndex = 0; queueFamilyIndex < queues.countQueueFamilyIndex(); ++queueFamilyIndex)
262 for (deUint32 queueReqNdx = 0; queueReqNdx < queues.getQueues(queueFamilyIndex).queues.size(); ++queueReqNdx)
264 vk.getDeviceQueue(queues.getDevice(), queues.getQueueFamilyIndex(queueFamilyIndex), queueReqNdx, &queues.getQueues(queueFamilyIndex).queues[queueReqNdx]);
265 queues.getQueues(queueFamilyIndex).available[queueReqNdx]=true;
269 queues.m_allocator = createAllocator(context, queues.getDevice());
// Builds a minimal single-subpass render pass with one color attachment in
// the given format: cleared on load, stored on store, no depth/stencil,
// layout COLOR_ATTACHMENT_OPTIMAL throughout. Used by the graphics variant
// of the pipeline-cache stress tests.
273 Move<VkRenderPass> createRenderPass (const Context& context, const VkDevice& device, const VkFormat& colorFormat)
275 const DeviceInterface& vk = context.getDeviceInterface();
276 const VkAttachmentDescription colorAttachmentDescription =
278 0u, // VkAttachmentDescriptionFlags flags;
279 colorFormat, // VkFormat format;
280 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
281 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
282 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
283 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
284 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
285 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
286 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
288 const VkAttachmentReference colorAttachmentReference =
290 0u, // deUint32 attachment;
291 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
293 const VkSubpassDescription subpassDescription =
295 0u, // VkSubpassDescriptionFlags flags;
296 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
297 0u, // deUint32 inputAttachmentCount;
298 DE_NULL, // const VkAttachmentReference* pInputAttachments;
299 1u, // deUint32 colorAttachmentCount;
300 &colorAttachmentReference, // const VkAttachmentReference* pColorAttachments;
301 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
302 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
303 0u, // deUint32 preserveAttachmentCount;
304 DE_NULL // const VkAttachmentReference* pPreserveAttachments;
306 const VkRenderPassCreateInfo renderPassParams =
308 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
309 DE_NULL, // const void* pNext;
310 0u, // VkRenderPassCreateFlags flags;
311 1u, // deUint32 attachmentCount;
312 &colorAttachmentDescription, // const VkAttachmentDescription* pAttachments;
313 1u, // deUint32 subpassCount;
314 &subpassDescription, // const VkSubpassDescription* pSubpasses;
315 0u, // deUint32 dependencyCount;
316 DE_NULL // const VkSubpassDependency* pDependencies;
318 return createRenderPass(vk, device, &renderPassParams);
// Runs 'pipeline' once on any free queue: claims a queue (busy-waiting until
// one is released), records a dispatch of 'shadersExecutions' workgroups
// writing into a host-visible storage buffer, submits and waits, releases
// the queue, then validates the buffer contents on the host.
// NOTE(review): the expected per-element value in the verification loop
// falls on lines not visible in this listing -- confirm in the original.
321 TestStatus executeComputePipeline (const Context& context, const VkPipeline& pipeline, const VkPipelineLayout& pipelineLayout,
322 const VkDescriptorSetLayout& descriptorSetLayout, MultiQueues& queues, const deUint32& shadersExecutions)
324 const DeviceInterface& vk = context.getDeviceInterface();
325 const VkDevice device = queues.getDevice();
326 deUint32 queueFamilyIndex;
// Spin until some other thread releases a queue we can claim.
329 while(!queues.getFreeQueue(queueFamilyIndex, queue, queueIndex)){}
332 const Unique<VkDescriptorPool> descriptorPool (DescriptorPoolBuilder()
333 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
334 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
335 Buffer resultBuffer (vk, device, *queues.m_allocator, makeBufferCreateInfo(BUFFER_SIZE, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);
336 const VkBufferMemoryBarrier bufferBarrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, BUFFER_SIZE);
337 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
338 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
// Zero the result buffer so stale data cannot masquerade as shader output.
341 const Allocation& alloc = resultBuffer.getAllocation();
342 deMemset(alloc.getHostPtr(), 0, BUFFER_SIZE);
343 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), BUFFER_SIZE);
346 // Start recording commands
347 beginCommandBuffer(vk, *cmdBuffer);
349 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
351 // Create descriptor set
352 const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(vk, device, *descriptorPool, descriptorSetLayout));
354 const VkDescriptorBufferInfo resultDescriptorInfo = makeDescriptorBufferInfo(*resultBuffer, 0ull, BUFFER_SIZE);
356 DescriptorSetUpdateBuilder()
357 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo)
360 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
362 // Dispatch indirect compute command
363 vk.cmdDispatch(*cmdBuffer, shadersExecutions, 1u, 1u);
// Make shader writes visible to the host read-back below.
365 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
366 0, (const VkMemoryBarrier*)DE_NULL,
368 0, (const VkImageMemoryBarrier*)DE_NULL);
370 // End recording commands
371 endCommandBuffer(vk, *cmdBuffer);
373 // Wait for command buffer execution finish
374 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
375 queues.releaseQueue(queueFamilyIndex, queueIndex);
// Host-side verification of the shader output.
378 const Allocation& resultAlloc = resultBuffer.getAllocation();
379 invalidateMappedMemoryRange(vk, device, resultAlloc.getMemory(), resultAlloc.getOffset(), BUFFER_SIZE);
381 const deInt32* ptr = reinterpret_cast<deInt32*>(resultAlloc.getHostPtr());
382 for (deInt32 ndx = 0; ndx < BUFFER_ELEMENT_COUNT; ++ndx)
386 return TestStatus::fail("The data don't match");
390 return TestStatus::pass("Passed");
// Graphics counterpart of executeComputePipeline(): claims a free queue,
// renders 'shadersExecutions' points into a 1x1 color attachment while the
// vertex shader writes results into a host-visible storage buffer, submits
// and waits, releases the queue, then validates the buffer on the host.
// NOTE(review): the expected per-element value in the verification loop
// falls on lines not visible in this listing -- confirm in the original.
395 TestStatus executeGraphicPipeline (const Context& context, const VkPipeline& pipeline, const VkPipelineLayout& pipelineLayout,
396 const VkDescriptorSetLayout& descriptorSetLayout, MultiQueues& queues, const VkRenderPass& renderPass, const deUint32 shadersExecutions)
398 const DeviceInterface& vk = context.getDeviceInterface();
399 const VkDevice device = queues.getDevice();
400 deUint32 queueFamilyIndex;
// Spin until some other thread releases a queue we can claim.
403 while(!queues.getFreeQueue(queueFamilyIndex, queue, queueIndex)){}
406 const Unique<VkDescriptorPool> descriptorPool (DescriptorPoolBuilder()
407 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
408 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
409 Move<VkDescriptorSet> descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, descriptorSetLayout);
410 Buffer resultBuffer (vk, device, *queues.m_allocator, makeBufferCreateInfo(BUFFER_SIZE, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);
411 const VkBufferMemoryBarrier bufferBarrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, BUFFER_SIZE);
412 const VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
413 const VkExtent3D colorImageExtent = makeExtent3D(1u, 1u, 1u);
414 const VkImageSubresourceRange colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
415 de::MovePtr<Image> colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, *queues.m_allocator,
416 makeImageCreateInfo(VK_IMAGE_TYPE_2D, colorImageExtent, colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
417 MemoryRequirement::Any));
418 Move<VkImageView> colorAttachmentView = makeImageView(vk, device, **colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, colorFormat, colorImageSubresourceRange);
419 Move<VkFramebuffer> framebuffer = makeFramebuffer(vk, device, renderPass, *colorAttachmentView, colorImageExtent.width, colorImageExtent.height, 1u);
420 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
421 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(vk, device, *cmdPool));
422 const VkDescriptorBufferInfo outputBufferDescriptorInfo = makeDescriptorBufferInfo(*resultBuffer, 0ull, BUFFER_SIZE);
424 DescriptorSetUpdateBuilder()
425 .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outputBufferDescriptorInfo)
426 .update (vk, device);
// Zero the result buffer so stale data cannot masquerade as shader output.
429 const Allocation& alloc = resultBuffer.getAllocation();
430 deMemset(alloc.getHostPtr(), 0, BUFFER_SIZE);
431 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), BUFFER_SIZE);
434 // Start recording commands
435 beginCommandBuffer(vk, *cmdBuffer);
436 // Change color attachment image layout
438 const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
439 (VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
440 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
441 **colorAttachmentImage, colorImageSubresourceRange);
443 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0,
444 0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
448 const VkRect2D renderArea =
453 const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
454 beginRenderPass(vk, *cmdBuffer, renderPass, *framebuffer, renderArea, clearColor);
// One point per requested execution; the vertex shader does the writes.
457 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
458 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
460 vk.cmdDraw(*cmdBuffer, shadersExecutions, 1u, 0u, 0u);
461 endRenderPass(vk, *cmdBuffer);
// Make vertex-shader writes visible to the host read-back below.
463 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
464 0, (const VkMemoryBarrier*)DE_NULL,
466 0, (const VkImageMemoryBarrier*)DE_NULL);
468 // End recording commands
469 endCommandBuffer(vk, *cmdBuffer);
471 // Wait for command buffer execution finish
472 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
473 queues.releaseQueue(queueFamilyIndex, queueIndex);
// Host-side verification of the shader output.
476 const Allocation& resultAlloc = resultBuffer.getAllocation();
477 invalidateMappedMemoryRange(vk, device, resultAlloc.getMemory(), resultAlloc.getOffset(), BUFFER_SIZE);
479 const deInt32* ptr = reinterpret_cast<deInt32*>(resultAlloc.getHostPtr());
480 for (deInt32 ndx = 0; ndx < BUFFER_ELEMENT_COUNT; ++ndx)
484 return TestStatus::fail("The data don't match");
488 return TestStatus::pass("Passed");
// Base class for the worker threads: wraps deThread, funnels the result of
// the subclass's runThread() (and any escaping exception) into a per-thread
// ResultCollector so the main thread can aggregate outcomes after join().
493 class ThreadGroupThread : private Thread
496 ThreadGroupThread (const Context& context, VkPipelineCache pipelineCache, const VkPipelineLayout& pipelineLayout,
497 const VkDescriptorSetLayout& descriptorSetLayout, MultiQueues& queues, const vector<deUint32>& shadersExecutions)
498 : m_context (context)
499 , m_pipelineCache (pipelineCache)
500 , m_pipelineLayout (pipelineLayout)
501 , m_descriptorSetLayout (descriptorSetLayout)
503 , m_shadersExecutions (shadersExecutions)
507 virtual ~ThreadGroupThread (void)
511 ResultCollector& getResultCollector (void)
513 return m_resultCollector;
// Subclasses implement the actual per-thread workload here.
520 virtual TestStatus runThread () = 0;
521 const Context& m_context;
522 VkPipelineCache m_pipelineCache;
523 const VkPipelineLayout& m_pipelineLayout;
524 const VkDescriptorSetLayout& m_descriptorSetLayout;
525 MultiQueues& m_queues;
526 const vector<deUint32>& m_shadersExecutions;
// Non-copyable: declared but not defined.
529 ThreadGroupThread (const ThreadGroupThread&);
530 ThreadGroupThread& operator= (const ThreadGroupThread&);
// Thread entry point (run()): execute the workload and record the outcome;
// exceptions must not escape a thread, so they are converted to results.
536 TestStatus result = runThread();
537 m_resultCollector.addResult(result.getCode(), result.getDescription());
539 catch (const TestException& e)
541 m_resultCollector.addResult(e.getTestResult(), e.getMessage());
543 catch (const exception& e)
545 m_resultCollector.addResult(QP_TEST_RESULT_FAIL, e.what());
// Catch-all for non-std exceptions (falls on a line not visible here).
549 m_resultCollector.addResult(QP_TEST_RESULT_FAIL, "Exception");
553 ResultCollector m_resultCollector;
// Owns a set of worker threads; run() starts them all, joins them, and
// folds every per-thread ResultCollector into a single TestStatus.
558 typedef vector<SharedPtr<ThreadGroupThread> > ThreadVector;
// Takes ownership of a newly constructed worker thread.
567 void add (MovePtr<ThreadGroupThread> thread)
569 m_threads.push_back(SharedPtr<ThreadGroupThread>(thread.release()));
572 TestStatus run (void)
574 ResultCollector resultCollector;
// Start every thread first so they genuinely run concurrently...
576 for (ThreadVector::iterator threadIter = m_threads.begin(); threadIter != m_threads.end(); ++threadIter)
577 (*threadIter)->start();
// ...then join and aggregate each thread's recorded result.
579 for (ThreadVector::iterator threadIter = m_threads.begin(); threadIter != m_threads.end(); ++threadIter)
581 ResultCollector& threadResult = (*threadIter)->getResultCollector();
582 (*threadIter)->join();
583 resultCollector.addResult(threadResult.getResult(), threadResult.getMessage());
586 return TestStatus(resultCollector.getResult(), resultCollector.getMessage());
590 ThreadVector m_threads;
// Worker thread that stresses the shared, internally synchronized
// VkPipelineCache: each iteration creates a compute pipeline from the cache
// (cycling through the prepared create infos) and immediately executes it.
594 class CreateComputeThread : public ThreadGroupThread
597 CreateComputeThread (const Context& context, VkPipelineCache pipelineCache, vector<VkComputePipelineCreateInfo>& pipelineInfo,
598 const VkPipelineLayout& pipelineLayout, const VkDescriptorSetLayout& descriptorSetLayout,
599 MultiQueues& queues, const vector<deUint32>& shadersExecutions)
600 : ThreadGroupThread (context, pipelineCache, pipelineLayout, descriptorSetLayout, queues, shadersExecutions)
601 , m_pipelineInfo (pipelineInfo)
605 TestStatus runThread (void)
607 ResultCollector resultCollector;
608 for (int executionNdx = 0; executionNdx < EXECUTION_PER_THREAD; ++executionNdx)
610 const int shaderNdx = executionNdx % (int)m_pipelineInfo.size();
611 const DeviceInterface& vk = m_context.getDeviceInterface();
612 const VkDevice device = m_queues.getDevice();
// Concurrent createComputePipeline() calls against the shared cache --
// the behavior under test.
613 Move<VkPipeline> pipeline = createComputePipeline(vk,device,m_pipelineCache, &m_pipelineInfo[shaderNdx]);
615 TestStatus result = executeComputePipeline(m_context, *pipeline, m_pipelineLayout, m_descriptorSetLayout, m_queues, m_shadersExecutions[shaderNdx]);
616 resultCollector.addResult(result.getCode(), result.getDescription());
618 return TestStatus(resultCollector.getResult(), resultCollector.getMessage());
621 vector<VkComputePipelineCreateInfo>& m_pipelineInfo;
// Graphics counterpart of CreateComputeThread: each iteration creates a
// graphics pipeline from the shared VkPipelineCache and executes it through
// the common render pass.
624 class CreateGraphicThread : public ThreadGroupThread
627 CreateGraphicThread (const Context& context, VkPipelineCache pipelineCache, vector<VkGraphicsPipelineCreateInfo>& pipelineInfo,
628 const VkPipelineLayout& pipelineLayout, const VkDescriptorSetLayout& descriptorSetLayout,
629 MultiQueues& queues, const VkRenderPass& renderPass, const vector<deUint32>& shadersExecutions)
630 : ThreadGroupThread (context, pipelineCache, pipelineLayout, descriptorSetLayout, queues, shadersExecutions)
631 , m_pipelineInfo (pipelineInfo)
632 , m_renderPass (renderPass)
635 TestStatus runThread (void)
637 ResultCollector resultCollector;
638 for (int executionNdx = 0; executionNdx < EXECUTION_PER_THREAD; ++executionNdx)
640 const int shaderNdx = executionNdx % (int)m_pipelineInfo.size();
641 const DeviceInterface& vk = m_context.getDeviceInterface();
642 const VkDevice device = m_queues.getDevice();
// Concurrent createGraphicsPipeline() calls against the shared cache --
// the behavior under test.
643 Move<VkPipeline> pipeline = createGraphicsPipeline(vk,device, m_pipelineCache, &m_pipelineInfo[shaderNdx]);
645 TestStatus result = executeGraphicPipeline(m_context, *pipeline, m_pipelineLayout, m_descriptorSetLayout, m_queues, m_renderPass, m_shadersExecutions[shaderNdx]);
646 resultCollector.addResult(result.getCode(), result.getDescription());
648 return TestStatus(resultCollector.getResult(), resultCollector.getMessage());
652 vector<VkGraphicsPipelineCreateInfo>& m_pipelineInfo;
653 const VkRenderPass& m_renderPass;
// Test instance: many threads concurrently create compute pipelines from a
// single shared VkPipelineCache (an internally synchronized object) and run
// them, verifying that no corruption or wrong results occur.
656 class PipelineCacheComputeTestInstance : public TestInstance
658 typedef vector<SharedPtr<Unique<VkShaderModule> > > ShaderModuleVector;
660 PipelineCacheComputeTestInstance (Context& context, const vector<deUint32>& shadersExecutions)
661 : TestInstance (context)
662 , m_shadersExecutions (shadersExecutions)
667 TestStatus iterate (void)
669 const DeviceInterface& vk = m_context.getDeviceInterface();
670 MovePtr<MultiQueues> queues = createQueues(m_context, VK_QUEUE_COMPUTE_BIT);
671 const VkDevice device = queues->getDevice();
672 ShaderModuleVector shaderCompModules = addShaderModules(device);
673 Buffer resultBuffer (vk, device, *queues->m_allocator, makeBufferCreateInfo(BUFFER_SIZE, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);
674 const Move<VkDescriptorSetLayout> descriptorSetLayout (DescriptorSetLayoutBuilder()
675 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
677 const Move<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
678 vector<VkPipelineShaderStageCreateInfo> shaderStageInfos = addShaderStageInfo(shaderCompModules);
679 vector<VkComputePipelineCreateInfo> pipelineInfo = addPipelineInfo(*pipelineLayout, shaderStageInfos);
680 const VkPipelineCacheCreateInfo pipelineCacheInfo =
682 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
683 DE_NULL, // const void* pNext;
684 0u, // VkPipelineCacheCreateFlags flags;
685 0u, // deUintptr initialDataSize;
686 DE_NULL, // const void* pInitialData;
688 Move<VkPipelineCache> pipelineCache = createPipelineCache(vk, device, &pipelineCacheInfo);
689 Move<VkPipeline> pipeline = createComputePipeline(vk, device, *pipelineCache, &pipelineInfo[0]);
// One worker per logical core, clamped to [4, 32].
690 const deUint32 numThreads = clamp(deGetNumAvailableLogicalCores(), 4u, 32u);
// Single-threaded warm-up execution; its TestStatus is not checked here --
// presumably intentional (priming the cache), verify against original.
693 executeComputePipeline(m_context, *pipeline, *pipelineLayout, *descriptorSetLayout, *queues, m_shadersExecutions[0]);
695 for (deUint32 ndx = 0; ndx < numThreads; ++ndx)
696 threads.add(MovePtr<ThreadGroupThread>(new CreateComputeThread(
697 m_context, *pipelineCache, pipelineInfo, *pipelineLayout, *descriptorSetLayout, *queues, m_shadersExecutions)));
700 TestStatus thread_result = threads.run();
701 if(thread_result.getCode())
703 return thread_result;
706 return TestStatus::pass("Passed");
// Loads one "compute_<n>" shader module per configured execution count.
710 ShaderModuleVector addShaderModules (const VkDevice& device)
712 const DeviceInterface& vk = m_context.getDeviceInterface();
713 ShaderModuleVector shaderCompModules;
714 shaderCompModules.resize(m_shadersExecutions.size());
715 for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
717 ostringstream shaderName;
718 shaderName<<"compute_"<<shaderNdx;
719 shaderCompModules[shaderNdx] = SharedPtr<Unique<VkShaderModule> > (new Unique<VkShaderModule>(createShaderModule(vk, device, m_context.getBinaryCollection().get(shaderName.str()), (VkShaderModuleCreateFlags)0)));
721 return shaderCompModules;
// Builds one compute-stage create info per shader module; all fields but
// 'module' are shared.
724 vector<VkPipelineShaderStageCreateInfo> addShaderStageInfo (const ShaderModuleVector& shaderCompModules)
726 VkPipelineShaderStageCreateInfo shaderStageInfo;
727 vector<VkPipelineShaderStageCreateInfo> shaderStageInfos;
728 shaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
729 shaderStageInfo.pNext = DE_NULL;
730 shaderStageInfo.flags = (VkPipelineShaderStageCreateFlags)0;
731 shaderStageInfo.stage = VK_SHADER_STAGE_COMPUTE_BIT;
732 shaderStageInfo.pName = "main";
733 shaderStageInfo.pSpecializationInfo = DE_NULL;
735 for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
737 shaderStageInfo.module = *(*shaderCompModules[shaderNdx]);
738 shaderStageInfos.push_back(shaderStageInfo);
740 return shaderStageInfos;
// Builds one compute-pipeline create info per shader stage, all sharing the
// same layout and no base pipeline.
743 vector<VkComputePipelineCreateInfo> addPipelineInfo (VkPipelineLayout pipelineLayout, const vector<VkPipelineShaderStageCreateInfo>& shaderStageInfos)
745 vector<VkComputePipelineCreateInfo> pipelineInfos;
746 VkComputePipelineCreateInfo computePipelineInfo;
747 computePipelineInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
748 computePipelineInfo.pNext = DE_NULL;
749 computePipelineInfo.flags = (VkPipelineCreateFlags)0;
750 computePipelineInfo.layout = pipelineLayout;
751 computePipelineInfo.basePipelineHandle = DE_NULL;
752 computePipelineInfo.basePipelineIndex = 0;
754 for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
756 computePipelineInfo.stage = shaderStageInfos[shaderNdx];
757 pipelineInfos.push_back(computePipelineInfo);
759 return pipelineInfos;
762 const vector<deUint32> m_shadersExecutions;
765 class PipelineCacheGraphicTestInstance : public TestInstance
767 typedef vector<SharedPtr<Unique<VkShaderModule> > > ShaderModuleVector;
769 PipelineCacheGraphicTestInstance (Context& context, const vector<deUint32>& shadersExecutions)
770 : TestInstance (context)
771 , m_shadersExecutions (shadersExecutions)
776 TestStatus iterate (void)
778 requireFeatures(m_context.getInstanceInterface(), m_context.getPhysicalDevice(), FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS);
780 const DeviceInterface& vk = m_context.getDeviceInterface();
781 MovePtr<MultiQueues> queues = createQueues (m_context, VK_QUEUE_GRAPHICS_BIT);
782 const VkDevice device = queues->getDevice();
783 VkFormat colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
784 Move<VkRenderPass> renderPass = createRenderPass(m_context, device, colorFormat);
785 const Move<VkDescriptorSetLayout> descriptorSetLayout (DescriptorSetLayoutBuilder()
786 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_VERTEX_BIT)
788 ShaderModuleVector shaderGraphicModules = addShaderModules(device);
789 const Move<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
790 vector<VkPipelineShaderStageCreateInfo> shaderStageInfos = addShaderStageInfo(shaderGraphicModules);
791 vector<VkGraphicsPipelineCreateInfo> pipelineInfo = addPipelineInfo(*pipelineLayout, shaderStageInfos, *renderPass);
792 const VkPipelineCacheCreateInfo pipelineCacheInfo =
794 VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, // VkStructureType sType;
795 DE_NULL, // const void* pNext;
796 0u, // VkPipelineCacheCreateFlags flags;
797 0u, // deUintptr initialDataSize;
798 DE_NULL, // const void* pInitialData;
800 Move<VkPipelineCache> pipelineCache = createPipelineCache(vk, device, &pipelineCacheInfo);
801 Move<VkPipeline> pipeline = createGraphicsPipeline(vk, device, *pipelineCache, &pipelineInfo[0]);
802 const deUint32 numThreads = clamp(deGetNumAvailableLogicalCores(), 4u, 32u);
805 executeGraphicPipeline(m_context, *pipeline, *pipelineLayout, *descriptorSetLayout, *queues, *renderPass, m_shadersExecutions[0]);
807 for (deUint32 ndx = 0; ndx < numThreads; ++ndx)
808 threads.add(MovePtr<ThreadGroupThread>(new CreateGraphicThread(
809 m_context, *pipelineCache, pipelineInfo, *pipelineLayout, *descriptorSetLayout, *queues, *renderPass, m_shadersExecutions)));
812 TestStatus thread_result = threads.run();
813 if(thread_result.getCode())
815 return thread_result;
818 return TestStatus::pass("Passed");
822 ShaderModuleVector addShaderModules (const VkDevice& device)
824 const DeviceInterface& vk = m_context.getDeviceInterface();
825 ShaderModuleVector shaderModules;
826 shaderModules.resize(m_shadersExecutions.size() + 1);
827 for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
829 ostringstream shaderName;
830 shaderName<<"vert_"<<shaderNdx;
831 shaderModules[shaderNdx] = SharedPtr<Unique<VkShaderModule> > (new Unique<VkShaderModule>(createShaderModule(vk, device, m_context.getBinaryCollection().get(shaderName.str()), (VkShaderModuleCreateFlags)0)));
833 shaderModules[m_shadersExecutions.size()] = SharedPtr<Unique<VkShaderModule> > (new Unique<VkShaderModule>(createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), (VkShaderModuleCreateFlags)0)));
834 return shaderModules;
837 vector<VkPipelineShaderStageCreateInfo> addShaderStageInfo (const ShaderModuleVector& shaderCompModules)
839 VkPipelineShaderStageCreateInfo shaderStageInfo;
840 vector<VkPipelineShaderStageCreateInfo> shaderStageInfos;
841 shaderStageInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
842 shaderStageInfo.pNext = DE_NULL;
843 shaderStageInfo.flags = (VkPipelineShaderStageCreateFlags)0;
844 shaderStageInfo.pName = "main";
845 shaderStageInfo.pSpecializationInfo = DE_NULL;
847 for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()); ++shaderNdx)
849 shaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
850 shaderStageInfo.module = *(*shaderCompModules[shaderNdx]);
851 shaderStageInfos.push_back(shaderStageInfo);
853 shaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
854 shaderStageInfo.module = *(*shaderCompModules[m_shadersExecutions.size()]);
855 shaderStageInfos.push_back(shaderStageInfo);
857 return shaderStageInfos;
// Builds one VkGraphicsPipelineCreateInfo per vertex shader (m_shadersExecutions.size()
// pipelines in total).  All fixed-function state is written into member structs so the
// pointers embedded in the returned create infos stay valid after this call returns;
// the returned infos must therefore not outlive this object.
// Each pipeline consumes a (vertex, fragment) pair from shaderStageInfos (stride 2).
vector<VkGraphicsPipelineCreateInfo> addPipelineInfo (VkPipelineLayout pipelineLayout, const vector<VkPipelineShaderStageCreateInfo>& shaderStageInfos, const VkRenderPass& renderPass)
// Render target is a single 1x1 pixel; viewport and scissor are sized to match.
VkExtent3D colorImageExtent = makeExtent3D(1u, 1u, 1u);
vector<VkGraphicsPipelineCreateInfo> pipelineInfo;
// No vertex buffers: the test shaders generate all data from gl_VertexIndex.
m_vertexInputStateParams.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
m_vertexInputStateParams.pNext = DE_NULL;
m_vertexInputStateParams.flags = 0u;
m_vertexInputStateParams.vertexBindingDescriptionCount = 0u;
m_vertexInputStateParams.pVertexBindingDescriptions = DE_NULL;
m_vertexInputStateParams.vertexAttributeDescriptionCount = 0u;
m_vertexInputStateParams.pVertexAttributeDescriptions = DE_NULL;
// Points are drawn so each vertex shader invocation maps to one buffer element.
m_inputAssemblyStateParams.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
m_inputAssemblyStateParams.pNext = DE_NULL;
m_inputAssemblyStateParams.flags = 0u;
m_inputAssemblyStateParams.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
m_inputAssemblyStateParams.primitiveRestartEnable = VK_FALSE;
// NOTE(review): m_viewport.x / m_viewport.y are not assigned in this view --
// confirm they are set (or zero-initialized) before the viewport is consumed.
m_viewport.width = (float)colorImageExtent.width;
m_viewport.height = (float)colorImageExtent.height;
m_viewport.minDepth = 0.0f;
m_viewport.maxDepth = 1.0f;
m_scissor.offset.x = 0;
m_scissor.offset.y = 0;
m_scissor.extent.width = colorImageExtent.width;
m_scissor.extent.height = colorImageExtent.height;
// Static (non-dynamic) single viewport/scissor state.
m_viewportStateParams.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
m_viewportStateParams.pNext = DE_NULL;
m_viewportStateParams.flags = 0u;
m_viewportStateParams.viewportCount = 1u;
m_viewportStateParams.pViewports = &m_viewport;
m_viewportStateParams.scissorCount = 1u;
m_viewportStateParams.pScissors = &m_scissor;
// Plain fill-mode rasterization, no culling, no depth bias.
m_rasterStateParams.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
m_rasterStateParams.pNext = DE_NULL;
m_rasterStateParams.flags = 0u;
m_rasterStateParams.depthClampEnable = VK_FALSE;
m_rasterStateParams.rasterizerDiscardEnable = VK_FALSE;
m_rasterStateParams.polygonMode = VK_POLYGON_MODE_FILL;
m_rasterStateParams.cullMode = VK_CULL_MODE_NONE;
m_rasterStateParams.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
m_rasterStateParams.depthBiasEnable = VK_FALSE;
m_rasterStateParams.depthBiasConstantFactor = 0.0f;
m_rasterStateParams.depthBiasClamp = 0.0f;
m_rasterStateParams.depthBiasSlopeFactor = 0.0f;
m_rasterStateParams.lineWidth = 1.0f;
// Blending disabled; all color components written.
m_colorBlendAttachmentState.blendEnable = VK_FALSE;
m_colorBlendAttachmentState.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
m_colorBlendAttachmentState.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
m_colorBlendAttachmentState.colorBlendOp = VK_BLEND_OP_ADD;
m_colorBlendAttachmentState.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
m_colorBlendAttachmentState.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
m_colorBlendAttachmentState.alphaBlendOp = VK_BLEND_OP_ADD;
m_colorBlendAttachmentState.colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT |
VK_COLOR_COMPONENT_A_BIT;
m_colorBlendStateParams.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
m_colorBlendStateParams.pNext = DE_NULL;
m_colorBlendStateParams.flags = 0u;
m_colorBlendStateParams.logicOpEnable = VK_FALSE;
m_colorBlendStateParams.logicOp = VK_LOGIC_OP_COPY;
m_colorBlendStateParams.attachmentCount = 1u;
m_colorBlendStateParams.pAttachments = &m_colorBlendAttachmentState;
m_colorBlendStateParams.blendConstants[0] = 0.0f;
m_colorBlendStateParams.blendConstants[1] = 0.0f;
m_colorBlendStateParams.blendConstants[2] = 0.0f;
m_colorBlendStateParams.blendConstants[3] = 0.0f;
// Single-sample rendering.
m_multisampleStateParams.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
m_multisampleStateParams.pNext = DE_NULL;
m_multisampleStateParams.flags = 0u;
m_multisampleStateParams.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
m_multisampleStateParams.sampleShadingEnable = VK_FALSE;
m_multisampleStateParams.minSampleShading = 0.0f;
m_multisampleStateParams.pSampleMask = DE_NULL;
m_multisampleStateParams.alphaToCoverageEnable = VK_FALSE;
m_multisampleStateParams.alphaToOneEnable = VK_FALSE;
// Depth test enabled (LESS_OR_EQUAL); stencil disabled, ops set to inert KEEP/NEVER.
m_depthStencilStateParams.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
m_depthStencilStateParams.pNext = DE_NULL;
m_depthStencilStateParams.flags = 0u;
m_depthStencilStateParams.depthTestEnable = VK_TRUE;
m_depthStencilStateParams.depthWriteEnable = VK_TRUE;
m_depthStencilStateParams.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL;
m_depthStencilStateParams.depthBoundsTestEnable = VK_FALSE;
m_depthStencilStateParams.stencilTestEnable = VK_FALSE;
m_depthStencilStateParams.front.failOp = VK_STENCIL_OP_KEEP;
m_depthStencilStateParams.front.passOp = VK_STENCIL_OP_KEEP;
m_depthStencilStateParams.front.depthFailOp = VK_STENCIL_OP_KEEP;
m_depthStencilStateParams.front.compareOp = VK_COMPARE_OP_NEVER;
m_depthStencilStateParams.front.compareMask = 0u;
m_depthStencilStateParams.front.writeMask = 0u;
m_depthStencilStateParams.front.reference = 0u;
m_depthStencilStateParams.back.failOp = VK_STENCIL_OP_KEEP;
m_depthStencilStateParams.back.passOp = VK_STENCIL_OP_KEEP;
m_depthStencilStateParams.back.depthFailOp = VK_STENCIL_OP_KEEP;
m_depthStencilStateParams.back.compareOp = VK_COMPARE_OP_NEVER;
m_depthStencilStateParams.back.compareMask = 0u;
m_depthStencilStateParams.back.writeMask = 0u;
m_depthStencilStateParams.back.reference = 0u;
m_depthStencilStateParams.minDepthBounds = 0.0f;
m_depthStencilStateParams.maxDepthBounds = 1.0f;
// Template create info; pStages is patched per pipeline in the loop below.
VkGraphicsPipelineCreateInfo graphicsPipelineParams =
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
0u, // VkPipelineCreateFlags flags;
2u, // deUint32 stageCount;
DE_NULL, // const VkPipelineShaderStageCreateInfo* pStages;
&m_vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
&m_inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
&m_viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState;
&m_rasterStateParams, // const VkPipelineRasterizationStateCreateInfo* pRasterState;
&m_multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
&m_depthStencilStateParams, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
&m_colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
(const VkPipelineDynamicStateCreateInfo*)DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
pipelineLayout, // VkPipelineLayout layout;
renderPass, // VkRenderPass renderPass;
0u, // deUint32 subpass;
DE_NULL, // VkPipeline basePipelineHandle;
0, // deInt32 basePipelineIndex;
// Stage infos come in (vertex, fragment) pairs, hence the stride of 2.
for (int shaderNdx = 0; shaderNdx < static_cast<int>(m_shadersExecutions.size()) * 2; shaderNdx+=2)
graphicsPipelineParams.pStages = &shaderStageInfos[shaderNdx];
pipelineInfo.push_back(graphicsPipelineParams);
return pipelineInfo;
// Per-vertex-shader execution (vertex) counts supplied at construction.
const vector<deUint32> m_shadersExecutions;
// Fixed-function state lives in members (not locals) because addPipelineInfo()
// stores pointers to these structs inside the returned VkGraphicsPipelineCreateInfo.
// NOTE(review): addPipelineInfo() also references m_scissor (VkRect2D) -- confirm
// its declaration is present alongside these members.
VkPipelineVertexInputStateCreateInfo m_vertexInputStateParams;
VkPipelineInputAssemblyStateCreateInfo m_inputAssemblyStateParams;
VkViewport m_viewport;
VkPipelineViewportStateCreateInfo m_viewportStateParams;
VkPipelineRasterizationStateCreateInfo m_rasterStateParams;
VkPipelineColorBlendAttachmentState m_colorBlendAttachmentState;
VkPipelineColorBlendStateCreateInfo m_colorBlendStateParams;
VkPipelineMultisampleStateCreateInfo m_multisampleStateParams;
VkPipelineDepthStencilStateCreateInfo m_depthStencilStateParams;
// Test case whose instance exercises VkPipelineCache with compute pipelines;
// the cache is an internally synchronized object per the Vulkan specification.
class PipelineCacheComputeTest : public TestCase
// NOTE(review): the initializer list uses 'name' -- confirm the ctor declares a
// 'const string& name' parameter between testCtx and description.
PipelineCacheComputeTest (TestContext& testCtx,
const string& description)
:TestCase (testCtx, name, description)
// Provides three compute shaders; each fills Output.result[] with element
// indices, but using a different invocation strategy.
void initPrograms (SourceCollections& programCollection) const
ostringstream buffer;
// Shared SSBO declaration (set 0, binding 0) appended into every shader below.
buffer << "layout(set = 0, binding = 0, std430) buffer Output\n"
<< " int result[];\n"
// compute_0: one invocation per element; each writes result[gl_GlobalInvocationID.x].
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
<< "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
<< "void main (void)\n"
<< " highp uint ndx = gl_GlobalInvocationID.x;\n"
<< " sb_out.result[ndx] = int(ndx);\n"
programCollection.glslSources.add("compute_0") << glu::ComputeSource(src.str());
// compute_1: a single invocation loops over all BUFFER_ELEMENT_COUNT elements.
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
<< "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
<< "void main (void)\n"
<< " for (highp uint ndx = 0u; ndx < "<<BUFFER_ELEMENT_COUNT<<"u; ndx++)\n"
<< " sb_out.result[ndx] = int(ndx);\n"
programCollection.glslSources.add("compute_1") << glu::ComputeSource(src.str());
// compute_2: one workgroup of BUFFER_ELEMENT_COUNT invocations; each writes
// result[gl_LocalInvocationID.x].
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
<< "layout(local_size_x = "<<BUFFER_ELEMENT_COUNT<<", local_size_y = 1, local_size_z = 1) in;\n"
<< "void main (void)\n"
<< " highp uint ndx = gl_LocalInvocationID.x;\n"
<< " sb_out.result[ndx] = int(ndx);\n"
programCollection.glslSources.add("compute_2") << glu::ComputeSource(src.str());
// Execution counts match each shader's strategy: 16 dispatch groups for
// compute_0, a single one for compute_1 and compute_2.
TestInstance* createInstance (Context& context) const
vector<deUint32> shadersExecutions;
shadersExecutions.push_back(16u); //compute_0
shadersExecutions.push_back(1u); //compute_1
shadersExecutions.push_back(1u); //compute_2
return new PipelineCacheComputeTestInstance(context, shadersExecutions);
// Test case whose instance exercises VkPipelineCache with graphics pipelines;
// the cache is an internally synchronized object per the Vulkan specification.
class PipelineCacheGraphicTest : public TestCase
// NOTE(review): the initializer list uses 'name' -- confirm the ctor declares a
// 'const string& name' parameter between testCtx and description.
PipelineCacheGraphicTest (TestContext& testCtx,
const string& description)
:TestCase (testCtx, name, description)
// Provides three vertex shaders (each fills Output.result[] with element
// indices via a different strategy) plus one shared fragment shader.
void initPrograms (SourceCollections& programCollection) const
ostringstream buffer;
// Shared SSBO declaration (set 0, binding 0) appended into every vertex shader.
buffer << "layout(set = 0, binding = 0, std430) buffer Output\n"
<< " int result[];\n"
// vert_0: one point per element; each invocation writes result[gl_VertexIndex].
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "void main (void)\n"
<< " sb_out.result[gl_VertexIndex] = int(gl_VertexIndex);\n"
<< " gl_PointSize = 1.0f;\n"
programCollection.glslSources.add("vert_0") << glu::VertexSource(src.str());
// vert_1: a single invocation fills all elements in ascending order.
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "void main (void)\n"
<< " for (highp uint ndx = 0u; ndx < "<<BUFFER_ELEMENT_COUNT<<"u; ndx++)\n"
<< " sb_out.result[ndx] = int(ndx);\n"
<< " gl_PointSize = 1.0f;\n"
programCollection.glslSources.add("vert_1") << glu::VertexSource(src.str());
// vert_2: a single invocation fills all elements in descending order.
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "void main (void)\n"
<< " for (int ndx = "<<BUFFER_ELEMENT_COUNT-1<<"; ndx >= 0; ndx--)\n"
<< " sb_out.result[uint(ndx)] = ndx;\n"
<< " gl_PointSize = 1.0f;\n"
programCollection.glslSources.add("vert_2") << glu::VertexSource(src.str());
// frag: shared fragment shader, writes constant white.
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "layout(location = 0) out vec4 o_color;\n"
<< "void main (void)\n"
<< " o_color = vec4(1.0);\n"
programCollection.glslSources.add("frag") << glu::FragmentSource(src.str());
// Execution counts match each vertex shader's strategy: 16 vertices for
// vert_0, a single vertex for vert_1 and vert_2.
TestInstance* createInstance (Context& context) const
vector<deUint32> shadersExecutions;
shadersExecutions.push_back(16u); //vert_0
shadersExecutions.push_back(1u); //vert_1
shadersExecutions.push_back(1u); //vert_2
return new PipelineCacheGraphicTestInstance(context, shadersExecutions);
1184 tcu::TestCaseGroup* createInternallySynchronizedObjects (tcu::TestContext& testCtx)
1186 de::MovePtr<tcu::TestCaseGroup> tests(new tcu::TestCaseGroup(testCtx, "internally_synchronized_objects", "Internally synchronized objects"));
1187 tests->addChild(new PipelineCacheComputeTest(testCtx, "pipeline_cache_compute", "Internally synchronized object VkPipelineCache for compute pipeline is tested"));
1188 tests->addChild(new PipelineCacheGraphicTest(testCtx, "pipeline_cache_graphics", "Internally synchronized object VkPipelineCache for graphics pipeline is tested"));
1189 return tests.release();
1192 } // synchronization