1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Pipeline barrier tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktMemoryPipelineBarrierTests.hpp"
26 #include "vktTestCaseUtil.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
36 #include "tcuMaybe.hpp"
37 #include "tcuTextureUtil.hpp"
38 #include "tcuTestLog.hpp"
39 #include "tcuResultCollector.hpp"
40 #include "tcuTexture.hpp"
41 #include "tcuImageCompare.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
72 using tcu::ConstPixelBufferAccess;
73 using tcu::PixelBufferAccess;
74 using tcu::TextureFormat;
75 using tcu::TextureLevel;
// Upper bounds for the buffer sizes exercised by these tests.
// (Extraction dropped the enum wrapper; restored here.)
enum
{
	MAX_UNIFORM_BUFFER_SIZE = 1024,
	MAX_STORAGE_BUFFER_SIZE = (1<<28)
};
89 // \todo [mika] Add to utilities
// \todo [mika] Add to utilities
// Integer division rounding up. Written as quotient + carry (rather than
// (a + b - 1) / b) to avoid overflow for values near the type's maximum.
template<typename T>
T divRoundUp (const T& a, const T& b)
{
	return (a / b) + (a % b == 0 ? 0 : 1);
}
98 ALL_PIPELINE_STAGES = vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
99 | vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
100 | vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT
101 | vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
102 | vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
103 | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
104 | vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
105 | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
106 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
107 | vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
108 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
109 | vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
110 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
111 | vk::VK_PIPELINE_STAGE_TRANSFER_BIT
112 | vk::VK_PIPELINE_STAGE_HOST_BIT
117 ALL_ACCESSES = vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT
118 | vk::VK_ACCESS_INDEX_READ_BIT
119 | vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
120 | vk::VK_ACCESS_UNIFORM_READ_BIT
121 | vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
122 | vk::VK_ACCESS_SHADER_READ_BIT
123 | vk::VK_ACCESS_SHADER_WRITE_BIT
124 | vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
125 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
126 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
127 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
128 | vk::VK_ACCESS_TRANSFER_READ_BIT
129 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
130 | vk::VK_ACCESS_HOST_READ_BIT
131 | vk::VK_ACCESS_HOST_WRITE_BIT
132 | vk::VK_ACCESS_MEMORY_READ_BIT
133 | vk::VK_ACCESS_MEMORY_WRITE_BIT
// Bitmask describing how a test uses the memory under test; combined with
// operator|/operator& below. (Extraction dropped the enum header; restored.)
enum Usage
{
	// Mapped host read and write
	USAGE_HOST_READ = (0x1u<<0),
	USAGE_HOST_WRITE = (0x1u<<1),

	// Copy and other transfer operations
	USAGE_TRANSFER_SRC = (0x1u<<2),
	USAGE_TRANSFER_DST = (0x1u<<3),

	// Buffer usage flags
	USAGE_INDEX_BUFFER = (0x1u<<4),
	USAGE_VERTEX_BUFFER = (0x1u<<5),

	USAGE_UNIFORM_BUFFER = (0x1u<<6),
	USAGE_STORAGE_BUFFER = (0x1u<<7),

	USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
	USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),

	// \todo [2016-03-09 mika] This is probably almost impossible to do
	USAGE_INDIRECT_BUFFER = (0x1u<<10),

	// Texture usage flags
	USAGE_SAMPLED_IMAGE = (0x1u<<11),
	USAGE_STORAGE_IMAGE = (0x1u<<12),
	USAGE_COLOR_ATTACHMENT = (0x1u<<13),
	USAGE_INPUT_ATTACHMENT = (0x1u<<14),
	USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
};
167 bool supportsDeviceBufferWrites (Usage usage)
169 if (usage & USAGE_TRANSFER_DST)
172 if (usage & USAGE_STORAGE_BUFFER)
175 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
181 bool supportsDeviceImageWrites (Usage usage)
183 if (usage & USAGE_TRANSFER_DST)
186 if (usage & USAGE_STORAGE_IMAGE)
189 if (usage & USAGE_COLOR_ATTACHMENT)
195 // Sequential access enums
198 ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
199 ACCESS_INDEX_READ_BIT,
200 ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
201 ACCESS_UNIFORM_READ_BIT,
202 ACCESS_INPUT_ATTACHMENT_READ_BIT,
203 ACCESS_SHADER_READ_BIT,
204 ACCESS_SHADER_WRITE_BIT,
205 ACCESS_COLOR_ATTACHMENT_READ_BIT,
206 ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
207 ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
208 ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
209 ACCESS_TRANSFER_READ_BIT,
210 ACCESS_TRANSFER_WRITE_BIT,
211 ACCESS_HOST_READ_BIT,
212 ACCESS_HOST_WRITE_BIT,
213 ACCESS_MEMORY_READ_BIT,
214 ACCESS_MEMORY_WRITE_BIT,
219 // Sequential stage enums
222 PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
223 PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
224 PIPELINESTAGE_DRAW_INDIRECT_BIT,
225 PIPELINESTAGE_VERTEX_INPUT_BIT,
226 PIPELINESTAGE_VERTEX_SHADER_BIT,
227 PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
228 PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
229 PIPELINESTAGE_GEOMETRY_SHADER_BIT,
230 PIPELINESTAGE_FRAGMENT_SHADER_BIT,
231 PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
232 PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
233 PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
234 PIPELINESTAGE_COMPUTE_SHADER_BIT,
235 PIPELINESTAGE_TRANSFER_BIT,
236 PIPELINESTAGE_HOST_BIT,
241 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flags)
245 case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT: return PIPELINESTAGE_TOP_OF_PIPE_BIT;
246 case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT: return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
247 case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT: return PIPELINESTAGE_DRAW_INDIRECT_BIT;
248 case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT: return PIPELINESTAGE_VERTEX_INPUT_BIT;
249 case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT: return PIPELINESTAGE_VERTEX_SHADER_BIT;
250 case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
251 case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
252 case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT: return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
253 case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT: return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
254 case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
255 case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
256 case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT: return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
257 case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT: return PIPELINESTAGE_COMPUTE_SHADER_BIT;
258 case vk::VK_PIPELINE_STAGE_TRANSFER_BIT: return PIPELINESTAGE_TRANSFER_BIT;
259 case vk::VK_PIPELINE_STAGE_HOST_BIT: return PIPELINESTAGE_HOST_BIT;
262 DE_FATAL("Unknown pipeline stage flags");
263 return PIPELINESTAGE_LAST;
267 Usage operator| (Usage a, Usage b)
269 return (Usage)((deUint32)a | (deUint32)b);
272 Usage operator& (Usage a, Usage b)
274 return (Usage)((deUint32)a & (deUint32)b);
277 string usageToName (Usage usage)
282 const char* const name;
285 { USAGE_HOST_READ, "host_read" },
286 { USAGE_HOST_WRITE, "host_write" },
288 { USAGE_TRANSFER_SRC, "transfer_src" },
289 { USAGE_TRANSFER_DST, "transfer_dst" },
291 { USAGE_INDEX_BUFFER, "index_buffer" },
292 { USAGE_VERTEX_BUFFER, "vertex_buffer" },
293 { USAGE_UNIFORM_BUFFER, "uniform_buffer" },
294 { USAGE_STORAGE_BUFFER, "storage_buffer" },
295 { USAGE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer" },
296 { USAGE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer" },
297 { USAGE_INDIRECT_BUFFER, "indirect_buffer" },
298 { USAGE_SAMPLED_IMAGE, "image_sampled" },
299 { USAGE_STORAGE_IMAGE, "storage_image" },
300 { USAGE_COLOR_ATTACHMENT, "color_attachment" },
301 { USAGE_INPUT_ATTACHMENT, "input_attachment" },
302 { USAGE_DEPTH_STENCIL_ATTACHMENT, "depth_stencil_attachment" },
305 std::ostringstream stream;
308 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
310 if (usage & usageNames[usageNdx].usage)
317 stream << usageNames[usageNdx].name;
324 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
326 vk::VkBufferUsageFlags flags = 0;
328 if (usage & USAGE_TRANSFER_SRC)
329 flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
331 if (usage & USAGE_TRANSFER_DST)
332 flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
334 if (usage & USAGE_INDEX_BUFFER)
335 flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
337 if (usage & USAGE_VERTEX_BUFFER)
338 flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
340 if (usage & USAGE_INDIRECT_BUFFER)
341 flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
343 if (usage & USAGE_UNIFORM_BUFFER)
344 flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
346 if (usage & USAGE_STORAGE_BUFFER)
347 flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
349 if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
350 flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
352 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
353 flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
358 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
360 vk::VkImageUsageFlags flags = 0;
362 if (usage & USAGE_TRANSFER_SRC)
363 flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
365 if (usage & USAGE_TRANSFER_DST)
366 flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
368 if (usage & USAGE_SAMPLED_IMAGE)
369 flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
371 if (usage & USAGE_STORAGE_IMAGE)
372 flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
374 if (usage & USAGE_COLOR_ATTACHMENT)
375 flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
377 if (usage & USAGE_INPUT_ATTACHMENT)
378 flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
380 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
381 flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
386 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
388 vk::VkPipelineStageFlags flags = 0;
390 if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
391 flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
393 if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
394 flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
396 if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
397 flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
399 if (usage & USAGE_INDIRECT_BUFFER)
400 flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
403 (USAGE_UNIFORM_BUFFER
404 | USAGE_STORAGE_BUFFER
405 | USAGE_UNIFORM_TEXEL_BUFFER
406 | USAGE_STORAGE_TEXEL_BUFFER
407 | USAGE_SAMPLED_IMAGE
408 | USAGE_STORAGE_IMAGE))
410 flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
411 | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
412 | vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
413 | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
414 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
415 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
418 if (usage & USAGE_INPUT_ATTACHMENT)
419 flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
421 if (usage & USAGE_COLOR_ATTACHMENT)
422 flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
424 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
426 flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
427 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
433 vk::VkAccessFlags usageToAccessFlags (Usage usage)
435 vk::VkAccessFlags flags = 0;
437 if (usage & USAGE_HOST_READ)
438 flags |= vk::VK_ACCESS_HOST_READ_BIT;
440 if (usage & USAGE_HOST_WRITE)
441 flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
443 if (usage & USAGE_TRANSFER_SRC)
444 flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
446 if (usage & USAGE_TRANSFER_DST)
447 flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
449 if (usage & USAGE_INDEX_BUFFER)
450 flags |= vk::VK_ACCESS_INDEX_READ_BIT;
452 if (usage & USAGE_VERTEX_BUFFER)
453 flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
455 if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
456 flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
458 if (usage & USAGE_SAMPLED_IMAGE)
459 flags |= vk::VK_ACCESS_SHADER_READ_BIT;
461 if (usage & (USAGE_STORAGE_BUFFER
462 | USAGE_STORAGE_TEXEL_BUFFER
463 | USAGE_STORAGE_IMAGE))
464 flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
466 if (usage & USAGE_INDIRECT_BUFFER)
467 flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
469 if (usage & USAGE_COLOR_ATTACHMENT)
470 flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
472 if (usage & USAGE_INPUT_ATTACHMENT)
473 flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
475 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
476 flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
477 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
485 vk::VkDeviceSize size;
486 vk::VkSharingMode sharing;
489 vk::Move<vk::VkCommandBuffer> createCommandBuffer (const vk::DeviceInterface& vkd,
491 vk::VkCommandPool pool,
492 vk::VkCommandBufferLevel level)
494 const vk::VkCommandBufferAllocateInfo bufferInfo =
496 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
504 return vk::allocateCommandBuffer(vkd, device, &bufferInfo);
507 vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface& vkd,
509 vk::VkCommandPool pool,
510 vk::VkCommandBufferLevel level)
512 const vk::VkCommandBufferInheritanceInfo inheritInfo =
514 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
523 const vk::VkCommandBufferBeginInfo beginInfo =
525 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
528 (level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
531 vk::Move<vk::VkCommandBuffer> commandBuffer (createCommandBuffer(vkd, device, pool, level));
533 vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
535 return commandBuffer;
538 vk::Move<vk::VkCommandPool> createCommandPool (const vk::DeviceInterface& vkd,
540 deUint32 queueFamilyIndex)
542 const vk::VkCommandPoolCreateInfo poolInfo =
544 vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
547 vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
551 return vk::createCommandPool(vkd, device, &poolInfo);
554 vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface& vkd,
556 vk::VkDeviceSize size,
557 vk::VkBufferUsageFlags usage,
558 vk::VkSharingMode sharingMode,
559 const vector<deUint32>& queueFamilies)
561 const vk::VkBufferCreateInfo createInfo =
563 vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
570 (deUint32)queueFamilies.size(),
574 return vk::createBuffer(vkd, device, &createInfo);
577 vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface& vkd,
579 vk::VkDeviceSize size,
580 deUint32 memoryTypeIndex)
582 const vk::VkMemoryAllocateInfo alloc =
584 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
591 return vk::allocateMemory(vkd, device, &alloc);
594 vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface& vki,
595 const vk::DeviceInterface& vkd,
596 vk::VkPhysicalDevice physicalDevice,
599 vk::VkMemoryPropertyFlags properties)
601 const vk::VkMemoryRequirements memoryRequirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
602 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
603 deUint32 memoryTypeIndex;
605 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
607 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
608 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
612 const vk::VkMemoryAllocateInfo allocationInfo =
614 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
616 memoryRequirements.size,
619 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
621 VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
625 catch (const vk::Error& error)
627 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
628 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
630 // Try next memory type/heap if out of memory
634 // Throw all other errors forward
641 TCU_FAIL("Failed to allocate memory for buffer");
644 vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface& vki,
645 const vk::DeviceInterface& vkd,
646 vk::VkPhysicalDevice physicalDevice,
649 vk::VkMemoryPropertyFlags properties)
651 const vk::VkMemoryRequirements memoryRequirements = vk::getImageMemoryRequirements(vkd, device, image);
652 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
653 deUint32 memoryTypeIndex;
655 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
657 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
658 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
662 const vk::VkMemoryAllocateInfo allocationInfo =
664 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
666 memoryRequirements.size,
669 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
671 VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
675 catch (const vk::Error& error)
677 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
678 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
680 // Try next memory type/heap if out of memory
684 // Throw all other errors forward
691 TCU_FAIL("Failed to allocate memory for image");
694 void queueRun (const vk::DeviceInterface& vkd,
696 vk::VkCommandBuffer commandBuffer)
698 const vk::VkSubmitInfo submitInfo =
700 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
705 (const vk::VkPipelineStageFlags*)DE_NULL,
714 VK_CHECK(vkd.queueSubmit(queue, 1, &submitInfo, 0));
715 VK_CHECK(vkd.queueWaitIdle(queue));
718 void* mapMemory (const vk::DeviceInterface& vkd,
720 vk::VkDeviceMemory memory,
721 vk::VkDeviceSize size)
725 VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
730 class ReferenceMemory
733 ReferenceMemory (size_t size);
735 void set (size_t pos, deUint8 val);
736 deUint8 get (size_t pos) const;
737 bool isDefined (size_t pos) const;
739 void setDefined (size_t offset, size_t size, const void* data);
740 void setUndefined (size_t offset, size_t size);
741 void setData (size_t offset, size_t size, const void* data);
743 size_t getSize (void) const { return m_data.size(); }
746 vector<deUint8> m_data;
747 vector<deUint64> m_defined;
750 ReferenceMemory::ReferenceMemory (size_t size)
752 , m_defined (size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
756 void ReferenceMemory::set (size_t pos, deUint8 val)
758 DE_ASSERT(pos < m_data.size());
761 m_defined[pos / 64] |= 0x1ull << (pos % 64);
764 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
766 const deUint8* data = (const deUint8*)data_;
768 DE_ASSERT(offset < m_data.size());
769 DE_ASSERT(offset + size <= m_data.size());
771 // \todo [2016-03-09 mika] Optimize
772 for (size_t pos = 0; pos < size; pos++)
774 m_data[offset + pos] = data[pos];
775 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
779 void ReferenceMemory::setUndefined (size_t offset, size_t size)
781 // \todo [2016-03-09 mika] Optimize
782 for (size_t pos = 0; pos < size; pos++)
783 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
786 deUint8 ReferenceMemory::get (size_t pos) const
788 DE_ASSERT(pos < m_data.size());
789 DE_ASSERT(isDefined(pos));
793 bool ReferenceMemory::isDefined (size_t pos) const
795 DE_ASSERT(pos < m_data.size());
797 return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
803 Memory (const vk::InstanceInterface& vki,
804 const vk::DeviceInterface& vkd,
805 vk::VkPhysicalDevice physicalDevice,
807 vk::VkDeviceSize size,
808 deUint32 memoryTypeIndex,
809 vk::VkDeviceSize maxBufferSize,
810 deInt32 maxImageWidth,
811 deInt32 maxImageHeight);
813 vk::VkDeviceSize getSize (void) const { return m_size; }
814 vk::VkDeviceSize getMaxBufferSize (void) const { return m_maxBufferSize; }
815 bool getSupportBuffers (void) const { return m_maxBufferSize > 0; }
817 deInt32 getMaxImageWidth (void) const { return m_maxImageWidth; }
818 deInt32 getMaxImageHeight (void) const { return m_maxImageHeight; }
819 bool getSupportImages (void) const { return m_maxImageWidth > 0; }
821 const vk::VkMemoryType& getMemoryType (void) const { return m_memoryType; }
822 deUint32 getMemoryTypeIndex (void) const { return m_memoryTypeIndex; }
823 vk::VkDeviceMemory getMemory (void) const { return *m_memory; }
826 const vk::VkDeviceSize m_size;
827 const deUint32 m_memoryTypeIndex;
828 const vk::VkMemoryType m_memoryType;
829 const vk::Unique<vk::VkDeviceMemory> m_memory;
830 const vk::VkDeviceSize m_maxBufferSize;
831 const deInt32 m_maxImageWidth;
832 const deInt32 m_maxImageHeight;
835 vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface& vki,
836 vk::VkPhysicalDevice device,
837 deUint32 memoryTypeIndex)
839 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);
841 DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);
843 return memoryProperties.memoryTypes[memoryTypeIndex];
846 vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface& vkd,
849 vk::VkBufferUsageFlags usage,
850 vk::VkSharingMode sharingMode,
851 const vector<deUint32>& queueFamilies,
853 vk::VkDeviceSize memorySize,
854 deUint32 memoryTypeIndex)
856 vk::VkDeviceSize lastSuccess = 0;
857 vk::VkDeviceSize currentSize = memorySize / 2;
860 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
861 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
863 if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
867 for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
869 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
870 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
872 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
874 lastSuccess = currentSize;
875 currentSize += stepSize;
878 currentSize -= stepSize;
887 // Round size down maximum W * H * 4, where W and H < 4096
888 vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
890 const vk::VkDeviceSize maxTextureSize = 4096;
891 vk::VkDeviceSize maxTexelCount = size / 4;
892 vk::VkDeviceSize bestW = de::max(maxTexelCount, maxTextureSize);
893 vk::VkDeviceSize bestH = maxTexelCount / bestW;
895 // \todo [2016-03-09 mika] Could probably be faster?
896 for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
898 const vk::VkDeviceSize h = maxTexelCount / w;
900 if (bestW * bestH < w * h)
907 return bestW * bestH * 4;
910 // Find RGBA8 image size that has exactly "size" of number of bytes.
911 // "size" must be W * H * 4 where W and H < 4096
912 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
914 const vk::VkDeviceSize maxTextureSize = 4096;
915 vk::VkDeviceSize texelCount = size / 4;
917 DE_ASSERT((size % 4) == 0);
919 // \todo [2016-03-09 mika] Could probably be faster?
920 for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
922 const vk::VkDeviceSize h = texelCount / w;
924 if ((texelCount % w) == 0 && h < maxTextureSize)
925 return IVec2((int)w, (int)h);
928 DE_FATAL("Invalid size");
929 return IVec2(-1, -1);
932 IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface& vkd,
935 vk::VkImageUsageFlags usage,
936 vk::VkSharingMode sharingMode,
937 const vector<deUint32>& queueFamilies,
939 vk::VkDeviceSize memorySize,
940 deUint32 memoryTypeIndex)
942 IVec2 lastSuccess (0);
946 const deUint32 texelCount = (deUint32)(memorySize / 4);
947 const deUint32 width = (deUint32)deFloatSqrt((float)texelCount);
948 const deUint32 height = texelCount / width;
950 currentSize[0] = deMaxu32(width, height);
951 currentSize[1] = deMinu32(width, height);
954 for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
956 const vk::VkImageCreateInfo createInfo =
958 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
962 vk::VK_IMAGE_TYPE_2D,
963 vk::VK_FORMAT_R8G8B8A8_UNORM,
965 (deUint32)currentSize[0],
966 (deUint32)currentSize[1],
970 vk::VK_SAMPLE_COUNT_1_BIT,
971 vk::VK_IMAGE_TILING_OPTIMAL,
974 (deUint32)queueFamilies.size(),
976 vk::VK_IMAGE_LAYOUT_UNDEFINED
978 const vk::Unique<vk::VkImage> image (vk::createImage(vkd, device, &createInfo));
979 const vk::VkMemoryRequirements requirements (vk::getImageMemoryRequirements(vkd, device, *image));
981 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
983 lastSuccess = currentSize;
984 currentSize[0] += stepSize;
985 currentSize[1] += stepSize;
989 currentSize[0] -= stepSize;
990 currentSize[1] -= stepSize;
1000 Memory::Memory (const vk::InstanceInterface& vki,
1001 const vk::DeviceInterface& vkd,
1002 vk::VkPhysicalDevice physicalDevice,
1003 vk::VkDevice device,
1004 vk::VkDeviceSize size,
1005 deUint32 memoryTypeIndex,
1006 vk::VkDeviceSize maxBufferSize,
1007 deInt32 maxImageWidth,
1008 deInt32 maxImageHeight)
1010 , m_memoryTypeIndex (memoryTypeIndex)
1011 , m_memoryType (getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
1012 , m_memory (allocMemory(vkd, device, size, memoryTypeIndex))
1013 , m_maxBufferSize (maxBufferSize)
1014 , m_maxImageWidth (maxImageWidth)
1015 , m_maxImageHeight (maxImageHeight)
1022 Context (const vk::InstanceInterface& vki,
1023 const vk::DeviceInterface& vkd,
1024 vk::VkPhysicalDevice physicalDevice,
1025 vk::VkDevice device,
1027 deUint32 queueFamilyIndex,
1028 const vector<pair<deUint32, vk::VkQueue> >& queues,
1029 const vk::ProgramCollection<vk::ProgramBinary>& binaryCollection)
1032 , m_physicalDevice (physicalDevice)
1035 , m_queueFamilyIndex (queueFamilyIndex)
1037 , m_commandPool (createCommandPool(vkd, device, queueFamilyIndex))
1038 , m_binaryCollection (binaryCollection)
1040 for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
1041 m_queueFamilies.push_back(m_queues[queueNdx].first);
1044 const vk::InstanceInterface& getInstanceInterface (void) const { return m_vki; }
1045 vk::VkPhysicalDevice getPhysicalDevice (void) const { return m_physicalDevice; }
1046 vk::VkDevice getDevice (void) const { return m_device; }
1047 const vk::DeviceInterface& getDeviceInterface (void) const { return m_vkd; }
1048 vk::VkQueue getQueue (void) const { return m_queue; }
1049 deUint32 getQueueFamily (void) const { return m_queueFamilyIndex; }
1050 const vector<pair<deUint32, vk::VkQueue> >& getQueues (void) const { return m_queues; }
1051 const vector<deUint32> getQueueFamilies (void) const { return m_queueFamilies; }
1052 vk::VkCommandPool getCommandPool (void) const { return *m_commandPool; }
1053 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_binaryCollection; }
1056 const vk::InstanceInterface& m_vki;
1057 const vk::DeviceInterface& m_vkd;
1058 const vk::VkPhysicalDevice m_physicalDevice;
1059 const vk::VkDevice m_device;
1060 const vk::VkQueue m_queue;
1061 const deUint32 m_queueFamilyIndex;
1062 const vector<pair<deUint32, vk::VkQueue> > m_queues;
1063 const vk::Unique<vk::VkCommandPool> m_commandPool;
1064 const vk::ProgramCollection<vk::ProgramBinary>& m_binaryCollection;
1065 vector<deUint32> m_queueFamilies;
1068 class PrepareContext
1071 PrepareContext (const Context& context,
1072 const Memory& memory)
1073 : m_context (context)
1078 const Memory& getMemory (void) const { return m_memory; }
1079 const Context& getContext (void) const { return m_context; }
1080 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
1082 void setBuffer (vk::Move<vk::VkBuffer> buffer,
1083 vk::VkDeviceSize size)
1085 DE_ASSERT(!m_currentImage);
1086 DE_ASSERT(!m_currentBuffer);
1088 m_currentBuffer = buffer;
1089 m_currentBufferSize = size;
1092 vk::VkBuffer getBuffer (void) const { return *m_currentBuffer; }
1093 vk::VkDeviceSize getBufferSize (void) const
1095 DE_ASSERT(m_currentBuffer);
1096 return m_currentBufferSize;
1099 void releaseBuffer (void) { m_currentBuffer.disown(); }
1101 void setImage (vk::Move<vk::VkImage> image,
1102 vk::VkImageLayout layout,
1103 vk::VkDeviceSize memorySize,
1107 DE_ASSERT(!m_currentImage);
1108 DE_ASSERT(!m_currentBuffer);
1110 m_currentImage = image;
1111 m_currentImageMemorySize = memorySize;
1112 m_currentImageLayout = layout;
1113 m_currentImageWidth = width;
1114 m_currentImageHeight = height;
1117 void setImageLayout (vk::VkImageLayout layout)
1119 DE_ASSERT(m_currentImage);
1120 m_currentImageLayout = layout;
1123 vk::VkImage getImage (void) const { return *m_currentImage; }
1124 deInt32 getImageWidth (void) const
1126 DE_ASSERT(m_currentImage);
1127 return m_currentImageWidth;
1129 deInt32 getImageHeight (void) const
1131 DE_ASSERT(m_currentImage);
1132 return m_currentImageHeight;
1134 vk::VkDeviceSize getImageMemorySize (void) const
1136 DE_ASSERT(m_currentImage);
1137 return m_currentImageMemorySize;
1140 void releaseImage (void) { m_currentImage.disown(); }
1142 vk::VkImageLayout getImageLayout (void) const
1144 DE_ASSERT(m_currentImage);
1145 return m_currentImageLayout;
1149 const Context& m_context;
1150 const Memory& m_memory;
1152 vk::Move<vk::VkBuffer> m_currentBuffer;
1153 vk::VkDeviceSize m_currentBufferSize;
1155 vk::Move<vk::VkImage> m_currentImage;
1156 vk::VkDeviceSize m_currentImageMemorySize;
1157 vk::VkImageLayout m_currentImageLayout;
1158 deInt32 m_currentImageWidth;
1159 deInt32 m_currentImageHeight;
1162 class ExecuteContext
1165 ExecuteContext (const Context& context)
1166 : m_context (context)
1170 const Context& getContext (void) const { return m_context; }
1171 void setMapping (void* ptr) { m_mapping = ptr; }
1172 void* getMapping (void) const { return m_mapping; }
1175 const Context& m_context;
1182 VerifyContext (TestLog& log,
1183 tcu::ResultCollector& resultCollector,
1184 const Context& context,
1185 vk::VkDeviceSize size)
1187 , m_resultCollector (resultCollector)
1188 , m_context (context)
1189 , m_reference ((size_t)size)
1193 const Context& getContext (void) const { return m_context; }
1194 TestLog& getLog (void) const { return m_log; }
1195 tcu::ResultCollector& getResultCollector (void) const { return m_resultCollector; }
1197 ReferenceMemory& getReference (void) { return m_reference; }
1198 TextureLevel& getReferenceImage (void) { return m_referenceImage;}
1202 tcu::ResultCollector& m_resultCollector;
1203 const Context& m_context;
1204 ReferenceMemory m_reference;
1205 TextureLevel m_referenceImage;
1211 // Constructor should allocate all non-vulkan resources.
1212 virtual ~Command (void) {}
1214 // Get name of the command
1215 virtual const char* getName (void) const = 0;
1217 // Log prepare operations
1218 virtual void logPrepare (TestLog&, size_t) const {}
1219 // Log executed operations
1220 virtual void logExecute (TestLog&, size_t) const {}
1222 // Prepare should allocate all vulkan resources and resources that require
1223 // that buffer or memory has been already allocated. This should build all
1224 // command buffers etc.
1225 virtual void prepare (PrepareContext&) {}
1227 // Execute command. Write or read mapped memory, submit commands to queue
1229 virtual void execute (ExecuteContext&) {}
1231 // Verify that results are correct.
1232 virtual void verify (VerifyContext&, size_t) {}
1235 // Allow only inheritance
1240 Command (const Command&);
1241 Command& operator& (const Command&);
// Maps the test memory allocation and publishes the host pointer via
// ExecuteContext::setMapping(); paired with the UnMap command below.
1244 class Map : public Command
1249 const char* getName (void) const { return "Map"; }
1252 void logExecute (TestLog& log, size_t commandIndex) const
1254 log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1257 void prepare (PrepareContext& context)
// Cache the memory handle and size; the actual vkMapMemory happens at execute.
1259 m_memory = context.getMemory().getMemory();
1260 m_size = context.getMemory().getSize();
1263 void execute (ExecuteContext& context)
1265 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1266 const vk::VkDevice device = context.getContext().getDevice();
// Map the whole allocation and hand the pointer to subsequent commands.
1268 context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1272 vk::VkDeviceMemory m_memory;
1273 vk::VkDeviceSize m_size;
// Unmaps the test memory allocation and clears the context mapping pointer.
1276 class UnMap : public Command
1281 const char* getName (void) const { return "UnMap"; }
1283 void logExecute (TestLog& log, size_t commandIndex) const
1285 log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1288 void prepare (PrepareContext& context)
1290 m_memory = context.getMemory().getMemory();
1293 void execute (ExecuteContext& context)
1295 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1296 const vk::VkDevice device = context.getContext().getDevice();
1298 vkd.unmapMemory(device, m_memory);
// Clear the shared mapping so later commands cannot use a stale pointer.
1299 context.setMapping(DE_NULL);
1303 vk::VkDeviceMemory m_memory;
// Invalidates the mapped memory range so host reads see device writes
// (required for non-coherent memory before reading the mapping).
1306 class Invalidate : public Command
1309 Invalidate (void) {}
1310 ~Invalidate (void) {}
1311 const char* getName (void) const { return "Invalidate"; }
1313 void logExecute (TestLog& log, size_t commandIndex) const
1315 log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1318 void prepare (PrepareContext& context)
1320 m_memory = context.getMemory().getMemory();
1321 m_size = context.getMemory().getSize();
1324 void execute (ExecuteContext& context)
1326 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1327 const vk::VkDevice device = context.getContext().getDevice();
// Invalidate the full range [0, m_size).
1329 vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1333 vk::VkDeviceMemory m_memory;
1334 vk::VkDeviceSize m_size;
// Flushes the mapped memory range so device sees host writes
// (counterpart of Invalidate for the host-write direction).
1337 class Flush : public Command
1342 const char* getName (void) const { return "Flush"; }
1344 void logExecute (TestLog& log, size_t commandIndex) const
1346 log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1349 void prepare (PrepareContext& context)
1351 m_memory = context.getMemory().getMemory();
1352 m_size = context.getMemory().getSize();
1355 void execute (ExecuteContext& context)
1357 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1358 const vk::VkDevice device = context.getContext().getDevice();
// Flush the full range [0, m_size).
1360 vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1364 vk::VkDeviceMemory m_memory;
1365 vk::VkDeviceSize m_size;
1368 // Host memory reads and writes
// Reads and/or writes the mapped memory from the host. Writes are
// pseudo-random bytes derived from m_seed; read-write mode XORs each
// byte with a seed-derived mask. verify() replays the same RNG stream
// against the byte-level reference memory.
1369 class HostMemoryAccess : public Command
1372 HostMemoryAccess (bool read, bool write, deUint32 seed);
1373 ~HostMemoryAccess (void) {}
1374 const char* getName (void) const { return "HostMemoryAccess"; }
1376 void logExecute (TestLog& log, size_t commandIndex) const;
1377 void prepare (PrepareContext& context);
1378 void execute (ExecuteContext& context);
1379 void verify (VerifyContext& context, size_t commandIndex);
1384 const deUint32 m_seed;
// Bytes read back during execute(); compared against reference in verify().
1387 vector<deUint8> m_readData;
1390 HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1397 void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1399 log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "") << ", seed: " << m_seed << TestLog::EndMessage;
1402 void HostMemoryAccess::prepare (PrepareContext& context)
1404 m_size = (size_t)context.getMemory().getSize();
1407 m_readData.resize(m_size, 0);
1410 void HostMemoryAccess::execute (ExecuteContext& context)
1412 de::Random rng (m_seed);
1413 deUint8* const ptr = (deUint8*)context.getMapping();
// Read-write: record each byte, then overwrite it with value XOR mask.
1415 if (m_read && m_write)
1417 for (size_t pos = 0; pos < m_size; pos++)
1419 const deUint8 mask = rng.getUint8();
1420 const deUint8 value = ptr[pos];
1422 m_readData[pos] = value;
1423 ptr[pos] = value ^ mask;
// Read-only branch: just capture the current bytes.
1428 for (size_t pos = 0; pos < m_size; pos++)
1430 const deUint8 value = ptr[pos];
1432 m_readData[pos] = value;
// Write-only branch: fill with seed-derived random bytes.
1437 for (size_t pos = 0; pos < m_size; pos++)
1439 const deUint8 value = rng.getUint8();
1445 DE_FATAL("Host memory access without read or write.");
1448 void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
1450 tcu::ResultCollector& resultCollector = context.getResultCollector();
1451 ReferenceMemory& reference = context.getReference();
// Same seed as execute() so the mask/value streams match.
1452 de::Random rng (m_seed);
1454 if (m_read && m_write)
1456 for (size_t pos = 0; pos < m_size; pos++)
1458 const deUint8 mask = rng.getUint8();
1459 const deUint8 value = m_readData[pos];
// Only compare bytes whose reference content is defined.
1461 if (reference.isDefined(pos))
1463 if (value != reference.get(pos))
1465 resultCollector.fail(
1466 de::toString(commandIndex) + ":" + getName()
1467 + " Result differs from reference, Expected: "
1468 + de::toString(tcu::toHex<8>(reference.get(pos)))
1470 + de::toString(tcu::toHex<8>(value))
1472 + de::toString(pos));
// Mirror the XOR write into the reference memory.
1476 reference.set(pos, reference.get(pos) ^ mask);
1482 for (size_t pos = 0; pos < m_size; pos++)
1484 const deUint8 value = m_readData[pos];
1486 if (reference.isDefined(pos))
1488 if (value != reference.get(pos))
1490 resultCollector.fail(
1491 de::toString(commandIndex) + ":" + getName()
1492 + " Result differs from reference, Expected: "
1493 + de::toString(tcu::toHex<8>(reference.get(pos)))
1495 + de::toString(tcu::toHex<8>(value))
1497 + de::toString(pos));
// Write-only: update the reference with the same RNG stream as execute().
1505 for (size_t pos = 0; pos < m_size; pos++)
1507 const deUint8 value = rng.getUint8();
1509 reference.set(pos, value);
1513 DE_FATAL("Host memory access without read or write.");
// Creates a VkBuffer of the maximum size the memory allocation supports
// and stores it in the prepare context for later binding/use.
1516 class CreateBuffer : public Command
1519 CreateBuffer (vk::VkBufferUsageFlags usage,
1520 vk::VkSharingMode sharing);
1521 ~CreateBuffer (void) {}
1522 const char* getName (void) const { return "CreateBuffer"; }
1524 void logPrepare (TestLog& log, size_t commandIndex) const;
1525 void prepare (PrepareContext& context);
1528 const vk::VkBufferUsageFlags m_usage;
1529 const vk::VkSharingMode m_sharing;
1532 CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags usage,
1533 vk::VkSharingMode sharing)
1535 , m_sharing (sharing)
1539 void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1541 log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1544 void CreateBuffer::prepare (PrepareContext& context)
1546 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1547 const vk::VkDevice device = context.getContext().getDevice();
// Size the buffer to the largest that fits the memory under test.
1548 const vk::VkDeviceSize bufferSize = context.getMemory().getMaxBufferSize();
1549 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1551 context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
// Takes ownership of the context's buffer at prepare() time and destroys
// it during execute().
1554 class DestroyBuffer : public Command
1557 DestroyBuffer (void);
1558 ~DestroyBuffer (void) {}
1559 const char* getName (void) const { return "DestroyBuffer"; }
1561 void logExecute (TestLog& log, size_t commandIndex) const;
1562 void prepare (PrepareContext& context);
1563 void execute (ExecuteContext& context);
1566 vk::Move<vk::VkBuffer> m_buffer;
1569 DestroyBuffer::DestroyBuffer (void)
1573 void DestroyBuffer::prepare (PrepareContext& context)
// Move the buffer handle out of the context; releaseBuffer() drops its claim.
1575 m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1576 context.releaseBuffer();
1579 void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
1581 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1584 void DestroyBuffer::execute (ExecuteContext& context)
1586 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1587 const vk::VkDevice device = context.getContext().getDevice();
// disown() so the Move wrapper will not double-destroy the handle.
1589 vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
// Binds the test memory allocation to the context's buffer at offset 0.
1592 class BindBufferMemory : public Command
1595 BindBufferMemory (void) {}
1596 ~BindBufferMemory (void) {}
1597 const char* getName (void) const { return "BindBufferMemory"; }
1599 void logPrepare (TestLog& log, size_t commandIndex) const;
1600 void prepare (PrepareContext& context);
1603 void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1605 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1608 void BindBufferMemory::prepare (PrepareContext& context)
1610 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1611 const vk::VkDevice device = context.getContext().getDevice();
1613 VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
// Creates a 2D RGBA8 optimal-tiling image sized to the maximum dimensions
// the memory allocation supports, and resets the reference image in verify().
1616 class CreateImage : public Command
1619 CreateImage (vk::VkImageUsageFlags usage,
1620 vk::VkSharingMode sharing);
1621 ~CreateImage (void) {}
1622 const char* getName (void) const { return "CreateImage"; }
1624 void logPrepare (TestLog& log, size_t commandIndex) const;
1625 void prepare (PrepareContext& context);
1626 void verify (VerifyContext& context, size_t commandIndex);
1629 const vk::VkImageUsageFlags m_usage;
1630 const vk::VkSharingMode m_sharing;
1631 deInt32 m_imageWidth;
1632 deInt32 m_imageHeight;
1635 CreateImage::CreateImage (vk::VkImageUsageFlags usage,
1636 vk::VkSharingMode sharing)
1638 , m_sharing (sharing)
1642 void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
1644 log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage) << TestLog::EndMessage;
1647 void CreateImage::prepare (PrepareContext& context)
1649 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1650 const vk::VkDevice device = context.getContext().getDevice();
1651 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1653 m_imageWidth = context.getMemory().getMaxImageWidth();
1654 m_imageHeight = context.getMemory().getMaxImageHeight();
// NOTE(review): several VkImageCreateInfo fields are not visible in this excerpt.
1657 const vk::VkImageCreateInfo createInfo =
1659 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1663 vk::VK_IMAGE_TYPE_2D,
1664 vk::VK_FORMAT_R8G8B8A8_UNORM,
1666 (deUint32)m_imageWidth,
1667 (deUint32)m_imageHeight,
1671 vk::VK_SAMPLE_COUNT_1_BIT,
1672 vk::VK_IMAGE_TILING_OPTIMAL,
1675 (deUint32)queueFamilies.size(),
1677 vk::VK_IMAGE_LAYOUT_UNDEFINED
1679 vk::Move<vk::VkImage> image (createImage(vkd, device, &createInfo));
1680 const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, *image);
1682 context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
1686 void CreateImage::verify (VerifyContext& context, size_t)
// Fresh reference image matching the created image's format and size.
1688 context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
// Takes ownership of the context's image at prepare() time and destroys
// it during execute(); mirrors DestroyBuffer.
1691 class DestroyImage : public Command
1694 DestroyImage (void);
1695 ~DestroyImage (void) {}
1696 const char* getName (void) const { return "DestroyImage"; }
1698 void logExecute (TestLog& log, size_t commandIndex) const;
1699 void prepare (PrepareContext& context);
1700 void execute (ExecuteContext& context);
1703 vk::Move<vk::VkImage> m_image;
1706 DestroyImage::DestroyImage (void)
1710 void DestroyImage::prepare (PrepareContext& context)
1712 m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL))
1713 context.releaseImage();
1717 void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
1719 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1722 void DestroyImage::execute (ExecuteContext& context)
1724 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1725 const vk::VkDevice device = context.getContext().getDevice();
// disown() so the Move wrapper will not double-destroy the handle.
1727 vkd.destroyImage(device, m_image.disown(), DE_NULL);
// Binds the test memory allocation to the context's image at offset 0.
1730 class BindImageMemory : public Command
1733 BindImageMemory (void) {}
1734 ~BindImageMemory (void) {}
1735 const char* getName (void) const { return "BindImageMemory"; }
1737 void logPrepare (TestLog& log, size_t commandIndex) const;
1738 void prepare (PrepareContext& context);
1741 void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1743 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1746 void BindImageMemory::prepare (PrepareContext& context)
1748 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1749 const vk::VkDevice device = context.getContext().getDevice();
1751 VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
// Waits until the queue has finished all submitted work.
// Fix: getName() returned the typo string "QueuetWaitIdle"; corrected to
// "QueueWaitIdle". The string is only used in log output in this file.
1754 class QueueWaitIdle : public Command
1757 QueueWaitIdle (void) {}
1758 ~QueueWaitIdle (void) {}
1759 const char* getName (void) const { return "QueueWaitIdle"; }
1761 void logExecute (TestLog& log, size_t commandIndex) const;
1762 void execute (ExecuteContext& context);
1765 void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1767 log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1770 void QueueWaitIdle::execute (ExecuteContext& context)
1772 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1773 const vk::VkQueue queue = context.getContext().getQueue();
1775 VK_CHECK(vkd.queueWaitIdle(queue));
// Waits until the whole device is idle (all queues drained).
1778 class DeviceWaitIdle : public Command
1781 DeviceWaitIdle (void) {}
1782 ~DeviceWaitIdle (void) {}
1783 const char* getName (void) const { return "DeviceWaitIdle"; }
1785 void logExecute (TestLog& log, size_t commandIndex) const;
1786 void execute (ExecuteContext& context);
1789 void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1791 log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1794 void DeviceWaitIdle::execute (ExecuteContext& context)
1796 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1797 const vk::VkDevice device = context.getContext().getDevice();
1799 VK_CHECK(vkd.deviceWaitIdle(device));
// Context handed to CmdCommand::submit(): wraps the prepare context plus the
// command buffer being recorded. NOTE(review): the class header line is not
// visible in this excerpt.
1805 SubmitContext (const PrepareContext& context,
1806 const vk::VkCommandBuffer commandBuffer)
1807 : m_context (context)
1808 , m_commandBuffer (commandBuffer)
1812 const Memory& getMemory (void) const { return m_context.getMemory(); }
1813 const Context& getContext (void) const { return m_context.getContext(); }
1814 vk::VkCommandBuffer getCommandBuffer (void) const { return m_commandBuffer; }
1816 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
1817 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
1819 vk::VkImage getImage (void) const { return m_context.getImage(); }
1820 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
1821 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
1824 const PrepareContext& m_context;
1825 const vk::VkCommandBuffer m_commandBuffer;
// Interface for commands recorded into a command buffer (as opposed to
// host-side Command). NOTE(review): the class header line is not visible
// in this excerpt.
1831 virtual ~CmdCommand (void) {}
1832 virtual const char* getName (void) const = 0;
1834 // Log things that are done during prepare
1835 virtual void logPrepare (TestLog&, size_t) const {}
1836 // Log submitted calls etc.
1837 virtual void logSubmit (TestLog&, size_t) const {}
1839 // Allocate vulkan resources and prepare for submit.
1840 virtual void prepare (PrepareContext&) {}
1842 // Submit commands to command buffer.
1843 virtual void submit (SubmitContext&) {}
// Verify that results are correct.
1846 virtual void verify (VerifyContext&, size_t) {}
// Host-side command that records a list of CmdCommands into a primary
// command buffer (prepare) and submits it to the queue (execute).
// Owns the CmdCommand pointers and deletes them in the destructor.
1849 class SubmitCommandBuffer : public Command
1852 SubmitCommandBuffer (const vector<CmdCommand*>& commands);
1853 ~SubmitCommandBuffer (void);
1855 const char* getName (void) const { return "SubmitCommandBuffer"; }
1856 void logExecute (TestLog& log, size_t commandIndex) const;
1857 void logPrepare (TestLog& log, size_t commandIndex) const;
1859 // Allocate command buffer and submit commands to command buffer
1860 void prepare (PrepareContext& context);
1861 void execute (ExecuteContext& context);
1863 // Verify that results are correct.
1864 void verify (VerifyContext& context, size_t commandIndex);
1867 vector<CmdCommand*> m_commands;
1868 vk::Move<vk::VkCommandBuffer> m_commandBuffer;
1871 SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
1872 : m_commands (commands)
1876 SubmitCommandBuffer::~SubmitCommandBuffer (void)
1878 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1879 delete m_commands[cmdNdx];
1882 void SubmitCommandBuffer::prepare (PrepareContext& context)
1884 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1885 const vk::VkDevice device = context.getContext().getDevice();
1886 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
1888 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
// First let every sub-command allocate its resources...
1890 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1892 CmdCommand& command = *m_commands[cmdNdx];
1894 command.prepare(context);
// ...then record them all into the freshly begun command buffer.
1898 SubmitContext submitContext (context, *m_commandBuffer);
1900 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1902 CmdCommand& command = *m_commands[cmdNdx];
1904 command.submit(submitContext);
1907 VK_CHECK(vkd.endCommandBuffer(*m_commandBuffer));
// Submits the recorded command buffer to the queue (no fence, no semaphores).
1911 void SubmitCommandBuffer::execute (ExecuteContext& context)
1913 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1914 const vk::VkCommandBuffer cmd = *m_commandBuffer;
1915 const vk::VkQueue queue = context.getContext().getQueue();
1916 const vk::VkSubmitInfo submit =
1918 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1923 (const vk::VkPipelineStageFlags*)DE_NULL,
// Fix: check the result of queueSubmit like every other Vulkan call in this
// file (VK_CHECK is used for queueWaitIdle, deviceWaitIdle, bind*, endCommandBuffer);
// a silently failed submit would make later verification meaningless.
1932 VK_CHECK(vkd.queueSubmit(queue, 1, &submit, 0));
// verify/logPrepare/logExecute fan out to the owned sub-commands, each
// wrapped in a named log section "<index>:SubmitCommandBuffer".
1935 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1937 const string sectionName (de::toString(commandIndex) + ":" + getName());
1938 const tcu::ScopedLogSection section (context.getLog(), sectionName, sectionName);
1940 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1941 m_commands[cmdNdx]->verify(context, cmdNdx);
1944 void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1946 const string sectionName (de::toString(commandIndex) + ":" + getName());
1947 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1949 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1950 m_commands[cmdNdx]->logPrepare(log, cmdNdx);
1953 void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1955 const string sectionName (de::toString(commandIndex) + ":" + getName());
1956 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1958 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1959 m_commands[cmdNdx]->logSubmit(log, cmdNdx);
// Records a vkCmdPipelineBarrier of one of three kinds (global memory
// barrier, whole-buffer barrier, or whole-image barrier with optional
// layout transition). NOTE(review): the Type enum and m_type member lines
// are not visible in this excerpt.
1962 class PipelineBarrier : public CmdCommand
1972 PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1973 const vk::VkAccessFlags srcAccesses,
1974 const vk::VkPipelineStageFlags dstStages,
1975 const vk::VkAccessFlags dstAccesses,
1977 const tcu::Maybe<vk::VkImageLayout> imageLayout);
1978 ~PipelineBarrier (void) {}
1979 const char* getName (void) const { return "PipelineBarrier"; }
1981 void logSubmit (TestLog& log, size_t commandIndex) const;
1982 void submit (SubmitContext& context);
1985 const vk::VkPipelineStageFlags m_srcStages;
1986 const vk::VkAccessFlags m_srcAccesses;
1987 const vk::VkPipelineStageFlags m_dstStages;
1988 const vk::VkAccessFlags m_dstAccesses;
1990 const tcu::Maybe<vk::VkImageLayout> m_imageLayout;
1993 PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1994 const vk::VkAccessFlags srcAccesses,
1995 const vk::VkPipelineStageFlags dstStages,
1996 const vk::VkAccessFlags dstAccesses,
1998 const tcu::Maybe<vk::VkImageLayout> imageLayout)
1999 : m_srcStages (srcStages)
2000 , m_srcAccesses (srcAccesses)
2001 , m_dstStages (dstStages)
2002 , m_dstAccesses (dstAccesses)
2004 , m_imageLayout (imageLayout)
2008 void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
2010 log << TestLog::Message << commandIndex << ":" << getName()
2011 << " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
2012 : m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
2013 : "Image pipeline barrier")
2014 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2015 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
2018 void PipelineBarrier::submit (SubmitContext& context)
2020 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2021 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
// Global: a single VkMemoryBarrier covering all memory.
2027 const vk::VkMemoryBarrier barrier =
2029 vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
2036 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
// Buffer: barrier on the whole test buffer, no queue-family transfer.
2042 const vk::VkBufferMemoryBarrier barrier =
2044 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2050 VK_QUEUE_FAMILY_IGNORED,
2051 VK_QUEUE_FAMILY_IGNORED,
2053 context.getBuffer(),
2058 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
// Image: barrier on the color aspect of the whole test image.
2064 const vk::VkImageMemoryBarrier barrier =
2066 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2075 VK_QUEUE_FAMILY_IGNORED,
2076 VK_QUEUE_FAMILY_IGNORED,
2080 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2086 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2091 DE_FATAL("Unknown pipeline barrier type");
// Records an image layout transition barrier (srcLayout -> dstLayout) on
// the whole test image; the transition makes the memory contents
// undefined, which verify() reflects in the reference memory.
2095 class ImageTransition : public CmdCommand
2098 ImageTransition (vk::VkPipelineStageFlags srcStages,
2099 vk::VkAccessFlags srcAccesses,
2101 vk::VkPipelineStageFlags dstStages,
2102 vk::VkAccessFlags dstAccesses,
2104 vk::VkImageLayout srcLayout,
2105 vk::VkImageLayout dstLayout);
2107 ~ImageTransition (void) {}
2108 const char* getName (void) const { return "ImageTransition"; }
2110 void prepare (PrepareContext& context);
2111 void logSubmit (TestLog& log, size_t commandIndex) const;
2112 void submit (SubmitContext& context);
2113 void verify (VerifyContext& context, size_t);
2116 const vk::VkPipelineStageFlags m_srcStages;
2117 const vk::VkAccessFlags m_srcAccesses;
2118 const vk::VkPipelineStageFlags m_dstStages;
2119 const vk::VkAccessFlags m_dstAccesses;
2120 const vk::VkImageLayout m_srcLayout;
2121 const vk::VkImageLayout m_dstLayout;
2123 vk::VkDeviceSize m_imageMemorySize;
2126 ImageTransition::ImageTransition (vk::VkPipelineStageFlags srcStages,
2127 vk::VkAccessFlags srcAccesses,
2129 vk::VkPipelineStageFlags dstStages,
2130 vk::VkAccessFlags dstAccesses,
2132 vk::VkImageLayout srcLayout,
2133 vk::VkImageLayout dstLayout)
2134 : m_srcStages (srcStages)
2135 , m_srcAccesses (srcAccesses)
2136 , m_dstStages (dstStages)
2137 , m_dstAccesses (dstAccesses)
2138 , m_srcLayout (srcLayout)
2139 , m_dstLayout (dstLayout)
2143 void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2145 log << TestLog::Message << commandIndex << ":" << getName()
2146 << " Image transition pipeline barrier"
2147 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2148 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2149 << ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2152 void ImageTransition::prepare (PrepareContext& context)
// Transition is only valid if the current layout matches srcLayout, or
// either side is UNDEFINED.
2154 DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);
2156 context.setImageLayout(m_dstLayout);
2157 m_imageMemorySize = context.getImageMemorySize();
2160 void ImageTransition::submit (SubmitContext& context)
2162 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2163 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2164 const vk::VkImageMemoryBarrier barrier =
2166 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2175 VK_QUEUE_FAMILY_IGNORED,
2176 VK_QUEUE_FAMILY_IGNORED,
2180 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2186 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2189 void ImageTransition::verify (VerifyContext& context, size_t)
// After a layout transition the backing memory contents are undefined.
2191 context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
// Records vkCmdFillBuffer over the buffer (size rounded down to a multiple
// of 4, as required by the fill command) and mirrors the 32-bit fill
// pattern into the byte-level reference memory.
2194 class FillBuffer : public CmdCommand
2197 FillBuffer (deUint32 value) : m_value(value) {}
2198 ~FillBuffer (void) {}
2199 const char* getName (void) const { return "FillBuffer"; }
2201 void logSubmit (TestLog& log, size_t commandIndex) const;
2202 void submit (SubmitContext& context);
2203 void verify (VerifyContext& context, size_t commandIndex);
2206 const deUint32 m_value;
2207 vk::VkDeviceSize m_bufferSize;
2210 void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2212 log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2215 void FillBuffer::submit (SubmitContext& context)
2217 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2218 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2219 const vk::VkBuffer buffer = context.getBuffer();
2220 const vk::VkDeviceSize sizeMask = ~(0x3ull); // \note Round down to multiple of 4
2222 m_bufferSize = sizeMask & context.getBufferSize();
2223 vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2226 void FillBuffer::verify (VerifyContext& context, size_t)
2228 ReferenceMemory& reference = context.getReference();
2230 for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
// Byte order of the 32-bit pattern depends on host endianness.
2232 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2233 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2235 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
// Records vkCmdUpdateBuffer calls that fill the buffer with seed-derived
// random bytes, in 64KiB blocks; verify() replays the same RNG stream
// into the reference memory.
2240 class UpdateBuffer : public CmdCommand
2243 UpdateBuffer (deUint32 seed) : m_seed(seed) {}
2244 ~UpdateBuffer (void) {}
2245 const char* getName (void) const { return "UpdateBuffer"; }
2247 void logSubmit (TestLog& log, size_t commandIndex) const;
2248 void submit (SubmitContext& context);
2249 void verify (VerifyContext& context, size_t commandIndex);
2252 const deUint32 m_seed;
2253 vk::VkDeviceSize m_bufferSize;
2256 void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2258 log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2261 void UpdateBuffer::submit (SubmitContext& context)
2263 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2264 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2265 const vk::VkBuffer buffer = context.getBuffer();
2266 const size_t blockSize = 65536;
2267 std::vector<deUint8> data (blockSize, 0);
2268 de::Random rng (m_seed);
2270 m_bufferSize = context.getBufferSize();
2272 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2274 for (size_t ndx = 0; ndx < data.size(); ndx++)
2275 data[ndx] = rng.getUint8();
// Last block may be shorter than blockSize.
2277 if (m_bufferSize - updated > blockSize)
2278 vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2280 vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
2284 void UpdateBuffer::verify (VerifyContext& context, size_t)
2286 ReferenceMemory& reference = context.getReference();
2287 const size_t blockSize = 65536;
2288 vector<deUint8> data (blockSize, 0);
// Same seed and block loop as submit() so the byte streams match exactly.
2289 de::Random rng (m_seed);
2291 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2293 for (size_t ndx = 0; ndx < data.size(); ndx++)
2294 data[ndx] = rng.getUint8();
2296 if (m_bufferSize - updated > blockSize)
2297 reference.setData(updated, blockSize, &data[0]);
2299 reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
// Copies the test buffer into a freshly allocated host-visible destination
// buffer; verify() maps the destination, inserts a transfer->host barrier,
// and compares the bytes against the reference memory.
2303 class BufferCopyToBuffer : public CmdCommand
2306 BufferCopyToBuffer (void) {}
2307 ~BufferCopyToBuffer (void) {}
2308 const char* getName (void) const { return "BufferCopyToBuffer"; }
2310 void logPrepare (TestLog& log, size_t commandIndex) const;
2311 void prepare (PrepareContext& context);
2312 void logSubmit (TestLog& log, size_t commandIndex) const;
2313 void submit (SubmitContext& context);
2314 void verify (VerifyContext& context, size_t commandIndex);
2317 vk::VkDeviceSize m_bufferSize;
2318 vk::Move<vk::VkBuffer> m_dstBuffer;
2319 vk::Move<vk::VkDeviceMemory> m_memory;
2322 void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2324 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2327 void BufferCopyToBuffer::prepare (PrepareContext& context)
2329 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2330 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2331 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2332 const vk::VkDevice device = context.getContext().getDevice();
2333 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2335 m_bufferSize = context.getBufferSize();
// Host-visible destination so verify() can map and read it back.
2337 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2338 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2341 void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2343 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2346 void BufferCopyToBuffer::submit (SubmitContext& context)
2348 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2349 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2350 const vk::VkBufferCopy range =
2356 vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2359 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2361 tcu::ResultCollector& resultCollector (context.getResultCollector());
2362 ReferenceMemory& reference (context.getReference());
2363 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2364 const vk::VkDevice device = context.getContext().getDevice();
2365 const vk::VkQueue queue = context.getContext().getQueue();
2366 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2367 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// Make the transfer write visible to host reads before mapping.
2368 const vk::VkBufferMemoryBarrier barrier =
2370 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2373 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2374 vk::VK_ACCESS_HOST_READ_BIT,
2376 VK_QUEUE_FAMILY_IGNORED,
2377 VK_QUEUE_FAMILY_IGNORED,
2383 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2385 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2386 queueRun(vkd, queue, *commandBuffer);
2389 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
// Invalidate in case the host-visible memory is non-coherent.
2392 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2395 const deUint8* const data = (const deUint8*)ptr;
2397 for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2399 if (reference.isDefined(pos))
2401 if (data[pos] != reference.get(pos))
2403 resultCollector.fail(
2404 de::toString(commandIndex) + ":" + getName()
2405 + " Result differs from reference, Expected: "
2406 + de::toString(tcu::toHex<8>(reference.get(pos)))
2408 + de::toString(tcu::toHex<8>(data[pos]))
2410 + de::toString(pos));
2417 vkd.unmapMemory(device, *m_memory);
2420 context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
// Command that fills the test buffer by copying from a freshly created,
// PRNG-initialized source buffer. The seed makes verify() reproducible.
2424 class BufferCopyFromBuffer : public CmdCommand
2427 BufferCopyFromBuffer (deUint32 seed) : m_seed(seed) {}
2428 ~BufferCopyFromBuffer (void) {}
2429 const char* getName (void) const { return "BufferCopyFromBuffer"; }
2431 void logPrepare (TestLog& log, size_t commandIndex) const;
2432 void prepare (PrepareContext& context);
2433 void logSubmit (TestLog& log, size_t commandIndex) const;
2434 void submit (SubmitContext& context);
2435 void verify (VerifyContext& context, size_t commandIndex);
// Seed for the de::Random stream used both to fill the source buffer and
// to regenerate the expected bytes in verify().
2438 const deUint32 m_seed;
2439 vk::VkDeviceSize m_bufferSize;
2440 vk::Move<vk::VkBuffer> m_srcBuffer;
2441 vk::Move<vk::VkDeviceMemory> m_memory;
2444 void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2446 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
// Create a host-visible TRANSFER_SRC buffer matching the test buffer size
// and fill it with deterministic random bytes.
2449 void BufferCopyFromBuffer::prepare (PrepareContext& context)
2451 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2452 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2453 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2454 const vk::VkDevice device = context.getContext().getDevice();
2455 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2457 m_bufferSize = context.getBufferSize();
2458 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2459 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2462 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2463 de::Random rng (m_seed);
2466 deUint8* const data = (deUint8*)ptr;
2468 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2469 data[ndx] = rng.getUint8();
// Flush before unmapping so the device sees the host writes.
2472 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2473 vkd.unmapMemory(device, *m_memory);
2477 void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2479 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
// Record the copy from the prepared source buffer into the test buffer.
2482 void BufferCopyFromBuffer::submit (SubmitContext& context)
2484 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2485 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2486 const vk::VkBufferCopy range =
2492 vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
// No GPU readback needed: replay the same PRNG stream to update the
// reference memory with the bytes the copy must have written.
2495 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2497 ReferenceMemory& reference (context.getReference());
2498 de::Random rng (m_seed);
2500 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2501 reference.set(ndx, rng.getUint8());
// Command that copies the test buffer into a newly created RGBA8 image,
// then verifies by reading the image back through a staging buffer.
2504 class BufferCopyToImage : public CmdCommand
2507 BufferCopyToImage (void) {}
2508 ~BufferCopyToImage (void) {}
2509 const char* getName (void) const { return "BufferCopyToImage"; }
2511 void logPrepare (TestLog& log, size_t commandIndex) const;
2512 void prepare (PrepareContext& context);
2513 void logSubmit (TestLog& log, size_t commandIndex) const;
2514 void submit (SubmitContext& context);
2515 void verify (VerifyContext& context, size_t commandIndex);
2518 deInt32 m_imageWidth;
2519 deInt32 m_imageHeight;
2520 vk::Move<vk::VkImage> m_dstImage;
2521 vk::Move<vk::VkDeviceMemory> m_memory;
2524 void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2526 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
// Create a 2D RGBA8 image whose WxH covers the buffer at 4 bytes/texel
// and transition it to TRANSFER_DST_OPTIMAL.
2529 void BufferCopyToImage::prepare (PrepareContext& context)
2531 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2532 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2533 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2534 const vk::VkDevice device = context.getContext().getDevice();
2535 const vk::VkQueue queue = context.getContext().getQueue();
2536 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2537 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Pick image dimensions so that width * height * 4 == buffer size.
2538 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2540 m_imageWidth = imageSize[0];
2541 m_imageHeight = imageSize[1];
2544 const vk::VkImageCreateInfo createInfo =
2546 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2550 vk::VK_IMAGE_TYPE_2D,
2551 vk::VK_FORMAT_R8G8B8A8_UNORM,
2553 (deUint32)m_imageWidth,
2554 (deUint32)m_imageHeight,
2557 1, 1, // mipLevels, arrayLayers
2558 vk::VK_SAMPLE_COUNT_1_BIT,
2560 vk::VK_IMAGE_TILING_OPTIMAL,
2561 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2562 vk::VK_SHARING_MODE_EXCLUSIVE,
2564 (deUint32)queueFamilies.size(),
2566 vk::VK_IMAGE_LAYOUT_UNDEFINED
2569 m_dstImage = vk::createImage(vkd, device, &createInfo);
2572 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// Layout transition UNDEFINED -> TRANSFER_DST_OPTIMAL in its own submit.
2575 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2576 const vk::VkImageMemoryBarrier barrier =
2578 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2582 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2584 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2585 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2587 VK_QUEUE_FAMILY_IGNORED,
2588 VK_QUEUE_FAMILY_IGNORED,
2592 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2594 1, // Mip level count
2600 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2602 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2603 queueRun(vkd, queue, *commandBuffer);
2607 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2609 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
// Record a full-image copy from the test buffer into m_dstImage.
2612 void BufferCopyToImage::submit (SubmitContext& context)
2614 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2615 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2616 const vk::VkBufferImageCopy region =
2621 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2628 (deUint32)m_imageWidth,
2629 (deUint32)m_imageHeight,
2634 vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
// Read the image back: transition to TRANSFER_SRC, copy image -> staging
// buffer, barrier TRANSFER -> HOST, then byte-compare on the host.
2637 void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
2639 tcu::ResultCollector& resultCollector (context.getResultCollector());
2640 ReferenceMemory& reference (context.getReference());
2641 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2642 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2643 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2644 const vk::VkDevice device = context.getContext().getDevice();
2645 const vk::VkQueue queue = context.getContext().getQueue();
2646 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2647 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2648 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2649 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2650 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2652 const vk::VkImageMemoryBarrier imageBarrier =
2654 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2657 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2658 vk::VK_ACCESS_TRANSFER_READ_BIT,
2660 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2661 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2663 VK_QUEUE_FAMILY_IGNORED,
2664 VK_QUEUE_FAMILY_IGNORED,
2668 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2670 1, // Mip level count
2675 const vk::VkBufferMemoryBarrier bufferBarrier =
2677 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2680 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2681 vk::VK_ACCESS_HOST_READ_BIT,
2683 VK_QUEUE_FAMILY_IGNORED,
2684 VK_QUEUE_FAMILY_IGNORED,
2690 const vk::VkBufferImageCopy region =
2695 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2702 (deUint32)m_imageWidth,
2703 (deUint32)m_imageHeight,
2708 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
2709 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
2710 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2713 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2714 queueRun(vkd, queue, *commandBuffer);
2717 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2719 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2722 const deUint8* const data = (const deUint8*)ptr;
2724 for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
// Only bytes with a defined reference value are checked.
2726 if (reference.isDefined(pos))
2728 if (data[pos] != reference.get(pos))
2730 resultCollector.fail(
2731 de::toString(commandIndex) + ":" + getName()
2732 + " Result differs from reference, Expected: "
2733 + de::toString(tcu::toHex<8>(reference.get(pos)))
2735 + de::toString(tcu::toHex<8>(data[pos]))
2737 + de::toString(pos));
2744 vkd.unmapMemory(device, *memory);
// Command that fills the test buffer by copying from a PRNG-initialized
// image. The image itself is filled via an intermediate staging buffer.
2748 class BufferCopyFromImage : public CmdCommand
2751 BufferCopyFromImage (deUint32 seed) : m_seed(seed) {}
2752 ~BufferCopyFromImage (void) {}
2753 const char* getName (void) const { return "BufferCopyFromImage"; }
2755 void logPrepare (TestLog& log, size_t commandIndex) const;
2756 void prepare (PrepareContext& context);
2757 void logSubmit (TestLog& log, size_t commandIndex) const;
2758 void submit (SubmitContext& context);
2759 void verify (VerifyContext& context, size_t commandIndex);
// Seed shared between prepare() (fills the image) and verify()
// (regenerates the expected bytes).
2762 const deUint32 m_seed;
2763 deInt32 m_imageWidth;
2764 deInt32 m_imageHeight;
2765 vk::Move<vk::VkImage> m_srcImage;
2766 vk::Move<vk::VkDeviceMemory> m_memory;
2769 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2771 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
// Create the RGBA8 source image, upload deterministic random texels into
// it through a staging buffer, and leave it in TRANSFER_SRC_OPTIMAL.
2774 void BufferCopyFromImage::prepare (PrepareContext& context)
2776 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2777 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2778 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2779 const vk::VkDevice device = context.getContext().getDevice();
2780 const vk::VkQueue queue = context.getContext().getQueue();
2781 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2782 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Pick image dimensions so that width * height * 4 == buffer size.
2783 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2785 m_imageWidth = imageSize[0];
2786 m_imageHeight = imageSize[1];
2789 const vk::VkImageCreateInfo createInfo =
2791 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2795 vk::VK_IMAGE_TYPE_2D,
2796 vk::VK_FORMAT_R8G8B8A8_UNORM,
2798 (deUint32)m_imageWidth,
2799 (deUint32)m_imageHeight,
2802 1, 1, // mipLevels, arrayLayers
2803 vk::VK_SAMPLE_COUNT_1_BIT,
2805 vk::VK_IMAGE_TILING_OPTIMAL,
2806 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2807 vk::VK_SHARING_MODE_EXCLUSIVE,
2809 (deUint32)queueFamilies.size(),
2811 vk::VK_IMAGE_LAYOUT_UNDEFINED
2814 m_srcImage = vk::createImage(vkd, device, &createInfo);
2817 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
2820 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2821 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2822 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> TRANSFER_DST before the upload copy...
2823 const vk::VkImageMemoryBarrier preImageBarrier =
2825 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2829 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2831 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2832 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2834 VK_QUEUE_FAMILY_IGNORED,
2835 VK_QUEUE_FAMILY_IGNORED,
2839 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2841 1, // Mip level count
// ...and TRANSFER_DST -> TRANSFER_SRC afterwards, ready for submit().
2846 const vk::VkImageMemoryBarrier postImageBarrier =
2848 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2851 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2854 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2855 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2857 VK_QUEUE_FAMILY_IGNORED,
2858 VK_QUEUE_FAMILY_IGNORED,
2862 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2864 1, // Mip level count
2869 const vk::VkBufferImageCopy region =
2874 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2881 (deUint32)m_imageWidth,
2882 (deUint32)m_imageHeight,
2888 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2889 de::Random rng (m_seed);
2892 deUint8* const data = (deUint8*)ptr;
2894 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2895 data[ndx] = rng.getUint8();
// Flush before unmapping so the device sees the host writes.
2898 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2899 vkd.unmapMemory(device, *memory);
2902 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2903 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2904 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2906 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
// queueRun submits and waits; staging resources can then be destroyed.
2907 queueRun(vkd, queue, *commandBuffer);
2911 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2913 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
// Record the image -> test-buffer copy.
2916 void BufferCopyFromImage::submit (SubmitContext& context)
2918 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2919 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2920 const vk::VkBufferImageCopy region =
2925 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2932 (deUint32)m_imageWidth,
2933 (deUint32)m_imageHeight,
2938 vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
// Replay the PRNG stream to update the reference bytes; no readback needed.
2941 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2943 ReferenceMemory& reference (context.getReference());
2944 de::Random rng (m_seed);
2946 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2947 reference.set(ndx, rng.getUint8());
// Command that copies the test image (in m_imageLayout) into a host-visible
// buffer and compares the result against the reference image.
2950 class ImageCopyToBuffer : public CmdCommand
2953 ImageCopyToBuffer (vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
2954 ~ImageCopyToBuffer (void) {}
// NOTE(review): getName() returns "BufferCopyToImage", not "ImageCopyToBuffer".
// This looks like a copy-paste from the BufferCopyToImage class above; it makes
// log messages misleading. Presumably the string may feed generated test-case
// names, so confirm downstream usage before renaming — TODO confirm.
2955 const char* getName (void) const { return "BufferCopyToImage"; }
2957 void logPrepare (TestLog& log, size_t commandIndex) const;
2958 void prepare (PrepareContext& context);
2959 void logSubmit (TestLog& log, size_t commandIndex) const;
2960 void submit (SubmitContext& context);
2961 void verify (VerifyContext& context, size_t commandIndex);
// Layout the test image is expected to be in when submit() records the copy.
2964 vk::VkImageLayout m_imageLayout;
2965 vk::VkDeviceSize m_bufferSize;
2966 vk::Move<vk::VkBuffer> m_dstBuffer;
2967 vk::Move<vk::VkDeviceMemory> m_memory;
2968 vk::VkDeviceSize m_imageMemorySize;
2969 deInt32 m_imageWidth;
2970 deInt32 m_imageHeight;
2973 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2975 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
// Allocate a host-visible destination buffer sized for the whole image
// (4 bytes per RGBA8 texel).
2978 void ImageCopyToBuffer::prepare (PrepareContext& context)
2980 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2981 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2982 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2983 const vk::VkDevice device = context.getContext().getDevice();
2984 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2986 m_imageWidth = context.getImageWidth();
2987 m_imageHeight = context.getImageHeight();
2988 m_bufferSize = 4 * m_imageWidth * m_imageHeight;
2989 m_imageMemorySize = context.getImageMemorySize();
2990 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2991 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2994 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2996 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
// Record a full-image copy from the test image (in m_imageLayout) into
// the destination buffer allocated in prepare().
2999 void ImageCopyToBuffer::submit (SubmitContext& context)
3001 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3002 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3003 const vk::VkBufferImageCopy region =
3008 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3015 (deUint32)m_imageWidth,
3016 (deUint32)m_imageHeight,
3021 vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
// Compare the copied-out pixels against the reference image with a zero
// threshold (exact match required for UNORM_INT8).
3024 void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
3026 tcu::ResultCollector& resultCollector (context.getResultCollector());
3027 ReferenceMemory& reference (context.getReference());
3028 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3029 const vk::VkDevice device = context.getContext().getDevice();
3030 const vk::VkQueue queue = context.getContext().getQueue();
3031 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3032 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// Make the transfer write visible to host reads before mapping.
3033 const vk::VkBufferMemoryBarrier barrier =
3035 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3038 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3039 vk::VK_ACCESS_HOST_READ_BIT,
3041 VK_QUEUE_FAMILY_IGNORED,
3042 VK_QUEUE_FAMILY_IGNORED,
3048 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3050 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3051 queueRun(vkd, queue, *commandBuffer);
// Byte-level reference for the image's backing memory is no longer
// tracked from here on; the pixel-level reference image is used instead.
3053 reference.setUndefined(0, (size_t)m_imageMemorySize);
3055 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3056 const ConstPixelBufferAccess referenceImage (context.getReferenceImage().getAccess());
3057 const ConstPixelBufferAccess resultImage (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3059 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3061 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3062 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3064 vkd.unmapMemory(device, *m_memory);
// Command that fills the test image (already in m_imageLayout) by copying
// from a PRNG-initialized host-visible buffer.
3068 class ImageCopyFromBuffer : public CmdCommand
3071 ImageCopyFromBuffer (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3072 ~ImageCopyFromBuffer (void) {}
3073 const char* getName (void) const { return "ImageCopyFromBuffer"; }
3075 void logPrepare (TestLog& log, size_t commandIndex) const;
3076 void prepare (PrepareContext& context);
3077 void logSubmit (TestLog& log, size_t commandIndex) const;
3078 void submit (SubmitContext& context);
3079 void verify (VerifyContext& context, size_t commandIndex);
// Seed shared between prepare() (fills the buffer) and verify()
// (regenerates the expected pixels).
3082 const deUint32 m_seed;
3083 const vk::VkImageLayout m_imageLayout;
3084 deInt32 m_imageWidth;
3085 deInt32 m_imageHeight;
3086 vk::VkDeviceSize m_imageMemorySize;
3087 vk::VkDeviceSize m_bufferSize;
3088 vk::Move<vk::VkBuffer> m_srcBuffer;
3089 vk::Move<vk::VkDeviceMemory> m_memory;
3092 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3094 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
// Allocate a host-visible source buffer sized for the whole test image
// (4 bytes per RGBA8 texel) and fill it with deterministic random bytes
// derived from m_seed; verify() later replays the same PRNG stream.
3097 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3099 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3100 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3101 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3102 const vk::VkDevice device = context.getContext().getDevice();
3103 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Fix: width must come from getImageWidth() and height from getImageHeight().
// They were swapped (width <- getImageHeight(), height <- getImageWidth()),
// which produced a wrong copy extent in submit() and wrong loop bounds in
// verify() for any non-square image. All sibling commands (ImageCopyToBuffer,
// ImageCopyFromImage, ImageCopyToImage) assign these straight.
3105 m_imageWidth = context.getImageWidth();
3106 m_imageHeight = context.getImageHeight();
3107 m_imageMemorySize = context.getImageMemorySize();
3108 m_bufferSize = m_imageWidth * m_imageHeight * 4;
3109 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3110 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3113 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3114 de::Random rng (m_seed);
3117 deUint8* const data = (deUint8*)ptr;
3119 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3120 data[ndx] = rng.getUint8();
// Flush before unmapping so the device sees the host writes.
3123 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3124 vkd.unmapMemory(device, *m_memory);
3128 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3130 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
// Record the buffer -> test-image copy; the image must be in m_imageLayout.
3133 void ImageCopyFromBuffer::submit (SubmitContext& context)
3135 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3136 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3137 const vk::VkBufferImageCopy region =
3142 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3149 (deUint32)m_imageWidth,
3150 (deUint32)m_imageHeight,
3155 vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
// Update the references: the byte-level reference for the image memory is
// invalidated, and the pixel-level reference image is rebuilt by replaying
// the same PRNG stream used in prepare() (4 bytes per pixel, RGBA order).
3158 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3160 ReferenceMemory& reference (context.getReference());
3161 de::Random rng (m_seed);
3163 reference.setUndefined(0, (size_t)m_imageMemorySize);
3166 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3168 for (deInt32 y = 0; y < m_imageHeight; y++)
3169 for (deInt32 x = 0; x < m_imageWidth; x++)
3171 const deUint8 r8 = rng.getUint8();
3172 const deUint8 g8 = rng.getUint8();
3173 const deUint8 b8 = rng.getUint8();
3174 const deUint8 a8 = rng.getUint8();
3176 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
// Command that fills the test image by copying from a second,
// PRNG-initialized image (uploaded via a staging buffer in prepare()).
3181 class ImageCopyFromImage : public CmdCommand
3184 ImageCopyFromImage (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3185 ~ImageCopyFromImage (void) {}
3186 const char* getName (void) const { return "ImageCopyFromImage"; }
3188 void logPrepare (TestLog& log, size_t commandIndex) const;
3189 void prepare (PrepareContext& context);
3190 void logSubmit (TestLog& log, size_t commandIndex) const;
3191 void submit (SubmitContext& context);
3192 void verify (VerifyContext& context, size_t commandIndex);
// Seed shared between prepare() (fills the source image) and verify()
// (regenerates the expected pixels).
3195 const deUint32 m_seed;
3196 const vk::VkImageLayout m_imageLayout;
3197 deInt32 m_imageWidth;
3198 deInt32 m_imageHeight;
3199 vk::VkDeviceSize m_imageMemorySize;
3200 vk::Move<vk::VkImage> m_srcImage;
3201 vk::Move<vk::VkDeviceMemory> m_memory;
3204 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3206 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
// Create a source image matching the test image's dimensions, upload
// deterministic random texels, and leave it in TRANSFER_SRC_OPTIMAL.
3209 void ImageCopyFromImage::prepare (PrepareContext& context)
3211 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3212 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3213 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3214 const vk::VkDevice device = context.getContext().getDevice();
3215 const vk::VkQueue queue = context.getContext().getQueue();
3216 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3217 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3219 m_imageWidth = context.getImageWidth();
3220 m_imageHeight = context.getImageHeight();
3221 m_imageMemorySize = context.getImageMemorySize();
3224 const vk::VkImageCreateInfo createInfo =
3226 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3230 vk::VK_IMAGE_TYPE_2D,
3231 vk::VK_FORMAT_R8G8B8A8_UNORM,
3233 (deUint32)m_imageWidth,
3234 (deUint32)m_imageHeight,
3237 1, 1, // mipLevels, arrayLayers
3238 vk::VK_SAMPLE_COUNT_1_BIT,
3240 vk::VK_IMAGE_TILING_OPTIMAL,
3241 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3242 vk::VK_SHARING_MODE_EXCLUSIVE,
3244 (deUint32)queueFamilies.size(),
3246 vk::VK_IMAGE_LAYOUT_UNDEFINED
3249 m_srcImage = vk::createImage(vkd, device, &createInfo);
3252 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3255 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3256 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3257 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> TRANSFER_DST before the upload copy...
3258 const vk::VkImageMemoryBarrier preImageBarrier =
3260 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3264 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3266 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3267 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3269 VK_QUEUE_FAMILY_IGNORED,
3270 VK_QUEUE_FAMILY_IGNORED,
3274 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3276 1, // Mip level count
// ...and TRANSFER_DST -> TRANSFER_SRC afterwards, ready for submit().
3281 const vk::VkImageMemoryBarrier postImageBarrier =
3283 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3286 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3289 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3290 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3292 VK_QUEUE_FAMILY_IGNORED,
3293 VK_QUEUE_FAMILY_IGNORED,
3297 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3299 1, // Mip level count
3304 const vk::VkBufferImageCopy region =
3309 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3316 (deUint32)m_imageWidth,
3317 (deUint32)m_imageHeight,
3323 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3324 de::Random rng (m_seed);
3327 deUint8* const data = (deUint8*)ptr;
3329 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3330 data[ndx] = rng.getUint8();
// Flush before unmapping so the device sees the host writes.
3333 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3334 vkd.unmapMemory(device, *memory);
3337 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3338 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3339 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3341 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
// queueRun submits and waits; staging resources can then be destroyed.
3342 queueRun(vkd, queue, *commandBuffer);
3346 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3348 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
// Record the full-extent image -> image copy into the test image.
3351 void ImageCopyFromImage::submit (SubmitContext& context)
3353 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3354 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3355 const vk::VkImageCopy region =
3358 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3366 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3373 (deUint32)m_imageWidth,
3374 (deUint32)m_imageHeight,
3379 vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
// Update the references: byte-level reference invalidated, pixel-level
// reference image rebuilt by replaying the PRNG stream from prepare().
3382 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3384 ReferenceMemory& reference (context.getReference());
3385 de::Random rng (m_seed);
3387 reference.setUndefined(0, (size_t)m_imageMemorySize);
3390 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3392 for (deInt32 y = 0; y < m_imageHeight; y++)
3393 for (deInt32 x = 0; x < m_imageWidth; x++)
3395 const deUint8 r8 = rng.getUint8();
3396 const deUint8 g8 = rng.getUint8();
3397 const deUint8 b8 = rng.getUint8();
3398 const deUint8 a8 = rng.getUint8();
3400 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
// Command that copies the test image into a newly created destination
// image; verification (not fully visible here) reads the copy back.
3405 class ImageCopyToImage : public CmdCommand
3408 ImageCopyToImage (vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
3409 ~ImageCopyToImage (void) {}
3410 const char* getName (void) const { return "ImageCopyToImage"; }
3412 void logPrepare (TestLog& log, size_t commandIndex) const;
3413 void prepare (PrepareContext& context);
3414 void logSubmit (TestLog& log, size_t commandIndex) const;
3415 void submit (SubmitContext& context);
3416 void verify (VerifyContext& context, size_t commandIndex);
// Layout the test image is expected to be in when submit() records the copy.
3419 const vk::VkImageLayout m_imageLayout;
3420 deInt32 m_imageWidth;
3421 deInt32 m_imageHeight;
3422 vk::VkDeviceSize m_imageMemorySize;
3423 vk::Move<vk::VkImage> m_dstImage;
3424 vk::Move<vk::VkDeviceMemory> m_memory;
3427 void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3429 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
// Create the destination image (same extent/format as the test image),
// bind device memory to it, and transition it UNDEFINED ->
// TRANSFER_DST_OPTIMAL on the queue so submit() can copy into it.
3432 void ImageCopyToImage::prepare (PrepareContext& context)
3434 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3435 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3436 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3437 const vk::VkDevice device = context.getContext().getDevice();
3438 const vk::VkQueue queue = context.getContext().getQueue();
3439 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3440 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Cache the image parameters used later by submit()/verify().
3442 m_imageWidth = context.getImageWidth();
3443 m_imageHeight = context.getImageHeight();
3444 m_imageMemorySize = context.getImageMemorySize();
// 2D RGBA8 image usable as both transfer source (for verify readback) and
// transfer destination (for the copy itself).
3447 const vk::VkImageCreateInfo createInfo =
3449 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3453 vk::VK_IMAGE_TYPE_2D,
3454 vk::VK_FORMAT_R8G8B8A8_UNORM,
3456 (deUint32)m_imageWidth,
3457 (deUint32)m_imageHeight,
3460 1, 1, // mipLevels, arrayLayers
3461 vk::VK_SAMPLE_COUNT_1_BIT,
3463 vk::VK_IMAGE_TILING_OPTIMAL,
3464 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3465 vk::VK_SHARING_MODE_EXCLUSIVE,
3467 (deUint32)queueFamilies.size(),
3469 vk::VK_IMAGE_LAYOUT_UNDEFINED
3472 m_dstImage = vk::createImage(vkd, device, &createInfo);
3475 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// One-off command buffer: layout transition to TRANSFER_DST_OPTIMAL.
3478 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3479 const vk::VkImageMemoryBarrier barrier =
3481 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3485 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3487 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3488 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3490 VK_QUEUE_FAMILY_IGNORED,
3491 VK_QUEUE_FAMILY_IGNORED,
3495 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3497 1, // Mip level count
3503 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
// queueRun() submits and waits for completion (presumed from usage — it is
// defined outside this view).
3505 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3506 queueRun(vkd, queue, *commandBuffer);
// Log the copy that submit() records for this command index.
3510 void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3512 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
// Record the full-extent image copy: context image (in m_imageLayout) ->
// m_dstImage (in TRANSFER_DST_OPTIMAL).
3515 void ImageCopyToImage::submit (SubmitContext& context)
3517 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3518 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3519 const vk::VkImageCopy region =
3522 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3530 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3537 (deUint32)m_imageWidth,
3538 (deUint32)m_imageHeight,
3543 vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
// Read the destination image back through a host-visible buffer and compare
// it pixel-exactly (threshold 0) against the reference image.
3546 void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
3548 tcu::ResultCollector& resultCollector (context.getResultCollector());
3549 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3550 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3551 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3552 const vk::VkDevice device = context.getContext().getDevice();
3553 const vk::VkQueue queue = context.getContext().getQueue();
3554 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3555 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3556 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// 4 bytes per pixel (RGBA8) readback buffer.
3557 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3558 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Transition m_dstImage DST -> SRC so it can be copied into the buffer.
3560 const vk::VkImageMemoryBarrier imageBarrier =
3562 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3565 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3566 vk::VK_ACCESS_TRANSFER_READ_BIT,
3568 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3569 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3571 VK_QUEUE_FAMILY_IGNORED,
3572 VK_QUEUE_FAMILY_IGNORED,
3576 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3578 1, // Mip level count
// Make the buffer write visible to host reads before mapping.
3583 const vk::VkBufferMemoryBarrier bufferBarrier =
3585 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3588 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3589 vk::VK_ACCESS_HOST_READ_BIT,
3591 VK_QUEUE_FAMILY_IGNORED,
3592 VK_QUEUE_FAMILY_IGNORED,
3597 const vk::VkBufferImageCopy region =
3602 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3609 (deUint32)m_imageWidth,
3610 (deUint32)m_imageHeight,
3615 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
3616 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
3617 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3620 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3621 queueRun(vkd, queue, *commandBuffer);
// Map, invalidate (memory may be non-coherent), and compare on the host.
3624 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3626 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3629 const deUint8* const data = (const deUint8*)ptr;
3630 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
3631 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3633 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3634 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3637 vkd.unmapMemory(device, *memory);
// Command that blits a randomly filled source image (allocated in prepare())
// into the context's test image, optionally upscaling 2x depending on
// m_scale; verify() regenerates the expected result from the same seed.
3647 class ImageBlitFromImage : public CmdCommand
3650 ImageBlitFromImage (deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
3651 ~ImageBlitFromImage (void) {}
3652 const char* getName (void) const { return "ImageBlitFromImage"; }
3654 void logPrepare (TestLog& log, size_t commandIndex) const;
3655 void prepare (PrepareContext& context);
3656 void logSubmit (TestLog& log, size_t commandIndex) const;
3657 void submit (SubmitContext& context);
3658 void verify (VerifyContext& context, size_t commandIndex);
// Seed for the random source-image contents.
3661 const deUint32 m_seed;
// 1:1 or 2x blit (see BLIT_SCALE_* handling in prepare()/verify()).
3662 const BlitScale m_scale;
3663 const vk::VkImageLayout m_imageLayout;
// Destination (context) image parameters, captured in prepare().
3664 deInt32 m_imageWidth;
3665 deInt32 m_imageHeight;
3666 vk::VkDeviceSize m_imageMemorySize;
// Source image dimensions (equal to or half of the destination's).
3667 deInt32 m_srcImageWidth;
3668 deInt32 m_srcImageHeight;
3669 vk::Move<vk::VkImage> m_srcImage;
3670 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() is about to do for this command index.
3673 void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3675 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
// Create the blit source image (full or half size per m_scale), fill it
// with seeded random bytes via a host-visible staging buffer, and leave it
// in TRANSFER_SRC_OPTIMAL ready for submit()'s vkCmdBlitImage.
3678 void ImageBlitFromImage::prepare (PrepareContext& context)
3680 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3681 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3682 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3683 const vk::VkDevice device = context.getContext().getDevice();
3684 const vk::VkQueue queue = context.getContext().getQueue();
3685 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3686 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3688 m_imageWidth = context.getImageWidth();
3689 m_imageHeight = context.getImageHeight();
3690 m_imageMemorySize = context.getImageMemorySize();
// A 2x blit upscale needs a half-size source.
3692 if (m_scale == BLIT_SCALE_10)
3694 m_srcImageWidth = m_imageWidth;
3695 m_srcImageHeight = m_imageHeight;
3697 else if (m_scale == BLIT_SCALE_20)
3699 m_srcImageWidth = m_imageWidth / 2;
3700 m_srcImageHeight = m_imageHeight / 2;
3703 DE_FATAL("Unsupported scale");
3706 const vk::VkImageCreateInfo createInfo =
3708 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3712 vk::VK_IMAGE_TYPE_2D,
3713 vk::VK_FORMAT_R8G8B8A8_UNORM,
3715 (deUint32)m_srcImageWidth,
3716 (deUint32)m_srcImageHeight,
3719 1, 1, // mipLevels, arrayLayers
3720 vk::VK_SAMPLE_COUNT_1_BIT,
3722 vk::VK_IMAGE_TILING_OPTIMAL,
3723 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3724 vk::VK_SHARING_MODE_EXCLUSIVE,
3726 (deUint32)queueFamilies.size(),
3728 vk::VK_IMAGE_LAYOUT_UNDEFINED
3731 m_srcImage = vk::createImage(vkd, device, &createInfo);
3734 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
// Staging buffer for the random RGBA8 contents (4 bytes per pixel).
3737 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3738 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3739 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> TRANSFER_DST for the upload...
3740 const vk::VkImageMemoryBarrier preImageBarrier =
3742 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3746 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3748 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3749 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3751 VK_QUEUE_FAMILY_IGNORED,
3752 VK_QUEUE_FAMILY_IGNORED,
3756 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3758 1, // Mip level count
// ...then TRANSFER_DST -> TRANSFER_SRC for the later blit.
3763 const vk::VkImageMemoryBarrier postImageBarrier =
3765 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3768 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3771 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3772 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3774 VK_QUEUE_FAMILY_IGNORED,
3775 VK_QUEUE_FAMILY_IGNORED,
3779 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3781 1, // Mip level count
3786 const vk::VkBufferImageCopy region =
3791 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3798 (deUint32)m_srcImageWidth,
3799 (deUint32)m_srcImageHeight,
// Fill the staging buffer with deterministic random bytes from m_seed.
3805 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
3806 de::Random rng (m_seed);
3809 deUint8* const data = (deUint8*)ptr;
3811 for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
3812 data[ndx] = rng.getUint8();
// Flush before unmap — the memory may be non-coherent.
3815 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_srcImageWidth * m_srcImageHeight);
3816 vkd.unmapMemory(device, *memory);
3819 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3820 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3821 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3823 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3824 queueRun(vkd, queue, *commandBuffer);
// Log the blit that submit() records, noting the 2x scale variant.
3828 void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3830 log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
// Record the blit from m_srcImage (TRANSFER_SRC_OPTIMAL) into the context
// image using nearest filtering, so the result is pixel-predictable.
3833 void ImageBlitFromImage::submit (SubmitContext& context)
3835 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3836 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3837 const vk::VkImageBlit region =
3841 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3857 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3871 vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
// Rebuild the reference image after the blit: 1:1 just replays the seeded
// RNG; 2x builds the half-size source, then nearest-samples it up.
3874 void ImageBlitFromImage::verify (VerifyContext& context, size_t)
3876 ReferenceMemory& reference (context.getReference());
3877 de::Random rng (m_seed);
// The whole destination was rewritten, so its memory is now undefined.
3879 reference.setUndefined(0, (size_t)m_imageMemorySize);
3882 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3884 if (m_scale == BLIT_SCALE_10)
3886 for (deInt32 y = 0; y < m_imageHeight; y++)
3887 for (deInt32 x = 0; x < m_imageWidth; x++)
3889 const deUint8 r8 = rng.getUint8();
3890 const deUint8 g8 = rng.getUint8();
3891 const deUint8 b8 = rng.getUint8();
3892 const deUint8 a8 = rng.getUint8();
3894 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3897 else if (m_scale == BLIT_SCALE_20)
// Recreate the half-size source from the RNG, then upsample with the same
// nearest-neighbour coordinate mapping (src = floor(dst * src/dst)).
3899 tcu::TextureLevel source (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
3900 const float xscale = ((float)m_srcImageWidth) / (float)m_imageWidth;
3901 const float yscale = ((float)m_srcImageHeight) / (float)m_imageHeight;
3903 for (deInt32 y = 0; y < m_srcImageHeight; y++)
3904 for (deInt32 x = 0; x < m_srcImageWidth; x++)
3906 const deUint8 r8 = rng.getUint8();
3907 const deUint8 g8 = rng.getUint8();
3908 const deUint8 b8 = rng.getUint8();
3909 const deUint8 a8 = rng.getUint8();
3911 source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
3914 for (deInt32 y = 0; y < m_imageHeight; y++)
3915 for (deInt32 x = 0; x < m_imageWidth; x++)
3916 refAccess.setPixel(source.getAccess().getPixelUint(int(float(x) * xscale), int(float(y) * yscale)), x, y)
3919 DE_FATAL("Unsupported scale");
// Command that blits the context's test image into a freshly allocated
// destination image (same size or 2x per m_scale); verify() reads the
// destination back and compares against the (optionally upsampled) reference.
3923 class ImageBlitToImage : public CmdCommand
3926 ImageBlitToImage (BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
3927 ~ImageBlitToImage (void) {}
3928 const char* getName (void) const { return "ImageBlitToImage"; }
3930 void logPrepare (TestLog& log, size_t commandIndex) const;
3931 void prepare (PrepareContext& context);
3932 void logSubmit (TestLog& log, size_t commandIndex) const;
3933 void submit (SubmitContext& context);
3934 void verify (VerifyContext& context, size_t commandIndex);
3937 const BlitScale m_scale;
3938 const vk::VkImageLayout m_imageLayout;
// Source (context) image parameters, captured in prepare().
3939 deInt32 m_imageWidth;
3940 deInt32 m_imageHeight;
3941 vk::VkDeviceSize m_imageMemorySize;
// Destination dimensions (equal to or double the source's).
3942 deInt32 m_dstImageWidth;
3943 deInt32 m_dstImageHeight;
3944 vk::Move<vk::VkImage> m_dstImage;
3945 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() is about to do for this command index.
3948 void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3950 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
// Create the blit destination image (full or 2x size per m_scale), bind
// memory, and transition it UNDEFINED -> TRANSFER_DST_OPTIMAL so submit()
// can blit into it.
3953 void ImageBlitToImage::prepare (PrepareContext& context)
3955 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3956 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3957 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3958 const vk::VkDevice device = context.getContext().getDevice();
3959 const vk::VkQueue queue = context.getContext().getQueue();
3960 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3961 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3963 m_imageWidth = context.getImageWidth();
3964 m_imageHeight = context.getImageHeight();
3965 m_imageMemorySize = context.getImageMemorySize();
// A 2x blit upscale needs a double-size destination.
3967 if (m_scale == BLIT_SCALE_10)
3969 m_dstImageWidth = context.getImageWidth();
3970 m_dstImageHeight = context.getImageHeight();
3972 else if (m_scale == BLIT_SCALE_20)
3974 m_dstImageWidth = context.getImageWidth() * 2;
3975 m_dstImageHeight = context.getImageHeight() * 2;
3978 DE_FATAL("Unsupported blit scale");
3981 const vk::VkImageCreateInfo createInfo =
3983 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3987 vk::VK_IMAGE_TYPE_2D,
3988 vk::VK_FORMAT_R8G8B8A8_UNORM,
3990 (deUint32)m_dstImageWidth,
3991 (deUint32)m_dstImageHeight,
3994 1, 1, // mipLevels, arrayLayers
3995 vk::VK_SAMPLE_COUNT_1_BIT,
3997 vk::VK_IMAGE_TILING_OPTIMAL,
3998 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3999 vk::VK_SHARING_MODE_EXCLUSIVE,
4001 (deUint32)queueFamilies.size(),
4003 vk::VK_IMAGE_LAYOUT_UNDEFINED
4006 m_dstImage = vk::createImage(vkd, device, &createInfo);
4009 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// One-off command buffer: layout transition to TRANSFER_DST_OPTIMAL.
4012 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4013 const vk::VkImageMemoryBarrier barrier =
4015 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4019 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4021 vk::VK_IMAGE_LAYOUT_UNDEFINED,
4022 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4024 VK_QUEUE_FAMILY_IGNORED,
4025 VK_QUEUE_FAMILY_IGNORED,
4029 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4031 1, // Mip level count
4037 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
4039 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4040 queueRun(vkd, queue, *commandBuffer);
// Log the blit that submit() records, noting the 2x scale variant.
4044 void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
4046 log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
// Record the blit from the context image (in m_imageLayout) into m_dstImage
// (TRANSFER_DST_OPTIMAL) using nearest filtering for predictable pixels.
4049 void ImageBlitToImage::submit (SubmitContext& context)
4051 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4052 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4053 const vk::VkImageBlit region =
4057 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4073 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4087 vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
// Read the blit destination back through a host-visible buffer and compare
// against the reference image — directly for 1:1, or against a 2x
// nearest-upsampled copy of the reference for the 2x case.
4090 void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
4092 tcu::ResultCollector& resultCollector (context.getResultCollector());
4093 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4094 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4095 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4096 const vk::VkDevice device = context.getContext().getDevice();
4097 const vk::VkQueue queue = context.getContext().getQueue();
4098 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4099 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4100 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// 4 bytes per pixel (RGBA8) readback buffer, sized for the destination.
4101 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4102 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Transition m_dstImage DST -> SRC so it can be copied into the buffer.
4104 const vk::VkImageMemoryBarrier imageBarrier =
4106 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4109 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4110 vk::VK_ACCESS_TRANSFER_READ_BIT,
4112 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4113 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4115 VK_QUEUE_FAMILY_IGNORED,
4116 VK_QUEUE_FAMILY_IGNORED,
4120 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4122 1, // Mip level count
// Make the buffer write visible to host reads before mapping.
4127 const vk::VkBufferMemoryBarrier bufferBarrier =
4129 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4132 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4133 vk::VK_ACCESS_HOST_READ_BIT,
4135 VK_QUEUE_FAMILY_IGNORED,
4136 VK_QUEUE_FAMILY_IGNORED,
4141 const vk::VkBufferImageCopy region =
4146 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4153 (deUint32)m_dstImageWidth,
4154 (deUint32)m_dstImageHeight,
4159 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4160 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4161 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4164 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4165 queueRun(vkd, queue, *commandBuffer);
// Map, invalidate (memory may be non-coherent), then compare on the host.
4168 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);
4170 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_dstImageWidth * m_dstImageHeight);
4172 if (m_scale == BLIT_SCALE_10)
4174 const deUint8* const data = (const deUint8*)ptr;
4175 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4176 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4178 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4179 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4181 else if (m_scale == BLIT_SCALE_20)
4183 const deUint8* const data = (const deUint8*)ptr;
4184 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4185 tcu::TextureLevel reference (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);
4188 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
// Nearest 2x upsample: each source pixel maps to a 2x2 destination block.
4190 for (deInt32 y = 0; y < m_dstImageHeight; y++)
4191 for (deInt32 x = 0; x < m_dstImageWidth; x++)
4193 reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
4197 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4198 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4201 DE_FATAL("Unknown scale");
4203 vkd.unmapMemory(device, *memory);
// Thin wrapper around PrepareContext that additionally exposes the render
// pass, framebuffer and render-target dimensions to RenderPassCommands
// being prepared inside a SubmitRenderPass. Non-owning: it only borrows the
// wrapped context and handles.
4207 class PrepareRenderPassContext
4210 PrepareRenderPassContext (PrepareContext& context,
4211 vk::VkRenderPass renderPass,
4212 vk::VkFramebuffer framebuffer,
4213 deInt32 targetWidth,
4214 deInt32 targetHeight)
4215 : m_context (context)
4216 , m_renderPass (renderPass)
4217 , m_framebuffer (framebuffer)
4218 , m_targetWidth (targetWidth)
4219 , m_targetHeight (targetHeight)
// Forwarders to the wrapped PrepareContext.
4223 const Memory& getMemory (void) const { return m_context.getMemory(); }
4224 const Context& getContext (void) const { return m_context.getContext(); }
4225 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
4227 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
4228 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
4230 vk::VkImage getImage (void) const { return m_context.getImage(); }
4231 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
4232 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
4233 vk::VkImageLayout getImageLayout (void) const { return m_context.getImageLayout(); }
// Render-pass specific accessors.
4235 deInt32 getTargetWidth (void) const { return m_targetWidth; }
4236 deInt32 getTargetHeight (void) const { return m_targetHeight; }
4238 vk::VkRenderPass getRenderPass (void) const { return m_renderPass; }
4241 PrepareContext& m_context;
4242 const vk::VkRenderPass m_renderPass;
4243 const vk::VkFramebuffer m_framebuffer;
4244 const deInt32 m_targetWidth;
4245 const deInt32 m_targetHeight;
// Wrapper around VerifyContext used while verifying RenderPassCommands.
// Owns an RGBA8 reference render target of the framebuffer's size that
// commands update to mirror what the GPU rendered.
4248 class VerifyRenderPassContext
4251 VerifyRenderPassContext (VerifyContext& context,
4252 deInt32 targetWidth,
4253 deInt32 targetHeight)
4254 : m_context (context)
4255 , m_referenceTarget (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
4259 const Context& getContext (void) const { return m_context.getContext(); }
4260 TestLog& getLog (void) const { return m_context.getLog(); }
4261 tcu::ResultCollector& getResultCollector (void) const { return m_context.getResultCollector(); }
// CPU-side model of the color attachment contents.
4263 TextureLevel& getReferenceTarget (void) { return m_referenceTarget; }
4265 ReferenceMemory& getReference (void) { return m_context.getReference(); }
4266 TextureLevel& getReferenceImage (void) { return m_context.getReferenceImage();}
4269 VerifyContext& m_context;
4270 TextureLevel m_referenceTarget;
// Interface for commands recorded inside a render pass (driven by
// SubmitRenderPass). All hooks are optional no-ops by default; subclasses
// override the phases they participate in.
4273 class RenderPassCommand
4276 virtual ~RenderPassCommand (void) {}
4277 virtual const char* getName (void) const = 0;
4279 // Log things that are done during prepare
4280 virtual void logPrepare (TestLog&, size_t) const {}
4281 // Log submitted calls etc.
4282 virtual void logSubmit (TestLog&, size_t) const {}
4284 // Allocate vulkan resources and prepare for submit.
4285 virtual void prepare (PrepareRenderPassContext&) {}
4287 // Submit commands to command buffer.
4288 virtual void submit (SubmitContext&) {}
// Update reference state / check results after execution.
4291 virtual void verify (VerifyRenderPassContext&, size_t) {}
// CmdCommand that owns a render pass, a 256x256 RGBA8 color target and a
// framebuffer, and runs a sequence of RenderPassCommands inside a single
// begin/endRenderPass pair. Takes ownership of (and deletes) the commands.
4294 class SubmitRenderPass : public CmdCommand
4297 SubmitRenderPass (const vector<RenderPassCommand*>& commands);
4298 ~SubmitRenderPass (void);
4299 const char* getName (void) const { return "SubmitRenderPass"; }
4301 void logPrepare (TestLog&, size_t) const;
4302 void logSubmit (TestLog&, size_t) const;
4304 void prepare (PrepareContext&);
4305 void submit (SubmitContext&);
4307 void verify (VerifyContext&, size_t);
// Fixed color-target size (set in the constructor).
4310 const deInt32 m_targetWidth;
4311 const deInt32 m_targetHeight;
4312 vk::Move<vk::VkRenderPass> m_renderPass;
4313 vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
4314 de::MovePtr<vk::Allocation> m_colorTargetMemory2;
4315 vk::Move<vk::VkImage> m_colorTarget;
4316 vk::Move<vk::VkImageView> m_colorTargetView;
4317 vk::Move<vk::VkFramebuffer> m_framebuffer;
// Owned sub-commands, executed in order and deleted in the destructor.
4318 vector<RenderPassCommand*> m_commands;
// Takes ownership of the given commands; the render target is fixed 256x256.
4321 SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
4322 : m_targetWidth (256)
4323 , m_targetHeight (256)
4324 , m_commands (commands)
// Delete the owned sub-commands (raw pointers transferred at construction).
4328 SubmitRenderPass::~SubmitRenderPass()
4330 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4331 delete m_commands[cmdNdx];
// Forward logPrepare to each sub-command inside a named log section.
// Note: sub-commands are indexed by cmdNdx, not the outer commandIndex.
4334 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4336 const string sectionName (de::toString(commandIndex) + ":" + getName());
4337 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4339 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4341 RenderPassCommand& command = *m_commands[cmdNdx];
4342 command.logPrepare(log, cmdNdx);
// Forward logSubmit to each sub-command inside a named log section.
4346 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4348 const string sectionName (de::toString(commandIndex) + ":" + getName());
4349 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4351 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4353 RenderPassCommand& command = *m_commands[cmdNdx];
4354 command.logSubmit(log, cmdNdx);
// Build the rendering infrastructure — render pass (single subpass, one
// cleared color attachment ending in TRANSFER_SRC_OPTIMAL for readback),
// color target image + memory + view, framebuffer — then prepare each
// sub-command with a PrepareRenderPassContext wrapping it all.
4358 void SubmitRenderPass::prepare (PrepareContext& context)
4360 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4361 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4362 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4363 const vk::VkDevice device = context.getContext().getDevice();
4364 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4366 const vk::VkAttachmentReference colorAttachments[] =
4368 { 0, vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL }
4370 const vk::VkSubpassDescription subpass =
4373 vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
4378 DE_LENGTH_OF_ARRAY(colorAttachments),
// Attachment is cleared at load and left in TRANSFER_SRC_OPTIMAL so
// verify() can copy it out without an extra transition.
4385 const vk::VkAttachmentDescription attachment =
4388 vk::VK_FORMAT_R8G8B8A8_UNORM,
4389 vk::VK_SAMPLE_COUNT_1_BIT,
4391 vk::VK_ATTACHMENT_LOAD_OP_CLEAR,
4392 vk::VK_ATTACHMENT_STORE_OP_STORE,
4394 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
4395 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,
4397 vk::VK_IMAGE_LAYOUT_UNDEFINED,
4398 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
// Color target image for the framebuffer.
4401 const vk::VkImageCreateInfo createInfo =
4403 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4407 vk::VK_IMAGE_TYPE_2D,
4408 vk::VK_FORMAT_R8G8B8A8_UNORM,
4409 { (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
4412 vk::VK_SAMPLE_COUNT_1_BIT,
4413 vk::VK_IMAGE_TILING_OPTIMAL,
4414 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4415 vk::VK_SHARING_MODE_EXCLUSIVE,
4416 (deUint32)queueFamilies.size(),
4418 vk::VK_IMAGE_LAYOUT_UNDEFINED
4421 m_colorTarget = vk::createImage(vkd, device, &createInfo);
4424 m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);
// View onto the color target (identity swizzle).
4427 const vk::VkImageViewCreateInfo createInfo =
4429 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
4434 vk::VK_IMAGE_VIEW_TYPE_2D,
4435 vk::VK_FORMAT_R8G8B8A8_UNORM,
4437 vk::VK_COMPONENT_SWIZZLE_R,
4438 vk::VK_COMPONENT_SWIZZLE_G,
4439 vk::VK_COMPONENT_SWIZZLE_B,
4440 vk::VK_COMPONENT_SWIZZLE_A
4443 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4451 m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
4454 const vk::VkRenderPassCreateInfo createInfo =
4456 vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
4470 m_renderPass = vk::createRenderPass(vkd, device, &createInfo);
// Framebuffer binding the color target view to the render pass.
4474 const vk::VkImageView imageViews[] =
4478 const vk::VkFramebufferCreateInfo createInfo =
4480 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
4485 DE_LENGTH_OF_ARRAY(imageViews),
4487 (deUint32)m_targetWidth,
4488 (deUint32)m_targetHeight,
4492 m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
// Finally, let each sub-command allocate its own resources.
4496 PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);
4498 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4500 RenderPassCommand& command = *m_commands[cmdNdx];
4501 command.prepare(renderpassContext);
// Begin the render pass (clearing the target to opaque black), record each
// sub-command inline, and end the pass.
4506 void SubmitRenderPass::submit (SubmitContext& context)
4508 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4509 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4510 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.0f, 0.0f, 0.0f, 1.0f);
4512 const vk::VkRenderPassBeginInfo beginInfo =
4514 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
// Render area covers the full target.
4520 { { 0, 0 }, { (deUint32)m_targetWidth, (deUint32)m_targetHeight } },
4525 vkd.cmdBeginRenderPass(commandBuffer, &beginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
4527 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4529 RenderPassCommand& command = *m_commands[cmdNdx];
4531 command.submit(context);
4534 vkd.cmdEndRenderPass(commandBuffer);
// Verify the rendered color target: build a reference image by replaying each
// sub-command's verify(), then read the GPU image back through a staging
// buffer and compare pixel-exactly with tcu::intThresholdCompare.
// NOTE(review): this extract is missing interleaved lines (braces and several
// struct-initializer fields were dropped); code lines are kept verbatim.
4537 void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
4539 TestLog& log (context.getLog());
4540 tcu::ResultCollector& resultCollector (context.getResultCollector());
4541 const string sectionName (de::toString(commandIndex) + ":" + getName());
4542 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4543 VerifyRenderPassContext verifyContext (context, m_targetWidth, m_targetHeight);
// Reference starts from the same opaque-black clear used in submit().
4545 tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4547 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4549 RenderPassCommand& command = *m_commands[cmdNdx];
4550 command.verify(verifyContext, cmdNdx);
// Readback path: copy the color target into a host-visible buffer.
4554 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4555 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4556 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4557 const vk::VkDevice device = context.getContext().getDevice();
4558 const vk::VkQueue queue = context.getContext().getQueue();
4559 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4560 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4561 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// 4 bytes per pixel (RGBA8) staging buffer for the whole target.
4562 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4563 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Make attachment writes visible to the transfer read of the copy below.
4565 const vk::VkImageMemoryBarrier imageBarrier =
4567 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4570 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4571 vk::VK_ACCESS_TRANSFER_READ_BIT,
// NOTE(review): old and new layout both read TRANSFER_SRC_OPTIMAL here —
// presumably the render pass's finalLayout already performed the transition;
// confirm against the full file.
4573 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4574 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4576 VK_QUEUE_FAMILY_IGNORED,
4577 VK_QUEUE_FAMILY_IGNORED,
4581 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4583 1, // Mip level count
// Make the transfer write visible to the host read after mapping.
4588 const vk::VkBufferMemoryBarrier bufferBarrier =
4590 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4593 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4594 vk::VK_ACCESS_HOST_READ_BIT,
4596 VK_QUEUE_FAMILY_IGNORED,
4597 VK_QUEUE_FAMILY_IGNORED,
4602 const vk::VkBufferImageCopy region =
4607 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4614 (deUint32)m_targetWidth,
4615 (deUint32)m_targetHeight,
4620 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
// NOTE(review): "®ion" below is a character-encoding corruption of "&region"
// (0xAE mojibake); restore to &region when fixing this file.
4621 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, ®ion);
4622 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4625 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4626 queueRun(vkd, queue, *commandBuffer);
// Map, invalidate (memory may be non-coherent), compare, unmap.
4629 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
4631 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_targetWidth * m_targetHeight);
4634 const deUint8* const data = (const deUint8*)ptr;
4635 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
4636 const ConstPixelBufferAccess& refAccess (verifyContext.getReferenceTarget().getAccess());
// Zero threshold: the comparison must be bit-exact.
4638 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4639 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4642 vkd.unmapMemory(device, *memory);
// RAII bundle of the Vulkan objects a render command's pipeline needs;
// vk::Move<> members destroy the handles when the struct goes out of scope.
4647 struct PipelineResources
4649 vk::Move<vk::VkPipeline> pipeline;
// Null when the pipeline uses no descriptors (see createPipelineWithResources).
4650 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
4651 vk::Move<vk::VkPipelineLayout> pipelineLayout;
// Build a simple graphics pipeline (vertex + fragment stage, no blending,
// depth/stencil always-pass, single-sample, full-target viewport/scissor)
// plus its layout and optional descriptor set layout, storing the resulting
// handles in 'resources'. The descriptor set layout is created only when
// 'bindings' is non-empty.
// NOTE(review): this extract is missing interleaved lines (braces and many
// struct-initializer fields were dropped); code lines are kept verbatim.
4654 void createPipelineWithResources (const vk::DeviceInterface& vkd,
4655 const vk::VkDevice device,
4656 const vk::VkRenderPass renderPass,
4657 const deUint32 subpass,
4658 const vk::VkShaderModule& vertexShaderModule,
4659 const vk::VkShaderModule& fragmentShaderModule,
4660 const deUint32 viewPortWidth,
4661 const deUint32 viewPortHeight,
4662 const vector<vk::VkVertexInputBindingDescription>& vertexBindingDescriptions,
4663 const vector<vk::VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
4664 const vector<vk::VkDescriptorSetLayoutBinding>& bindings,
4665 const vk::VkPrimitiveTopology topology,
4666 deUint32 pushConstantRangeCount,
4667 const vk::VkPushConstantRange* pushConstantRanges,
4668 PipelineResources& resources)
// Optional descriptor set layout.
4670 if (!bindings.empty())
4672 const vk::VkDescriptorSetLayoutCreateInfo createInfo =
4674 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
4678 (deUint32)bindings.size(),
4679 bindings.empty() ? DE_NULL : &bindings[0]
4682 resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
// Pipeline layout references the descriptor set layout only if one was made.
4686 const vk::VkDescriptorSetLayout descriptorSetLayout_ = *resources.descriptorSetLayout;
4687 const vk::VkPipelineLayoutCreateInfo createInfo =
4689 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
4693 resources.descriptorSetLayout ? 1u : 0u,
4694 resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,
4696 pushConstantRangeCount,
4700 resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
// Two shader stages: vertex then fragment.
4704 const vk::VkPipelineShaderStageCreateInfo shaderStages[] =
4707 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4710 vk::VK_SHADER_STAGE_VERTEX_BIT,
4716 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4719 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
4720 fragmentShaderModule,
// Depth/stencil: compare ops ALWAYS, stencil ops KEEP — effectively disabled.
4725 const vk::VkPipelineDepthStencilStateCreateInfo depthStencilState =
4727 vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
4732 vk::VK_COMPARE_OP_ALWAYS,
4736 vk::VK_STENCIL_OP_KEEP,
4737 vk::VK_STENCIL_OP_KEEP,
4738 vk::VK_STENCIL_OP_KEEP,
4739 vk::VK_COMPARE_OP_ALWAYS,
4745 vk::VK_STENCIL_OP_KEEP,
4746 vk::VK_STENCIL_OP_KEEP,
4747 vk::VK_STENCIL_OP_KEEP,
4748 vk::VK_COMPARE_OP_ALWAYS,
// Vertex input taken verbatim from the caller-provided descriptions.
4756 const vk::VkPipelineVertexInputStateCreateInfo vertexInputState =
4758 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
4762 (deUint32)vertexBindingDescriptions.size(),
4763 vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],
4765 (deUint32)vertexAttributeDescriptions.size(),
4766 vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
4768 const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyState =
4770 vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
// Static viewport/scissor covering the full viewPortWidth x viewPortHeight.
4776 const vk::VkViewport viewports[] =
4778 { 0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f }
4780 const vk::VkRect2D scissors[] =
4782 { { 0, 0 }, { (deUint32)viewPortWidth, (deUint32)viewPortHeight } }
4784 const vk::VkPipelineViewportStateCreateInfo viewportState =
4786 vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
4789 DE_LENGTH_OF_ARRAY(viewports),
4791 DE_LENGTH_OF_ARRAY(scissors),
// Fill mode, no culling.
4794 const vk::VkPipelineRasterizationStateCreateInfo rasterState =
4796 vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
4802 vk::VK_POLYGON_MODE_FILL,
4803 vk::VK_CULL_MODE_NONE,
4804 vk::VK_FRONT_FACE_COUNTER_CLOCKWISE,
// Single-sample, all sample-mask bits set.
4811 const vk::VkSampleMask sampleMask = ~0u;
4812 const vk::VkPipelineMultisampleStateCreateInfo multisampleState =
4814 vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
4818 vk::VK_SAMPLE_COUNT_1_BIT,
// Blend factors ONE/ZERO with ADD — i.e. pass-through write of all channels.
4825 const vk::VkPipelineColorBlendAttachmentState attachments[] =
4829 vk::VK_BLEND_FACTOR_ONE,
4830 vk::VK_BLEND_FACTOR_ZERO,
4831 vk::VK_BLEND_OP_ADD,
4832 vk::VK_BLEND_FACTOR_ONE,
4833 vk::VK_BLEND_FACTOR_ZERO,
4834 vk::VK_BLEND_OP_ADD,
4835 (vk::VK_COLOR_COMPONENT_R_BIT|
4836 vk::VK_COLOR_COMPONENT_G_BIT|
4837 vk::VK_COLOR_COMPONENT_B_BIT|
4838 vk::VK_COLOR_COMPONENT_A_BIT)
4841 const vk::VkPipelineColorBlendStateCreateInfo colorBlendState =
4843 vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
4848 vk::VK_LOGIC_OP_COPY,
4849 DE_LENGTH_OF_ARRAY(attachments),
4851 { 0.0f, 0.0f, 0.0f, 0.0f }
// Assemble the pipeline from the state blocks above.
4853 const vk::VkGraphicsPipelineCreateInfo createInfo =
4855 vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
4859 DE_LENGTH_OF_ARRAY(shaderStages),
4863 &inputAssemblyState,
4871 *resources.pipelineLayout,
4878 resources.pipeline = vk::createGraphicsPipeline(vkd, device, 0, &createInfo);
// Render-pass command that binds the test buffer as a 16bit index buffer and
// draws one point per index pair; the vertex shader decodes each index into
// an (x, y) framebuffer position (see "index-buffer.vert").
4882 class RenderIndexBuffer : public RenderPassCommand
4885 RenderIndexBuffer (void) {}
4886 ~RenderIndexBuffer (void) {}
4888 const char* getName (void) const { return "RenderIndexBuffer"; }
4889 void logPrepare (TestLog&, size_t) const;
4890 void logSubmit (TestLog&, size_t) const;
4891 void prepare (PrepareRenderPassContext&);
4892 void submit (SubmitContext& context);
4893 void verify (VerifyRenderPassContext&, size_t);
// Pipeline objects created in prepare().
4896 PipelineResources m_resources;
// Size of the test buffer, captured in prepare() for use by verify().
4897 vk::VkDeviceSize m_bufferSize;
4900 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4902 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
4905 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4907 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
4910 void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4912 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4913 const vk::VkDevice device = context.getContext().getDevice();
4914 const vk::VkRenderPass renderPass = context.getRenderPass();
4915 const deUint32 subpass = 0;
4916 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4917 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4919 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4920 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4921 m_bufferSize = context.getBufferSize();
4924 void RenderIndexBuffer::submit (SubmitContext& context)
4926 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4927 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4929 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4930 vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4931 vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
4934 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4936 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4938 const deUint8 x = context.getReference().get(pos * 2);
4939 const deUint8 y = context.getReference().get((pos * 2) + 1);
4941 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that binds the test buffer as a vertex buffer of
// R8G8_UNORM positions and draws one white point per two-byte vertex.
4945 class RenderVertexBuffer : public RenderPassCommand
4948 RenderVertexBuffer (void) {}
4949 ~RenderVertexBuffer (void) {}
4951 const char* getName (void) const { return "RenderVertexBuffer"; }
4952 void logPrepare (TestLog&, size_t) const;
4953 void logSubmit (TestLog&, size_t) const;
4954 void prepare (PrepareRenderPassContext&);
4955 void submit (SubmitContext& context);
4956 void verify (VerifyRenderPassContext&, size_t);
// Pipeline objects created in prepare().
4959 PipelineResources m_resources;
// Size of the test buffer, captured in prepare() for use by verify().
4960 vk::VkDeviceSize m_bufferSize;
4963 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4965 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4968 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4970 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
// Build the pipeline that consumes the test buffer as a vertex buffer: one
// binding with a single R8G8_UNORM attribute (two bytes per vertex).
// NOTE(review): this extract is missing interleaved lines — the binding's
// stride and the attribute's location/binding/offset fields were dropped;
// code lines below are kept verbatim.
4973 void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4975 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4976 const vk::VkDevice device = context.getContext().getDevice();
4977 const vk::VkRenderPass renderPass = context.getRenderPass();
4978 const deUint32 subpass = 0;
4979 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4980 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4982 vector<vk::VkVertexInputAttributeDescription> vertexAttributeDescriptions;
4983 vector<vk::VkVertexInputBindingDescription> vertexBindingDescriptions;
// Single per-vertex binding.
4986 const vk::VkVertexInputBindingDescription vertexBindingDescription =
4990 vk::VK_VERTEX_INPUT_RATE_VERTEX
4993 vertexBindingDescriptions.push_back(vertexBindingDescription);
// Single attribute: two unsigned-normalized bytes interpreted as (x, y).
4996 const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
5000 vk::VK_FORMAT_R8G8_UNORM,
5004 vertexAttributeDescriptions.push_back(vertexAttributeDescription);
5006 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5007 vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// Remember the buffer size so verify() can replay the same draw on the CPU.
5009 m_bufferSize = context.getBufferSize();
5012 void RenderVertexBuffer::submit (SubmitContext& context)
5014 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5015 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5016 const vk::VkDeviceSize offset = 0;
5017 const vk::VkBuffer buffer = context.getBuffer();
5019 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5020 vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
5021 vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0);
5024 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
5026 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
5028 const deUint8 x = context.getReference().get(pos * 2);
5029 const deUint8 y = context.getReference().get((pos * 2) + 1);
5031 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as a
// set of uniform buffers. The buffer is split into MAX_UNIFORM_BUFFER_SIZE
// slices, one descriptor set per slice (see prepare()).
5035 class RenderVertexUniformBuffer : public RenderPassCommand
5038 RenderVertexUniformBuffer (void) {}
5039 ~RenderVertexUniformBuffer (void);
5041 const char* getName (void) const { return "RenderVertexUniformBuffer"; }
5042 void logPrepare (TestLog&, size_t) const;
5043 void logSubmit (TestLog&, size_t) const;
5044 void prepare (PrepareRenderPassContext&);
5045 void submit (SubmitContext& context);
5046 void verify (VerifyRenderPassContext&, size_t);
5049 PipelineResources m_resources;
5050 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// Raw descriptor-set handles; freed implicitly when the pool is destroyed.
5051 vector<vk::VkDescriptorSet> m_descriptorSets;
5053 vk::VkDeviceSize m_bufferSize;
5056 RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
5060 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5062 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5065 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5067 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Create the pipeline plus one descriptor set per MAX_UNIFORM_BUFFER_SIZE
// slice of the test buffer, each set pointing its uniform-buffer binding at
// the corresponding slice (the last slice may be shorter).
// NOTE(review): this extract is missing interleaved lines (braces and several
// struct-initializer fields were dropped); code lines are kept verbatim.
5070 void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
5072 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5073 const vk::VkDevice device = context.getContext().getDevice();
5074 const vk::VkRenderPass renderPass = context.getRenderPass();
5075 const deUint32 subpass = 0;
5076 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
5077 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5078 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5080 m_bufferSize = context.getBufferSize();
// Single uniform-buffer binding visible to the vertex stage.
5083 const vk::VkDescriptorSetLayoutBinding binding =
5086 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5088 vk::VK_SHADER_STAGE_VERTEX_BIT,
5092 bindings.push_back(binding);
5095 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5096 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor set per buffer slice; round up so the tail is covered.
5099 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
5100 const vk::VkDescriptorPoolSize poolSizes =
5102 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5105 const vk::VkDescriptorPoolCreateInfo createInfo =
5107 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5109 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5116 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5117 m_descriptorSets.resize(descriptorCount);
5120 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5122 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5123 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5125 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): ownership transfers to this object; sets die with the pool.
5133 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// Point this set at its slice; the final slice is clamped to buffer end.
5136 const vk::VkDescriptorBufferInfo bufferInfo =
5138 context.getBuffer(),
5139 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
5140 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5141 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5142 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5144 const vk::VkWriteDescriptorSet write =
5146 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5148 m_descriptorSets[descriptorSetNdx],
5152 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5158 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5163 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5165 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5166 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5168 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5170 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5172 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5173 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5174 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5175 const deUint32 count = (deUint32)(size / 2);
5177 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5178 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5182 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5184 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5186 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5187 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5188 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5189 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5190 const size_t count = size / 2;
5192 for (size_t pos = 0; pos < count; pos++)
5194 const deUint8 x = context.getReference().get(offset + pos * 2);
5195 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5197 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as
// R16_UINT uniform texel buffers, one buffer view (and descriptor set) per
// maxTexelBufferElements-sized slice.
5202 class RenderVertexUniformTexelBuffer : public RenderPassCommand
5205 RenderVertexUniformTexelBuffer (void) {}
5206 ~RenderVertexUniformTexelBuffer (void);
5208 const char* getName (void) const { return "RenderVertexUniformTexelBuffer"; }
5209 void logPrepare (TestLog&, size_t) const;
5210 void logSubmit (TestLog&, size_t) const;
5211 void prepare (PrepareRenderPassContext&);
5212 void submit (SubmitContext& context);
5213 void verify (VerifyRenderPassContext&, size_t);
5216 PipelineResources m_resources;
5217 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5218 vector<vk::VkDescriptorSet> m_descriptorSets;
// Raw handles; destroyed manually in the destructor via m_vkd/m_device.
5219 vector<vk::VkBufferView> m_bufferViews;
5221 const vk::DeviceInterface* m_vkd;
5222 vk::VkDevice m_device;
5223 vk::VkDeviceSize m_bufferSize;
// Device limit maxTexelBufferElements, cached in prepare().
5224 deUint32 m_maxUniformTexelCount;
5227 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5229 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5231 if (!!m_bufferViews[bufferViewNdx])
5233 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5234 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5239 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5241 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5244 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5246 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Create the pipeline plus, for each slice of at most maxTexelBufferElements
// 16bit texels, an R16_UINT buffer view over the test buffer and a descriptor
// set referencing it.
// NOTE(review): this extract is missing interleaved lines (braces and several
// struct fields were dropped). In particular the assignments to m_vkd and
// m_device that the destructor relies on are not visible here — confirm they
// exist in the full file.
5249 void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
5251 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5252 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5253 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5254 const vk::VkDevice device = context.getContext().getDevice();
5255 const vk::VkRenderPass renderPass = context.getRenderPass();
5256 const deUint32 subpass = 0;
5257 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
5258 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5259 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5263 m_bufferSize = context.getBufferSize();
// Device limit on texels per buffer view; determines the slice size below.
5264 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5267 const vk::VkDescriptorSetLayoutBinding binding =
5270 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5272 vk::VK_SHADER_STAGE_VERTEX_BIT,
5276 bindings.push_back(binding);
5279 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5280 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One set/view per slice of m_maxUniformTexelCount 16bit (2 byte) texels.
5283 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
5284 const vk::VkDescriptorPoolSize poolSizes =
5286 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5289 const vk::VkDescriptorPoolCreateInfo createInfo =
5291 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5293 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5300 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5301 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5302 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5305 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Texels in this slice; the last slice is clamped to the buffer end.
5307 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5308 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5309 : m_maxUniformTexelCount * 2) / 2;
5310 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5311 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5313 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5321 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// R16_UINT view over this slice of the test buffer.
5324 const vk::VkBufferViewCreateInfo createInfo =
5326 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5330 context.getBuffer(),
5331 vk::VK_FORMAT_R16_UINT,
5332 descriptorSetNdx * m_maxUniformTexelCount * 2,
5336 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5340 const vk::VkWriteDescriptorSet write =
5342 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5344 m_descriptorSets[descriptorSetNdx],
5348 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5351 &m_bufferViews[descriptorSetNdx]
5354 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5359 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5361 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5362 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5364 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5366 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5368 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5369 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5370 : m_maxUniformTexelCount * 2) / 2;
5372 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5373 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5377 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5379 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5381 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 2;
5382 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5383 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5384 : m_maxUniformTexelCount * 2) / 2;
5386 for (size_t pos = 0; pos < (size_t)count; pos++)
5388 const deUint8 x = context.getReference().get(offset + pos * 2);
5389 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5391 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as
// storage buffers, split into MAX_STORAGE_BUFFER_SIZE slices with one
// descriptor set per slice.
5396 class RenderVertexStorageBuffer : public RenderPassCommand
5399 RenderVertexStorageBuffer (void) {}
5400 ~RenderVertexStorageBuffer (void);
5402 const char* getName (void) const { return "RenderVertexStorageBuffer"; }
5403 void logPrepare (TestLog&, size_t) const;
5404 void logSubmit (TestLog&, size_t) const;
5405 void prepare (PrepareRenderPassContext&);
5406 void submit (SubmitContext& context);
5407 void verify (VerifyRenderPassContext&, size_t);
5410 PipelineResources m_resources;
5411 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// Raw descriptor-set handles; freed implicitly when the pool is destroyed.
5412 vector<vk::VkDescriptorSet> m_descriptorSets;
5414 vk::VkDeviceSize m_bufferSize;
5417 RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
5421 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5423 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5426 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5428 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Create the pipeline plus one descriptor set per MAX_STORAGE_BUFFER_SIZE
// slice of the test buffer, each set pointing its storage-buffer binding at
// the corresponding slice (the last slice may be shorter).
// NOTE(review): this extract is missing interleaved lines (braces and several
// struct-initializer fields were dropped); code lines are kept verbatim.
5431 void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
5433 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5434 const vk::VkDevice device = context.getContext().getDevice();
5435 const vk::VkRenderPass renderPass = context.getRenderPass();
5436 const deUint32 subpass = 0;
5437 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
5438 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5439 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5441 m_bufferSize = context.getBufferSize();
// Single storage-buffer binding visible to the vertex stage.
5444 const vk::VkDescriptorSetLayoutBinding binding =
5447 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5449 vk::VK_SHADER_STAGE_VERTEX_BIT,
5453 bindings.push_back(binding);
5456 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5457 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor set per buffer slice; round up so the tail is covered.
5460 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
5461 const vk::VkDescriptorPoolSize poolSizes =
5463 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5466 const vk::VkDescriptorPoolCreateInfo createInfo =
5468 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5470 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5477 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5478 m_descriptorSets.resize(descriptorCount);
5481 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5483 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5484 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5486 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): ownership transfers to this object; sets die with the pool.
5494 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// Point this set at its slice; the final slice is clamped to buffer end.
5497 const vk::VkDescriptorBufferInfo bufferInfo =
5499 context.getBuffer(),
5500 descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
5501 de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
5503 const vk::VkWriteDescriptorSet write =
5505 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5507 m_descriptorSets[descriptorSetNdx],
5511 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5517 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5522 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5524 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5525 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5527 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5529 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5531 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5532 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5533 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5535 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5536 vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
// Update the reference image: every byte pair (x, y) of the reference buffer
// produced one white point in the render target, mirroring submit() above.
5540 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5542 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5544 const size_t offset = descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
// Same per-set byte count as used for the draw in submit().
5545 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5546 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5547 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5549 for (size_t pos = 0; pos < size / 2; pos++)
5551 const deUint8 x = context.getReference().get(offset + pos * 2);
5552 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5554 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that draws points whose coordinates are fetched from the
// test buffer bound as one or more storage texel buffers (split because of the
// maxTexelBufferElements limit).
5559 class RenderVertexStorageTexelBuffer : public RenderPassCommand
5562 RenderVertexStorageTexelBuffer (void) {}
5563 ~RenderVertexStorageTexelBuffer (void);
5565 const char* getName (void) const { return "RenderVertexStorageTexelBuffer"; }
5566 void logPrepare (TestLog&, size_t) const;
5567 void logSubmit (TestLog&, size_t) const;
5568 void prepare (PrepareRenderPassContext&);
5569 void submit (SubmitContext& context);
5570 void verify (VerifyRenderPassContext&, size_t);
// Pipeline plus one descriptor set / buffer view per maxTexelBufferElements-sized slice.
5573 PipelineResources m_resources;
5574 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5575 vector<vk::VkDescriptorSet> m_descriptorSets;
5576 vector<vk::VkBufferView> m_bufferViews;
// Cached in prepare() so the destructor can destroy the raw buffer views.
5578 const vk::DeviceInterface* m_vkd;
5579 vk::VkDevice m_device;
5580 vk::VkDeviceSize m_bufferSize;
5581 deUint32 m_maxStorageTexelCount;
// Destroy the manually created buffer views (they are raw handles, not vk::Move<>).
5584 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5586 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5588 if (!!m_bufferViews[bufferViewNdx])
5590 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5591 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
// Log what prepare() will do for this command. The message previously said
// "storage buffer"; this class actually binds the buffer as a storage texel buffer.
5596 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5598 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
// Log what submit() will do. Fixed the message to name the actual descriptor
// type (storage texel buffer) instead of "storage buffer".
5601 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5603 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
// Build the pipeline and per-slice descriptor sets: the buffer is exposed as
// R32_UINT texel buffers, each view covering at most maxTexelBufferElements texels.
5606 void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
5608 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5609 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5610 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5611 const vk::VkDevice device = context.getContext().getDevice();
5612 const vk::VkRenderPass renderPass = context.getRenderPass();
5613 const deUint32 subpass = 0;
5614 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
5615 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5616 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5620 m_bufferSize = context.getBufferSize();
// Device limit that forces splitting the buffer into multiple views/sets.
5621 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5624 const vk::VkDescriptorSetLayoutBinding binding =
5627 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5629 vk::VK_SHADER_STAGE_VERTEX_BIT,
5633 bindings.push_back(binding);
5636 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5637 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor set per slice of m_maxStorageTexelCount 4-byte texels.
5640 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
5641 const vk::VkDescriptorPoolSize poolSizes =
5643 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5646 const vk::VkDescriptorPoolCreateInfo createInfo =
5648 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5650 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5657 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5658 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5659 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5662 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5664 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5665 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5667 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): raw handle is freed implicitly with the pool, not by vk::Move<>.
5675 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5678 const vk::VkBufferViewCreateInfo createInfo =
5680 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5684 context.getBuffer(),
5685 vk::VK_FORMAT_R32_UINT,
// Byte offset / range of this slice; the last slice may be shorter.
5686 descriptorSetNdx * m_maxStorageTexelCount * 4,
5687 (deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
5690 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5694 const vk::VkWriteDescriptorSet write =
5696 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5698 m_descriptorSets[descriptorSetNdx],
5702 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5705 &m_bufferViews[descriptorSetNdx]
5708 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record one draw per texel-buffer slice; each vertex reads two bytes of data.
5713 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5715 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5716 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5718 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5720 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Bytes in this slice (last one may be partial) divided by 2 bytes per vertex.
5722 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5723 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5724 : m_maxStorageTexelCount * 4) / 2;
5726 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5727 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
// Replay the draws against the reference buffer: each byte pair (x, y) marks
// one white pixel in the reference target.
5731 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5733 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5735 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
// Vertex count per slice, matching submit().
5736 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5737 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5738 : m_maxStorageTexelCount * 4) / 2;
// Sanity-check that the reference data covers exactly the sliced ranges.
5740 DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5741 DE_ASSERT(context.getReference().getSize() > offset);
5742 DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5744 for (size_t pos = 0; pos < (size_t)count; pos++)
5746 const deUint8 x = context.getReference().get(offset + pos * 2);
5747 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5749 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that draws points whose coordinates come from the test
// image bound as a storage image in the vertex shader.
5754 class RenderVertexStorageImage : public RenderPassCommand
5757 RenderVertexStorageImage (void) {}
5758 ~RenderVertexStorageImage (void);
5760 const char* getName (void) const { return "RenderVertexStorageImage"; }
5761 void logPrepare (TestLog&, size_t) const;
5762 void logSubmit (TestLog&, size_t) const;
5763 void prepare (PrepareRenderPassContext&);
5764 void submit (SubmitContext& context);
5765 void verify (VerifyRenderPassContext&, size_t);
// Single descriptor set: one storage-image binding through m_imageView.
5768 PipelineResources m_resources;
5769 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5770 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5771 vk::Move<vk::VkImageView> m_imageView;
// All members are RAII (vk::Move<>), so the destructor has nothing to do.
5774 RenderVertexStorageImage::~RenderVertexStorageImage (void)
// Log what prepare() will do for this command.
5778 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5780 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
// Log what submit() will do for this command.
5783 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5785 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
// Create the pipeline, descriptor pool/set and an RGBA8 image view, then point
// the single storage-image descriptor at the test image.
5788 void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
5790 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5791 const vk::VkDevice device = context.getContext().getDevice();
5792 const vk::VkRenderPass renderPass = context.getRenderPass();
5793 const deUint32 subpass = 0;
5794 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
5795 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5796 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5799 const vk::VkDescriptorSetLayoutBinding binding =
5802 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5804 vk::VK_SHADER_STAGE_VERTEX_BIT,
5808 bindings.push_back(binding);
5811 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5812 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5815 const vk::VkDescriptorPoolSize poolSizes =
5817 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5820 const vk::VkDescriptorPoolCreateInfo createInfo =
5822 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5824 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5831 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5835 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5836 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5838 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5846 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 view over the test image for the storage-image binding.
5849 const vk::VkImageViewCreateInfo createInfo =
5851 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5856 vk::VK_IMAGE_VIEW_TYPE_2D,
5857 vk::VK_FORMAT_R8G8B8A8_UNORM,
5858 vk::makeComponentMappingRGBA(),
5860 vk::VK_IMAGE_ASPECT_COLOR_BIT,
5868 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Layout must match whatever layout the image is in when the render pass runs.
5872 const vk::VkDescriptorImageInfo imageInfo =
5876 context.getImageLayout()
5878 const vk::VkWriteDescriptorSet write =
5880 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5886 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5892 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Draw two point vertices per image texel; each texel yields two (x, y) pairs.
5897 void RenderVertexStorageImage::submit (SubmitContext& context)
5899 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5900 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5902 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5904 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5905 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
// Mirror submit(): each reference texel's (r, g) and (b, a) channel pairs each
// mark one white pixel in the reference target.
5908 void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5910 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5912 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
// pos / 2 selects the texel; even/odd pos selects which channel pair below.
5913 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5916 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5918 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
// Render-pass command that draws points whose coordinates are sampled from the
// test image through a combined image sampler in the vertex shader.
5922 class RenderVertexSampledImage : public RenderPassCommand
5925 RenderVertexSampledImage (void) {}
5926 ~RenderVertexSampledImage (void);
5928 const char* getName (void) const { return "RenderVertexSampledImage"; }
5929 void logPrepare (TestLog&, size_t) const;
5930 void logSubmit (TestLog&, size_t) const;
5931 void prepare (PrepareRenderPassContext&);
5932 void submit (SubmitContext& context);
5933 void verify (VerifyRenderPassContext&, size_t);
// Single combined-image-sampler descriptor built from m_imageView + m_sampler.
5936 PipelineResources m_resources;
5937 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5938 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5939 vk::Move<vk::VkImageView> m_imageView;
5940 vk::Move<vk::VkSampler> m_sampler;
// All members are RAII (vk::Move<>), so the destructor has nothing to do.
5943 RenderVertexSampledImage::~RenderVertexSampledImage (void)
// Log what prepare() will do for this command.
5947 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5949 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
// Log what submit() will do for this command.
5952 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5954 log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
// Create pipeline, descriptor pool/set, an RGBA8 image view and a nearest
// sampler, then write the combined-image-sampler descriptor.
5957 void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
5959 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5960 const vk::VkDevice device = context.getContext().getDevice();
5961 const vk::VkRenderPass renderPass = context.getRenderPass();
5962 const deUint32 subpass = 0;
5963 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
5964 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5965 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5968 const vk::VkDescriptorSetLayoutBinding binding =
5971 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5973 vk::VK_SHADER_STAGE_VERTEX_BIT,
5977 bindings.push_back(binding);
5980 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5981 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5984 const vk::VkDescriptorPoolSize poolSizes =
5986 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5989 const vk::VkDescriptorPoolCreateInfo createInfo =
5991 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5993 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6000 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6004 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6005 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6007 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6015 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 view over the test image.
6018 const vk::VkImageViewCreateInfo createInfo =
6020 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
6025 vk::VK_IMAGE_VIEW_TYPE_2D,
6026 vk::VK_FORMAT_R8G8B8A8_UNORM,
6027 vk::makeComponentMappingRGBA(),
6029 vk::VK_IMAGE_ASPECT_COLOR_BIT,
6037 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Nearest filtering + clamp-to-edge so texel values are read back exactly.
6041 const vk::VkSamplerCreateInfo createInfo =
6043 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
6047 vk::VK_FILTER_NEAREST,
6048 vk::VK_FILTER_NEAREST,
6050 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
6051 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6052 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6053 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6058 vk::VK_COMPARE_OP_ALWAYS,
6061 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
6065 m_sampler = vk::createSampler(vkd, device, &createInfo);
// Layout must match the image's layout during the render pass.
6069 const vk::VkDescriptorImageInfo imageInfo =
6073 context.getImageLayout()
6075 const vk::VkWriteDescriptorSet write =
6077 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6083 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
6089 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Draw two point vertices per image texel, sampling coordinates in the shader.
6094 void RenderVertexSampledImage::submit (SubmitContext& context)
6096 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6097 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6099 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6101 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
6102 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
// Mirror submit(): each reference texel's (r, g) and (b, a) channel pairs each
// mark one white pixel in the reference target.
6105 void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
6107 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
6109 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
// pos / 2 selects the texel; even/odd pos selects which channel pair below.
6110 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
6113 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
6115 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
// Render-pass command that renders a full-screen quad whose fragment shader
// hashes the buffer contents through uniform-buffer bindings (the buffer is
// split into MAX_UNIFORM_BUFFER_SIZE slices, one descriptor set each).
6119 class RenderFragmentUniformBuffer : public RenderPassCommand
6122 RenderFragmentUniformBuffer (void) {}
6123 ~RenderFragmentUniformBuffer (void);
6125 const char* getName (void) const { return "RenderFragmentUniformBuffer"; }
6126 void logPrepare (TestLog&, size_t) const;
6127 void logSubmit (TestLog&, size_t) const;
6128 void prepare (PrepareRenderPassContext&);
6129 void submit (SubmitContext& context);
6130 void verify (VerifyRenderPassContext&, size_t);
6133 PipelineResources m_resources;
6134 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6135 vector<vk::VkDescriptorSet> m_descriptorSets;
// Cached in prepare() for use by submit()/verify().
6137 vk::VkDeviceSize m_bufferSize;
6138 size_t m_targetWidth;
6139 size_t m_targetHeight;
// Descriptor sets are freed with the pool; nothing else to release here.
6142 RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
// Log what prepare() will do for this command.
6146 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6148 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
6151 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6153 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Create the quad pipeline (with a push-constant range for per-draw call
// parameters) and one uniform-buffer descriptor set per buffer slice.
6156 void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
6158 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6159 const vk::VkDevice device = context.getContext().getDevice();
6160 const vk::VkRenderPass renderPass = context.getRenderPass();
6161 const deUint32 subpass = 0;
6162 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6163 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
6164 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6166 m_bufferSize = context.getBufferSize();
6167 m_targetWidth = context.getTargetWidth();
6168 m_targetHeight = context.getTargetHeight();
6171 const vk::VkDescriptorSetLayoutBinding binding =
6174 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6176 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6180 bindings.push_back(binding);
// Push constants carry the call id and values-per-pixel to the fragment shader.
6182 const vk::VkPushConstantRange pushConstantRange =
6184 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6189 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6190 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
// One descriptor set per MAX_UNIFORM_BUFFER_SIZE-byte slice of the buffer.
6193 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
6194 const vk::VkDescriptorPoolSize poolSizes =
6196 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6199 const vk::VkDescriptorPoolCreateInfo createInfo =
6201 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6203 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6210 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6211 m_descriptorSets.resize(descriptorCount);
6214 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6216 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6217 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6219 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): raw handle is freed implicitly with the pool, not by vk::Move<>.
6227 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// Offset/range of this slice; the last slice may be shorter.
6230 const vk::VkDescriptorBufferInfo bufferInfo =
6232 context.getBuffer(),
6233 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
6234 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6235 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6236 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6238 const vk::VkWriteDescriptorSet write =
6240 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6242 m_descriptorSets[descriptorSetNdx],
6246 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6252 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record one full-screen-quad draw per descriptor set, passing the call id and
// values-per-pixel to the fragment shader via push constants.
6257 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6259 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6260 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6262 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6264 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Layout of this anonymous struct must match the shader's push-constant block.
6268 const deUint32 callId;
6269 const deUint32 valuesPerPixel;
6272 (deUint32)descriptorSetNdx,
// How many 32-bit buffer values each pixel must fold in to cover the buffer.
6273 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6276 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6277 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6278 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// CPU replay of the fragment shader's pointer-chasing hash: for each pixel,
// repeatedly look up 32-bit little-endian values in the reference buffer and
// write the final value back as an RGBA8 color.
6282 void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
6284 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6285 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
6286 const size_t arrayIntSize = arraySize * 4;
6288 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6289 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// NOTE(review): 256u here looks like a hard-coded target width — presumably
// equal to m_targetWidth for these tests; confirm before reusing elsewhere.
6291 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (arrayIntSize / valuesPerPixel), m_descriptorSets.size() - 1);
6293 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6295 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
6296 const deUint32 callId = (deUint32)descriptorSetNdx;
6298 const deUint32 id = callId * ((deUint32)arrayIntSize / valuesPerPixel) + (deUint32)y * 256u + (deUint32)x;
// Pixels handled by earlier calls are skipped for this callId.
6300 if (y * 256u + x < callId * (arrayIntSize / valuesPerPixel))
6304 deUint32 value = id;
6306 for (deUint32 i = 0; i < valuesPerPixel; i++)
// Assemble the next value from 4 bytes, little-endian, at value-dependent index.
6308 value = ((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 0))
6309 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 1)) << 8u)
6310 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 2)) << 16u)
6311 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 3)) << 24u);
// Unpack the final 32-bit value into normalized RGBA.
6314 const UVec4 vec ((value >> 0u) & 0xFFu,
6315 (value >> 8u) & 0xFFu,
6316 (value >> 16u) & 0xFFu,
6317 (value >> 24u) & 0xFFu);
6319 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that renders a full-screen quad whose fragment shader
// hashes the whole buffer through a single storage-buffer binding.
6325 class RenderFragmentStorageBuffer : public RenderPassCommand
6328 RenderFragmentStorageBuffer (void) {}
6329 ~RenderFragmentStorageBuffer (void);
6331 const char* getName (void) const { return "RenderFragmentStorageBuffer"; }
6332 void logPrepare (TestLog&, size_t) const;
6333 void logSubmit (TestLog&, size_t) const;
6334 void prepare (PrepareRenderPassContext&);
6335 void submit (SubmitContext& context);
6336 void verify (VerifyRenderPassContext&, size_t);
6339 PipelineResources m_resources;
6340 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6341 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
// Cached in prepare() for use by submit()/verify().
6343 vk::VkDeviceSize m_bufferSize;
6344 size_t m_targetWidth;
6345 size_t m_targetHeight;
// All members are RAII (vk::Move<>), so the destructor has nothing to do.
6348 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
// Log what prepare() will do for this command.
6352 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6354 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
6357 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6359 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Create the quad pipeline (with a push-constant range) and a single
// storage-buffer descriptor set covering the whole buffer — unlike the uniform
// case, storage buffers need not be sliced.
6362 void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
6364 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6365 const vk::VkDevice device = context.getContext().getDevice();
6366 const vk::VkRenderPass renderPass = context.getRenderPass();
6367 const deUint32 subpass = 0;
6368 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6369 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
6370 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6372 m_bufferSize = context.getBufferSize();
6373 m_targetWidth = context.getTargetWidth();
6374 m_targetHeight = context.getTargetHeight();
6377 const vk::VkDescriptorSetLayoutBinding binding =
6380 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6382 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6386 bindings.push_back(binding);
// Push constants carry values-per-pixel and the buffer size to the shader.
6388 const vk::VkPushConstantRange pushConstantRange =
6390 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6395 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6396 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6399 const deUint32 descriptorCount = 1;
6400 const vk::VkDescriptorPoolSize poolSizes =
6402 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6405 const vk::VkDescriptorPoolCreateInfo createInfo =
6407 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6409 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6416 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6420 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6421 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6423 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6431 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6434 const vk::VkDescriptorBufferInfo bufferInfo =
6436 context.getBuffer(),
6440 const vk::VkWriteDescriptorSet write =
6442 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6444 m_descriptorSet.get(),
6448 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6454 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record a single full-screen-quad draw; push constants tell the fragment
// shader how many values each pixel folds in and the total buffer size.
6459 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6461 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6462 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6464 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
// Layout of this anonymous struct must match the shader's push-constant block.
6468 const deUint32 valuesPerPixel;
6469 const deUint32 bufferSize;
6472 (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6473 (deUint32)m_bufferSize
6476 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6477 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6478 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// CPU replay of the fragment shader hash: chase valuesPerPixel little-endian
// 32-bit lookups through the reference buffer and store the result as RGBA8.
6481 void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
6483 const deUint32 valuesPerPixel = (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);
6485 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6486 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// NOTE(review): 256u appears to be the hard-coded target width — presumably
// equal to m_targetWidth for these tests; confirm before reusing elsewhere.
6488 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6490 deUint32 value = id;
6492 for (deUint32 i = 0; i < valuesPerPixel; i++)
// Assemble the next value from 4 bytes, little-endian, at value-dependent index.
6494 value = (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
6495 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
6496 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
6497 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);
// Unpack the final 32-bit value into normalized RGBA.
6500 const UVec4 vec ((value >> 0u) & 0xFFu,
6501 (value >> 8u) & 0xFFu,
6502 (value >> 16u) & 0xFFu,
6503 (value >> 24u) & 0xFFu);
6505 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that renders a full-screen quad whose fragment shader
// hashes the buffer through uniform texel buffer bindings (sliced because of
// the maxTexelBufferElements limit).
6509 class RenderFragmentUniformTexelBuffer : public RenderPassCommand
6512 RenderFragmentUniformTexelBuffer (void) {}
6513 ~RenderFragmentUniformTexelBuffer (void);
6515 const char* getName (void) const { return "RenderFragmentUniformTexelBuffer"; }
6516 void logPrepare (TestLog&, size_t) const;
6517 void logSubmit (TestLog&, size_t) const;
6518 void prepare (PrepareRenderPassContext&);
6519 void submit (SubmitContext& context);
6520 void verify (VerifyRenderPassContext&, size_t);
// One descriptor set / buffer view per maxTexelBufferElements-sized slice.
6523 PipelineResources m_resources;
6524 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6525 vector<vk::VkDescriptorSet> m_descriptorSets;
6526 vector<vk::VkBufferView> m_bufferViews;
// Cached in prepare() so the destructor can destroy the raw buffer views.
6528 const vk::DeviceInterface* m_vkd;
6529 vk::VkDevice m_device;
6530 vk::VkDeviceSize m_bufferSize;
6531 deUint32 m_maxUniformTexelCount;
6532 size_t m_targetWidth;
6533 size_t m_targetHeight;
// Destroy the manually created buffer views (they are raw handles, not vk::Move<>).
6536 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6538 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6540 if (!!m_bufferViews[bufferViewNdx])
6542 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6543 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
// Log what prepare() will do for this command. The message previously said
// "uniform buffer"; this class actually binds the buffer as a uniform texel buffer.
6548 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6550 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
// Log what submit() will do. Fixed the message to name the actual descriptor
// type (uniform texel buffer) instead of "uniform buffer".
6553 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6555 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
// Build the pipeline and per-chunk descriptor resources:
//  - one VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER binding for the fragment stage;
//  - the buffer is covered by ceil(bufferSize / (maxTexelBufferElements*4))
//    R32_UINT buffer views (4 bytes per texel), one descriptor set each;
//  - a fragment-stage push-constant range for the per-draw call parameters.
// NOTE(review): m_vkd/m_device are presumably assigned here on lines elided
// from this view (the destructor relies on them) — confirm against full file.
6558 void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
6560 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6561 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6562 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6563 const vk::VkDevice device = context.getContext().getDevice();
6564 const vk::VkRenderPass renderPass = context.getRenderPass();
6565 const deUint32 subpass = 0;
6566 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6567 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
6568 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Cache sizes/limits used again by submit() and verify().
6572 m_bufferSize = context.getBufferSize();
6573 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6574 m_targetWidth = context.getTargetWidth();
6575 m_targetHeight = context.getTargetHeight();
// Single texel-buffer binding visible to the fragment shader.
6578 const vk::VkDescriptorSetLayoutBinding binding =
6581 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6583 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6587 bindings.push_back(binding);
6589 const vk::VkPushConstantRange pushConstantRange =
6591 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
// No vertex inputs: the quad is generated by "render-quad.vert".
6596 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6597 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
// One descriptor (set + view) per maxTexelBufferElements*4-byte chunk.
6600 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
6601 const vk::VkDescriptorPoolSize poolSizes =
6603 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6606 const vk::VkDescriptorPoolCreateInfo createInfo =
6608 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6610 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6617 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6618 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6619 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6622 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Texel count of this chunk; the last chunk may be smaller than the limit.
6624 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6625 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6626 : m_maxUniformTexelCount * 4) / 4;
6627 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6628 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6630 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): keep the raw handle; lifetime is tied to the pool.
6638 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// R32_UINT view over this chunk, offset in bytes (4 bytes per texel).
6641 const vk::VkBufferViewCreateInfo createInfo =
6643 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6647 context.getBuffer(),
6648 vk::VK_FORMAT_R32_UINT,
6649 descriptorSetNdx * m_maxUniformTexelCount * 4,
6653 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
// Point the set's texel-buffer binding at the chunk's view.
6657 const vk::VkWriteDescriptorSet write =
6659 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6661 m_descriptorSets[descriptorSetNdx],
6665 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6668 &m_bufferViews[descriptorSetNdx]
6671 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record one full-screen-quad draw (6 vertices) per descriptor set, passing
// the chunk index and addressing parameters to the fragment shader through
// push constants.
6676 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6678 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6679 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6681 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6683 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Push-constant layout; must match "uniform-texel-buffer.frag".
6687 const deUint32 callId;
6688 const deUint32 valuesPerPixel;
6689 const deUint32 maxUniformTexelCount;
// valuesPerPixel = ceil(total accessible texels / pixel count), so every
// texel is read by some pixel; verify() recomputes the same value.
6692 (deUint32)descriptorSetNdx,
6693 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6694 m_maxUniformTexelCount
6697 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6698 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6699 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// Recompute the expected framebuffer on the CPU: for each pixel, chase
// valuesPerPixel dependent loads through the reference copy of the buffer
// (mirroring the fragment shader) and write the final 32-bit value as RGBA8.
// NOTE(review): the literal 256u appears to assume a 256-pixel-wide target —
// confirm against m_targetWidth in the full file.
6703 void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
// Same valuesPerPixel formula as submit(); keep the two in sync.
6705 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);
6707 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6708 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// First draw call (chunk) that can have written this pixel.
6710 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6712 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Byte offset of this chunk within the buffer.
6714 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 4;
6715 const deUint32 callId = (deUint32)descriptorSetNdx;
6717 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
// Texel count of this chunk (last chunk may be short).
6718 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6719 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6720 : m_maxUniformTexelCount * 4) / 4;
// Pixels below this call's range are skipped by the shader for this call.
6722 if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
// Dependent-load chain seeded with the pixel id (little-endian 32-bit reads).
6726 deUint32 value = id;
6728 for (deUint32 i = 0; i < valuesPerPixel; i++)
6730 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6731 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6732 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6733 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
// Unpack the final value into RGBA8 and store as normalized floats.
6736 const UVec4 vec ((value >> 0u) & 0xFFu,
6737 (value >> 8u) & 0xFFu,
6738 (value >> 16u) & 0xFFu,
6739 (value >> 24u) & 0xFFu);
6741 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command: full-screen quad whose fragment shader reads the test
// buffer through storage texel buffer view(s). Structure parallels
// RenderFragmentUniformTexelBuffer, with the storage-texel descriptor type
// and limit.
6747 class RenderFragmentStorageTexelBuffer : public RenderPassCommand
6750 RenderFragmentStorageTexelBuffer (void) {}
6751 ~RenderFragmentStorageTexelBuffer (void);
6753 const char* getName (void) const { return "RenderFragmentStorageTexelBuffer"; }
6754 void logPrepare (TestLog&, size_t) const;
6755 void logSubmit (TestLog&, size_t) const;
6756 void prepare (PrepareRenderPassContext&);
6757 void submit (SubmitContext& context);
6758 void verify (VerifyRenderPassContext&, size_t);
6761 PipelineResources m_resources;
6762 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// Raw handles: sets die with the pool; views are destroyed in the destructor.
6763 vector<vk::VkDescriptorSet> m_descriptorSets;
6764 vector<vk::VkBufferView> m_bufferViews;
6766 const vk::DeviceInterface* m_vkd;
6767 vk::VkDevice m_device;
// Cached in prepare() for reuse in submit()/verify().
6768 vk::VkDeviceSize m_bufferSize;
6769 deUint32 m_maxStorageTexelCount;
6770 size_t m_targetWidth;
6771 size_t m_targetHeight;
// Destroy manually-created buffer views; the descriptor pool member frees
// the sets when it is destroyed.
6774 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6776 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6778 if (!!m_bufferViews[bufferViewNdx])
6780 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6781 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
// Log what prepare() will do for this command index.
// NOTE(review): message says "storage buffer" though this command uses a
// storage texel buffer; the log text is runtime output and is left unchanged.
6786 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6788 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
// Log what submit() will record for this command index.
6791 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6793 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Build the pipeline and per-chunk descriptor resources, mirroring
// RenderFragmentUniformTexelBuffer::prepare but with
// VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER and "storage-texel-buffer.frag".
// NOTE(review): m_vkd/m_device are presumably assigned on lines elided from
// this view (the destructor relies on them) — confirm against full file.
6796 void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
6798 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6799 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6800 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6801 const vk::VkDevice device = context.getContext().getDevice();
6802 const vk::VkRenderPass renderPass = context.getRenderPass();
6803 const deUint32 subpass = 0;
6804 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6805 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
6806 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Cache sizes/limits used again by submit() and verify().
6810 m_bufferSize = context.getBufferSize();
6811 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6812 m_targetWidth = context.getTargetWidth();
6813 m_targetHeight = context.getTargetHeight();
// Single storage-texel-buffer binding visible to the fragment shader.
6816 const vk::VkDescriptorSetLayoutBinding binding =
6819 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6821 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6825 bindings.push_back(binding);
6827 const vk::VkPushConstantRange pushConstantRange =
6829 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
// No vertex inputs: the quad is generated by "render-quad.vert".
6834 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6835 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
// One descriptor (set + view) per maxTexelBufferElements*4-byte chunk.
6838 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
6839 const vk::VkDescriptorPoolSize poolSizes =
6841 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6844 const vk::VkDescriptorPoolCreateInfo createInfo =
6846 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6848 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6855 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6856 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6857 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6860 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Texel count of this chunk; the last chunk may be smaller than the limit.
6862 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6863 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6864 : m_maxStorageTexelCount * 4) / 4;
6865 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6866 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6868 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): keep the raw handle; lifetime is tied to the pool.
6876 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// R32_UINT view over this chunk, offset in bytes (4 bytes per texel).
6879 const vk::VkBufferViewCreateInfo createInfo =
6881 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6885 context.getBuffer(),
6886 vk::VK_FORMAT_R32_UINT,
6887 descriptorSetNdx * m_maxStorageTexelCount * 4,
6891 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
// Point the set's texel-buffer binding at the chunk's view.
6895 const vk::VkWriteDescriptorSet write =
6897 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6899 m_descriptorSets[descriptorSetNdx],
6903 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6906 &m_bufferViews[descriptorSetNdx]
6909 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record one full-screen-quad draw (6 vertices) per descriptor set. Unlike
// the uniform-texel variant, the push constants also carry the chunk's texel
// count ("width"), since storage texel buffers here are addressed explicitly.
6914 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6916 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6917 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6919 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6921 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Push-constant layout; must match "storage-texel-buffer.frag".
6925 const deUint32 callId;
6926 const deUint32 valuesPerPixel;
6927 const deUint32 maxStorageTexelCount;
6928 const deUint32 width;
6931 (deUint32)descriptorSetNdx,
// valuesPerPixel = ceil(total accessible texels / pixel count).
6932 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6933 m_maxStorageTexelCount,
// width = texel count of this chunk (last chunk may be short).
6934 (deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6935 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6936 : m_maxStorageTexelCount * 4u) / 4u
6939 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6940 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6941 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// CPU recomputation of the expected framebuffer, mirroring the storage
// texel buffer fragment shader: per pixel, chase valuesPerPixel dependent
// 32-bit little-endian loads through the reference buffer.
// NOTE(review): the literal 256u appears to assume a 256-pixel-wide target —
// confirm against m_targetWidth in the full file.
6945 void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
// Same valuesPerPixel formula as submit(); keep the two in sync.
6947 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);
6949 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6950 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// First draw call (chunk) that can have written this pixel.
6952 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6954 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6956 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
6957 const deUint32 callId = (deUint32)descriptorSetNdx;
6959 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
// Texel count of this chunk (last chunk may be short).
6960 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6961 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6962 : m_maxStorageTexelCount * 4) / 4;
// Pixels below this call's range are skipped by the shader for this call.
6964 if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
// Dependent-load chain seeded with the pixel id.
6968 deUint32 value = id;
6970 for (deUint32 i = 0; i < valuesPerPixel; i++)
6972 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6973 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6974 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6975 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
// Unpack the final value into RGBA8 and store as normalized floats.
6978 const UVec4 vec ((value >> 0u) & 0xFFu,
6979 (value >> 8u) & 0xFFu,
6980 (value >> 16u) & 0xFFu,
6981 (value >> 24u) & 0xFFu);
6983 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command: full-screen quad whose fragment shader reads the test
// image as a storage image (single descriptor set, single image view).
6989 class RenderFragmentStorageImage : public RenderPassCommand
6992 RenderFragmentStorageImage (void) {}
6993 ~RenderFragmentStorageImage (void);
6995 const char* getName (void) const { return "RenderFragmentStorageImage"; }
6996 void logPrepare (TestLog&, size_t) const;
6997 void logSubmit (TestLog&, size_t) const;
6998 void prepare (PrepareRenderPassContext&);
6999 void submit (SubmitContext& context);
7000 void verify (VerifyRenderPassContext&, size_t);
// All Vulkan objects are Move<>-managed, so no manual destructor cleanup
// is needed (compare the texel-buffer commands above).
7003 PipelineResources m_resources;
7004 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
7005 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
7006 vk::Move<vk::VkImageView> m_imageView;
// Empty: Move<> members release their Vulkan objects automatically.
7009 RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
// Log what prepare() will do for this command index.
7013 void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
7015 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
// Log what submit() will record for this command index.
7018 void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
7020 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
// Build pipeline + single storage-image descriptor:
//  - VK_DESCRIPTOR_TYPE_STORAGE_IMAGE binding for the fragment stage;
//  - a 2D R8G8B8A8_UNORM color view over the context's image;
//  - descriptor written with the image view at the context's current layout.
7023 void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
7025 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7026 const vk::VkDevice device = context.getContext().getDevice();
7027 const vk::VkRenderPass renderPass = context.getRenderPass();
7028 const deUint32 subpass = 0;
7029 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
7030 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
7031 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Single storage-image binding visible to the fragment shader.
7034 const vk::VkDescriptorSetLayoutBinding binding =
7037 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7039 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7043 bindings.push_back(binding);
// No vertex inputs and no push constants for this command.
7046 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7047 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7050 const vk::VkDescriptorPoolSize poolSizes =
7052 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7055 const vk::VkDescriptorPoolCreateInfo createInfo =
7057 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7059 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7066 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7070 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
7071 const vk::VkDescriptorSetAllocateInfo allocateInfo =
7073 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7081 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D color view over the whole test image.
7084 const vk::VkImageViewCreateInfo createInfo =
7086 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7091 vk::VK_IMAGE_VIEW_TYPE_2D,
7092 vk::VK_FORMAT_R8G8B8A8_UNORM,
7093 vk::makeComponentMappingRGBA(),
7095 vk::VK_IMAGE_ASPECT_COLOR_BIT,
7103 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Bind the view at the layout the context reports for the image.
7107 const vk::VkDescriptorImageInfo imageInfo =
7111 context.getImageLayout()
7113 const vk::VkWriteDescriptorSet write =
7115 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7121 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7127 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record a single full-screen-quad draw (6 vertices) with the one
// storage-image descriptor set bound.
7132 void RenderFragmentStorageImage::submit (SubmitContext& context)
7134 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7135 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7137 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7139 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7140 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// CPU recomputation of the expected framebuffer: per pixel, follow a chain
// of dependent texel fetches through the reference image (mirroring
// "storage-image.frag") and store the last RGBA8 value read.
7143 void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
7145 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
// More chain steps for images larger than 256x256 so the whole image is read.
7146 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7148 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7149 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// Seed the chain with the pixel coordinates.
7151 UVec4 value = UVec4(x, y, 0u, 0u);
7153 for (deUint32 i = 0; i < valuesPerPixel; i++)
// Next fetch position derived from the previous value, wrapped to the image.
7155 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7156 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
// Convert the normalized texel back to 0..255 integer components.
7158 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7159 (deUint32)(floatValue.y() * 255.0f),
7160 (deUint32)(floatValue.z() * 255.0f),
7161 (deUint32)(floatValue.w() * 255.0f));
7164 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
// Render-pass command: full-screen quad whose fragment shader samples the
// test image via a combined image sampler (nearest filtering).
7168 class RenderFragmentSampledImage : public RenderPassCommand
7171 RenderFragmentSampledImage (void) {}
7172 ~RenderFragmentSampledImage (void);
7174 const char* getName (void) const { return "RenderFragmentSampledImage"; }
7175 void logPrepare (TestLog&, size_t) const;
7176 void logSubmit (TestLog&, size_t) const;
7177 void prepare (PrepareRenderPassContext&);
7178 void submit (SubmitContext& context);
7179 void verify (VerifyRenderPassContext&, size_t);
// All Vulkan objects are Move<>-managed; the destructor has no work to do.
7182 PipelineResources m_resources;
7183 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
7184 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
7185 vk::Move<vk::VkImageView> m_imageView;
7186 vk::Move<vk::VkSampler> m_sampler;
// Empty: Move<> members release their Vulkan objects automatically.
7189 RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
// Log what prepare() will do for this command index.
// NOTE(review): message says "storage image" though this command uses a
// sampled image; the log text is runtime output and is left unchanged.
7193 void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7195 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
// Log what submit() will record for this command index.
// NOTE(review): message says "storage image" though this command samples the
// image; the log text is runtime output and is left unchanged.
7198 void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7200 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
// Build pipeline + single combined-image-sampler descriptor:
//  - VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER binding for the fragment stage;
//  - a 2D R8G8B8A8_UNORM view over the context's image;
//  - a nearest-filter, clamp-to-edge sampler;
//  - descriptor written at the context's current image layout.
7203 void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
7205 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7206 const vk::VkDevice device = context.getContext().getDevice();
7207 const vk::VkRenderPass renderPass = context.getRenderPass();
7208 const deUint32 subpass = 0;
7209 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
7210 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
7211 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Single combined-image-sampler binding visible to the fragment shader.
7214 const vk::VkDescriptorSetLayoutBinding binding =
7217 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7219 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7223 bindings.push_back(binding);
// No vertex inputs and no push constants for this command.
7226 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7227 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7230 const vk::VkDescriptorPoolSize poolSizes =
7232 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7235 const vk::VkDescriptorPoolCreateInfo createInfo =
7237 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7239 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7246 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7250 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
7251 const vk::VkDescriptorSetAllocateInfo allocateInfo =
7253 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7261 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D color view over the whole test image.
7264 const vk::VkImageViewCreateInfo createInfo =
7266 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7271 vk::VK_IMAGE_VIEW_TYPE_2D,
7272 vk::VK_FORMAT_R8G8B8A8_UNORM,
7273 vk::makeComponentMappingRGBA(),
7275 vk::VK_IMAGE_ASPECT_COLOR_BIT,
7283 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Nearest min/mag filtering, clamp-to-edge in all dimensions: verify() can
// reproduce texel fetches exactly on the CPU.
7287 const vk::VkSamplerCreateInfo createInfo =
7289 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
7293 vk::VK_FILTER_NEAREST,
7294 vk::VK_FILTER_NEAREST,
7296 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
7297 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7298 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7299 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7304 vk::VK_COMPARE_OP_ALWAYS,
7307 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
7311 m_sampler = vk::createSampler(vkd, device, &createInfo);
// Bind sampler + view at the layout the context reports for the image.
7315 const vk::VkDescriptorImageInfo imageInfo =
7319 context.getImageLayout()
7321 const vk::VkWriteDescriptorSet write =
7323 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7329 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7335 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record a single full-screen-quad draw (6 vertices) with the one
// combined-image-sampler descriptor set bound.
7340 void RenderFragmentSampledImage::submit (SubmitContext& context)
7342 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7343 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7345 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7347 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7348 vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
// CPU recomputation of the expected framebuffer, identical in structure to
// RenderFragmentStorageImage::verify — valid because the sampler uses
// nearest filtering, so GPU sampling matches exact texel fetches.
7351 void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7353 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
// More chain steps for images larger than 256x256 so the whole image is read.
7354 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7356 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7357 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// Seed the chain with the pixel coordinates.
7359 UVec4 value = UVec4(x, y, 0u, 0u);
7361 for (deUint32 i = 0; i < valuesPerPixel; i++)
// Next fetch position derived from the previous value, wrapped to the image.
7363 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7364 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
// Convert the normalized texel back to 0..255 integer components.
7366 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7367 (deUint32)(floatValue.y() * 255.0f),
7368 (deUint32)(floatValue.z() * 255.0f),
7369 (deUint32)(floatValue.w() * 255.0f));
7373 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7391 OP_BUFFER_BINDMEMORY,
7393 OP_QUEUE_WAIT_FOR_IDLE,
7394 OP_DEVICE_WAIT_FOR_IDLE,
7396 OP_COMMAND_BUFFER_BEGIN,
7397 OP_COMMAND_BUFFER_END,
7399 // Buffer transfer operations
7403 OP_BUFFER_COPY_TO_BUFFER,
7404 OP_BUFFER_COPY_FROM_BUFFER,
7406 OP_BUFFER_COPY_TO_IMAGE,
7407 OP_BUFFER_COPY_FROM_IMAGE,
7411 OP_IMAGE_BINDMEMORY,
7413 OP_IMAGE_TRANSITION_LAYOUT,
7415 OP_IMAGE_COPY_TO_BUFFER,
7416 OP_IMAGE_COPY_FROM_BUFFER,
7418 OP_IMAGE_COPY_TO_IMAGE,
7419 OP_IMAGE_COPY_FROM_IMAGE,
7421 OP_IMAGE_BLIT_TO_IMAGE,
7422 OP_IMAGE_BLIT_FROM_IMAGE,
7426 OP_PIPELINE_BARRIER_GLOBAL,
7427 OP_PIPELINE_BARRIER_BUFFER,
7428 OP_PIPELINE_BARRIER_IMAGE,
7430 // Renderpass operations
7431 OP_RENDERPASS_BEGIN,
7434 // Commands inside render pass
7435 OP_RENDER_VERTEX_BUFFER,
7436 OP_RENDER_INDEX_BUFFER,
7438 OP_RENDER_VERTEX_UNIFORM_BUFFER,
7439 OP_RENDER_FRAGMENT_UNIFORM_BUFFER,
7441 OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
7442 OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,
7444 OP_RENDER_VERTEX_STORAGE_BUFFER,
7445 OP_RENDER_FRAGMENT_STORAGE_BUFFER,
7447 OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
7448 OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,
7450 OP_RENDER_VERTEX_STORAGE_IMAGE,
7451 OP_RENDER_FRAGMENT_STORAGE_IMAGE,
7453 OP_RENDER_VERTEX_SAMPLED_IMAGE,
7454 OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
7460 STAGE_COMMAND_BUFFER,
// Mask of every VkAccessFlag that represents a write; used by isWriteAccess()
// and by CacheState to decide what must be made available before reuse.
6505 vk::VkAccessFlags getWriteAccessFlags (void)
6507 return vk::VK_ACCESS_SHADER_WRITE_BIT
6508 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
6509 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
6510 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
6511 | vk::VK_ACCESS_HOST_WRITE_BIT
6512 | vk::VK_ACCESS_MEMORY_WRITE_BIT;
// True if 'access' is one of the write accesses in getWriteAccessFlags().
7475 bool isWriteAccess (vk::VkAccessFlagBits access)
7477 return (getWriteAccessFlags() & access) != 0;
// CacheState models the Vulkan memory model per pipeline stage: which writes
// are not yet "available", which accesses are not yet "visible", which stages
// have incomplete operations, and pending layout transitions. The test uses
// it to decide when a pipeline barrier is required and what a sufficient
// barrier looks like.
7483 CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);
// True if an access of this type at this stage would observe all prior work.
7485 bool isValid (vk::VkPipelineStageFlagBits stage,
7486 vk::VkAccessFlagBits access) const;
// Record that an access was performed (dirties the tracked state).
7488 void perform (vk::VkPipelineStageFlagBits stage,
7489 vk::VkAccessFlagBits access);
7491 void submitCommandBuffer (void);
7492 void waitForIdle (void);
// Compute a barrier that would make everything available and visible.
7494 void getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7495 vk::VkAccessFlags& srcAccesses,
7496 vk::VkPipelineStageFlags& dstStages,
7497 vk::VkAccessFlags& dstAccesses) const;
// Apply the effect of a pipeline barrier to the tracked state.
7499 void barrier (vk::VkPipelineStageFlags srcStages,
7500 vk::VkAccessFlags srcAccesses,
7501 vk::VkPipelineStageFlags dstStages,
7502 vk::VkAccessFlags dstAccesses);
// Apply/check the effect of an image layout transition barrier.
7504 void imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7505 vk::VkAccessFlags srcAccesses,
7506 vk::VkPipelineStageFlags dstStages,
7507 vk::VkAccessFlags dstAccesses);
7509 void checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7510 vk::VkAccessFlags srcAccesses,
7511 vk::VkPipelineStageFlags dstStages,
7512 vk::VkAccessFlags dstAccesses);
7514 // Everything is clean and there is no need for barriers
7515 bool isClean (void) const;
7517 vk::VkPipelineStageFlags getAllowedStages (void) const { return m_allowedStages; }
7518 vk::VkAccessFlags getAllowedAcceses (void) const { return m_allowedAccesses; }
7520 // Limit which stages and accesses are used by the CacheState tracker
7521 const vk::VkPipelineStageFlags m_allowedStages;
7522 const vk::VkAccessFlags m_allowedAccesses;
7524 // [dstStage][srcStage] = srcAccesses
7525 // In stage dstStage write srcAccesses from srcStage are not yet available
7526 vk::VkAccessFlags m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7527 // Latest pipeline transition is not available in stage
7528 bool m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
7529 // [dstStage] = dstAccesses
7530 // In stage dstStage ops with dstAccesses are not yet visible
7531 vk::VkAccessFlags m_invisibleOperations[PIPELINESTAGE_LAST];
7533 // [dstStage] = srcStage
7534 // Memory operation in srcStage have not completed before dstStage
7535 vk::VkPipelineStageFlags m_incompleteOperations[PIPELINESTAGE_LAST];
// Initialize to the fully-clean state: nothing invisible, nothing incomplete,
// no pending writes or layout transitions, for every allowed stage.
7538 CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7539 : m_allowedStages (allowedStages)
7540 , m_allowedAccesses (allowedAccesses)
// Iterate each single-bit stage flag up to the allowed mask.
7542 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7544 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
// Skip bits not present in the allowed mask.
7546 if ((dstStage_ & m_allowedStages) == 0)
7549 // All operations are initially visible
7550 m_invisibleOperations[dstStage] = 0;
7552 // There are no incomplete read operations initially
7553 m_incompleteOperations[dstStage] = 0;
7555 // There are no incomplete layout transitions
7556 m_unavailableLayoutTransition[dstStage] = false;
7558 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7560 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7562 if ((srcStage_ & m_allowedStages) == 0)
7565 // There are no write operations that are not yet available
7567 m_unavailableWriteOperations[dstStage][srcStage] = 0;
// An access at 'stage' is valid when (a) no layout transition or prior
// operation is invisible to it and (b), for writes, no other stage's
// operations are still incomplete (a write must not race with them).
7572 bool CacheState::isValid (vk::VkPipelineStageFlagBits stage,
7573 vk::VkAccessFlagBits access) const
7575 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7576 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7578 const PipelineStage dstStage = pipelineStageFlagToPipelineStage(stage);
7580 // Previous operations are not visible to access on stage
7581 if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7584 if (isWriteAccess(access))
7586 // Memory operations from other stages have not completed before
7588 if (m_incompleteOperations[dstStage] != 0)
// Record an access: the stage becomes incomplete relative to every stage,
// and a write additionally makes all accesses invisible everywhere and is
// itself marked unavailable from srcStage to every stage.
7595 void CacheState::perform (vk::VkPipelineStageFlagBits stage,
7596 vk::VkAccessFlagBits access)
7598 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7599 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7601 const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7603 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7605 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7607 if ((dstStage_ & m_allowedStages) == 0)
7610 // Mark stage as incomplete for all stages
7611 m_incompleteOperations[dstStage] |= stage;
7613 if (isWriteAccess(access))
7615 // Mark all accesses from all stages invisible
7616 m_invisibleOperations[dstStage] |= m_allowedAccesses;
7618 // Mark write access from srcStage unavailable to all stages
7619 m_unavailableWriteOperations[dstStage][srcStage] |= access;
// Model the implicit memory dependency of submitting a command buffer:
// host reads/writes performed before the submit behave as if a barrier
// sourced at the host stage had flushed them.
7624 void CacheState::submitCommandBuffer (void)
7626 // Flush all host writes and reads
7627 barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7628 m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
// Model vkQueueWaitIdle()/vkDeviceWaitIdle(): all previously performed
// writes become available and are then made visible to every device-side
// stage. Note that the host stage is excluded from the destination mask
// of the second barrier, so host visibility is handled elsewhere.
7633 void CacheState::waitForIdle (void)
7635 // Make all writes available
7636 barrier(m_allowedStages,
7637 m_allowedAccesses & getWriteAccessFlags(),
7641 // Make all writes visible on device side
7642 barrier(m_allowedStages,
7644 m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
// Compute the stage/access masks of a pipeline barrier that would make
// every pending operation complete, every write available, and every
// access visible at every allowed stage ("full" barrier). The outputs
// are accumulated per destination stage from the modelled cache state.
7648 void CacheState::getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7649 vk::VkAccessFlags& srcAccesses,
7650 vk::VkPipelineStageFlags& dstStages,
7651 vk::VkAccessFlags& dstAccesses) const
7658 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7660 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7662 if ((dstStage_ & m_allowedStages) == 0)
7665 // Make sure all previous operations are complete in all stages
7666 if (m_incompleteOperations[dstStage])
7668 dstStages |= dstStage_;
7669 srcStages |= m_incompleteOperations[dstStage];
7672 // Make sure all previously-performed memory accesses become visible in dstStage
7673 if (m_invisibleOperations[dstStage])
7675 dstStages |= dstStage_;
7676 dstAccesses |= m_invisibleOperations[dstStage];
7679 // Make sure all write operations from all stages are available
7680 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7682 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7684 if ((srcStage_ & m_allowedStages) == 0)
7687 if (m_unavailableWriteOperations[dstStage][srcStage])
7689 dstStages |= dstStage_;
// NOTE(review): dstStage_ is ORed into srcStages here, although the
// unavailable write originated at srcStage; barrier() clears
// m_unavailableWriteOperations[dst][src] only when the barrier's source
// mask contains srcStage_. Confirm whether 'srcStages |= srcStage_;'
// was intended.
7690 srcStages |= dstStage_;
7691 srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage];
7694 if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
7696 // Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
7697 // but has completed in srcStage.
7698 dstStages |= dstStage_;
7699 srcStages |= dstStage_;
// Sanity: the computed masks must stay within the allowed stage/access sets.
7704 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7705 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7706 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7707 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
// Debug-only validation that a barrier used for an image layout
// transition is strong enough given the modelled state: every allowed
// stage must have completed before (or be included in) srcStages, and if
// srcAccesses does not cover pending writes, at least one stage pair
// must already have all writes available. In release builds this is a
// no-op (hence the DE_UNREF calls).
7710 void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7711 vk::VkAccessFlags srcAccesses,
7712 vk::VkPipelineStageFlags dstStages,
7713 vk::VkAccessFlags dstAccesses)
7715 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7716 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7717 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7718 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7720 DE_UNREF(srcStages);
7721 DE_UNREF(srcAccesses);
7723 DE_UNREF(dstStages);
7724 DE_UNREF(dstAccesses);
7726 #if defined(DE_DEBUG)
7727 // Check that all stages have completed before srcStages or are in srcStages.
7729 vk::VkPipelineStageFlags completedStages = srcStages;
7731 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7733 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7735 if ((srcStage_ & srcStages) == 0)
// Stages NOT listed as incomplete at srcStage have completed before it.
7738 completedStages |= (~m_incompleteOperations[srcStage]);
7741 DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
7744 // Check that any write is available at least in one stage. Since all stages are complete even single flush is enough.
7745 if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
7747 bool anyWriteAvailable = false;
7749 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7751 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7753 if ((dstStage_ & m_allowedStages) == 0)
7756 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7758 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7760 if ((srcStage_ & m_allowedStages) == 0)
// Any entry differing from "all writes unavailable" means some write
// has already been made available for this stage pair.
7763 if (m_unavailableWriteOperations[dstStage][srcStage] != (getWriteAccessFlags() & m_allowedAccesses))
7765 anyWriteAvailable = true;
7771 DE_ASSERT(anyWriteAvailable);
// Apply the effects of an image layout transition barrier to the model.
// After the transition: every stage except the destination stage itself
// is considered incomplete, only dstAccesses are visible, the transition
// is available only in the stages listed in dstStages, and all prior
// writes are considered available (the transition consumed them).
7776 void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7777 vk::VkAccessFlags srcAccesses,
7778 vk::VkPipelineStageFlags dstStages,
7779 vk::VkAccessFlags dstAccesses)
7781 checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7783 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7785 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7787 if ((dstStage_ & m_allowedStages) == 0)
7790 // All stages are incomplete after the barrier except each dstStage itself.
7791 m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7793 // All memory operations are invisible unless they are listed in dstAccesses
7794 m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7796 // Layout transition is unavailable in stage unless it was listed in dstStages
7797 m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7799 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7801 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7803 if ((srcStage_ & m_allowedStages) == 0)
7806 // All write operations are available after layout transition
7807 m_unavailableWriteOperations[dstStage][srcStage] = 0;
// Apply a pipeline barrier (src/dst stage + access masks) to the model.
// Works in two passes over a snapshot of the pre-barrier state:
//  1) propagate execution/availability state from each source stage to
//     each destination stage (completions, layout transitions, already
//     available writes);
//  2) make srcAccesses available from the source stages and, where all
//     writes have become available in a destination stage, make
//     dstAccesses visible there.
7812 void CacheState::barrier (vk::VkPipelineStageFlags srcStages,
7813 vk::VkAccessFlags srcAccesses,
7814 vk::VkPipelineStageFlags dstStages,
7815 vk::VkAccessFlags dstAccesses)
7817 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7818 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7819 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7820 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
// Snapshot the pre-barrier state so updates below do not feed into each
// other while iterating over stage pairs.
7824 vk::VkPipelineStageFlags oldIncompleteOperations[PIPELINESTAGE_LAST];
7825 vk::VkAccessFlags oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7826 bool oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];
7828 deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
7829 deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
7830 deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));
7832 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7834 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7836 if ((srcStage_ & srcStages) == 0)
7839 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7841 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7843 if ((dstStage_ & dstStages) == 0)
7846 // Stages that have completed before srcStage have also completed before dstStage
7847 m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
7849 // Image layout transitions in srcStage are now available in dstStage
7850 m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];
7852 for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
7854 const PipelineStage sharedStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
7856 if ((sharedStage_ & m_allowedStages) == 0)
7859 // Writes that are available in srcStage are also available in dstStage
7860 m_unavailableWriteOperations[dstStage][sharedStage] &= oldUnavailableWriteOperations[srcStage][sharedStage];
// Second pass: apply availability of srcAccesses and visibility of
// dstAccesses per destination stage.
7867 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7869 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7870 bool allWritesAvailable = true;
7872 if ((dstStage_ & dstStages) == 0)
7875 // Operations in srcStages have completed before any stage in dstStages
7876 m_incompleteOperations[dstStage] &= ~srcStages;
7878 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7880 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7882 if ((srcStage_ & m_allowedStages) == 0)
7885 // Make srcAccesses from srcStage available in dstStage
7886 if ((srcStage_ & srcStages) != 0)
7887 m_unavailableWriteOperations[dstStage][srcStage] &= ~srcAccesses;
7889 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
7890 allWritesAvailable = false;
7893 // If all writes are available in dstStage make dstAccesses also visible
7894 if (allWritesAvailable)
7895 m_invisibleOperations[dstStage] &= ~dstAccesses;
// Return whether the modelled state is fully "clean": no invisible
// accesses, no incomplete operations, no pending layout transitions and
// no unavailable writes for any allowed stage pair.
7899 bool CacheState::isClean (void) const
7901 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7903 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7905 if ((dstStage_ & m_allowedStages) == 0)
7908 // Some operations are not visible to some stages
7909 if (m_invisibleOperations[dstStage] != 0)
7912 // There are operation that have not completed yet
7913 if (m_incompleteOperations[dstStage] != 0)
7916 // Layout transition has not completed yet
7917 if (m_unavailableLayoutTransition[dstStage])
7920 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7922 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7924 if ((srcStage_ & m_allowedStages) == 0)
7927 // Some write operations are not available yet
7928 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
// Return whether an image created with 'usage' may legally be used in
// the given 'layout'. Optimal layouts are tied to the matching usage
// bit; unrecognized layouts hit DE_FATAL.
7936 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7940 case vk::VK_IMAGE_LAYOUT_GENERAL:
7943 case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7944 return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7946 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7947 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7949 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7950 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7952 case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7953 // \todo [2016-03-09 mika] Should include input attachment
7954 return (usage & USAGE_SAMPLED_IMAGE) != 0;
7956 case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7957 return (usage & USAGE_TRANSFER_SRC) != 0;
7959 case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7960 return (usage & USAGE_TRANSFER_DST) != 0;
7962 case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7966 DE_FATAL("Unknown layout");
// Count how many of the candidate image layouts below are usable with
// images created with 'usage' (as decided by layoutSupportedByUsage()).
// Used e.g. to decide whether a layout transition to a *different*
// layout is possible.
7971 size_t getNumberOfSupportedLayouts (Usage usage)
7973 const vk::VkImageLayout layouts[] =
7975 vk::VK_IMAGE_LAYOUT_GENERAL,
7976 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7977 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7978 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7979 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7980 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7981 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7983 size_t supportedLayoutCount = 0;
7985 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7987 const vk::VkImageLayout layout = layouts[layoutNdx];
7989 if (layoutSupportedByUsage(usage, layout))
7990 supportedLayoutCount++;
7993 return supportedLayoutCount;
// Pick a random image layout that is supported by 'usage' and differs
// from 'previousLayout'. When previousLayout is UNDEFINED all supported
// layouts are candidates; otherwise the previous layout is excluded
// (hence the 'supportedLayoutCount - 1' modulus).
7996 vk::VkImageLayout getRandomNextLayout (de::Random& rng,
7998 vk::VkImageLayout previousLayout)
8000 const vk::VkImageLayout layouts[] =
8002 vk::VK_IMAGE_LAYOUT_GENERAL,
8003 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
8004 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
8005 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
8006 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
8007 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
8008 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
8010 const size_t supportedLayoutCount = getNumberOfSupportedLayouts(usage);
8012 DE_ASSERT(supportedLayoutCount > 0);
8014 size_t nextLayoutNdx = ((size_t)rng.getUint64()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8015 ? supportedLayoutCount
8016 : supportedLayoutCount - 1);
// Walk the candidate list, skipping unsupported layouts and the
// previous layout, until the randomly chosen index is consumed.
8018 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
8020 const vk::VkImageLayout layout = layouts[layoutNdx];
8022 if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
8024 if (nextLayoutNdx == 0)
8031 DE_FATAL("Unreachable");
8032 return vk::VK_IMAGE_LAYOUT_UNDEFINED;
// Abstract state of the randomized test walk: which execution stage we
// are in (host / command buffer / render pass), the modelled cache
// state, and which resources currently exist and are usable.
8037 State (Usage usage, deUint32 seed)
8038 : stage (STAGE_HOST)
8039 , cache (usageToStageFlags(usage), usageToAccessFlags(usage))
8042 , hostInvalidated (true)
8043 , hostFlushed (true)
8044 , memoryDefined (false)
8046 , hasBoundBufferMemory (false)
8048 , hasBoundImageMemory (false)
8049 , imageLayout (vk::VK_IMAGE_LAYOUT_UNDEFINED)
8050 , imageDefined (false)
8053 , commandBufferIsEmpty (true)
8054 , renderPassIsEmpty (true)
8063 bool hostInvalidated; // Host caches have been invalidated (mapped reads see device writes)
8068 bool hasBoundBufferMemory; // Buffer exists and memory is bound to it
8071 bool hasBoundImageMemory; // Image exists and memory is bound to it
8072 vk::VkImageLayout imageLayout; // Current image layout; UNDEFINED when contents are not meaningful
8078 bool commandBufferIsEmpty; // No commands recorded since OP_COMMAND_BUFFER_BEGIN
8079 bool renderPassIsEmpty; // No draw recorded since OP_RENDERPASS_BEGIN
// Collect into 'ops' every operation that is legal to perform next from
// 'state'. Legality is decided from the current stage (host / command
// buffer / render pass), the resources that exist, the image layout and
// the modelled cache state (state.cache.isValid(...) checks that the
// access would not race with or miss previous operations).
8082 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
8084 if (state.stage == STAGE_HOST)
8086 if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
8088 // Host memory operations
8091 ops.push_back(OP_UNMAP);
8093 // Avoid flush and finish if they are not needed
8094 if (!state.hostFlushed)
8095 ops.push_back(OP_MAP_FLUSH);
// Only invalidate when host reads/writes would actually be valid afterwards.
8097 if (!state.hostInvalidated
8099 && ((usage & USAGE_HOST_READ) == 0
8100 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8101 && ((usage & USAGE_HOST_WRITE) == 0
8102 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
8104 ops.push_back(OP_MAP_INVALIDATE);
// Read-modify-write requires defined contents plus valid host read and write.
8107 if (usage & USAGE_HOST_READ
8108 && usage & USAGE_HOST_WRITE
8109 && state.memoryDefined
8110 && state.hostInvalidated
8112 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
8113 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8115 ops.push_back(OP_MAP_MODIFY);
8118 if (usage & USAGE_HOST_READ
8119 && state.memoryDefined
8120 && state.hostInvalidated
8122 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8124 ops.push_back(OP_MAP_READ);
8127 if (usage & USAGE_HOST_WRITE
8128 && state.hostInvalidated
8130 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
8132 ops.push_back(OP_MAP_WRITE);
8136 ops.push_back(OP_MAP);
// Object lifetime operations (only while the queue is idle).
8139 if (state.hasBoundBufferMemory && state.queueIdle)
8141 // \note Destroy only buffers after they have been bound
8142 ops.push_back(OP_BUFFER_DESTROY);
8146 if (state.hasBuffer)
8148 if (!state.hasBoundBufferMemory)
8149 ops.push_back(OP_BUFFER_BINDMEMORY);
8151 else if (!state.hasImage && supportsBuffers) // Avoid creating buffer if there is already image
8152 ops.push_back(OP_BUFFER_CREATE);
8155 if (state.hasBoundImageMemory && state.queueIdle)
8157 // \note Destroy only image after they have been bound
8158 ops.push_back(OP_IMAGE_DESTROY);
8164 if (!state.hasBoundImageMemory)
8165 ops.push_back(OP_IMAGE_BINDMEMORY);
8167 else if (!state.hasBuffer && supportsImages) // Avoid creating image if there is already buffer
8168 ops.push_back(OP_IMAGE_CREATE);
8171 // Host writes must be flushed before GPU commands and there must be
8172 // buffer or image for GPU commands
8173 if (state.hostFlushed
8174 && (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
8175 && (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
8176 && (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
8178 ops.push_back(OP_COMMAND_BUFFER_BEGIN);
8181 if (!state.deviceIdle)
8182 ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
8184 if (!state.queueIdle)
8185 ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
8187 else if (state.stage == STAGE_COMMAND_BUFFER)
// Barriers are only offered while there is something to synchronize.
8189 if (!state.cache.isClean())
8191 ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8194 ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8196 if (state.hasBuffer)
8197 ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
// Transfer operations on buffers.
8200 if (state.hasBoundBufferMemory)
8202 if (usage & USAGE_TRANSFER_DST
8203 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8205 ops.push_back(OP_BUFFER_FILL);
8206 ops.push_back(OP_BUFFER_UPDATE);
8207 ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8208 ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8211 if (usage & USAGE_TRANSFER_SRC
8212 && state.memoryDefined
8213 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8215 ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8216 ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
// Layout transitions are possible from UNDEFINED or whenever more than
// one layout is supported (so a different target layout exists).
8220 if (state.hasBoundImageMemory
8221 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8222 || getNumberOfSupportedLayouts(usage) > 1))
8224 ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
// Transfer operations on images require a compatible current layout.
8227 if (usage & USAGE_TRANSFER_DST
8228 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8229 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8230 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8232 ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8233 ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8234 ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8237 if (usage & USAGE_TRANSFER_SRC
8238 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8239 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8240 && state.imageDefined
8241 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8243 ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8244 ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8245 ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8250 // \todo [2016-03-09 mika] Add other usages?
// A render pass may begin if at least one render-time read of the
// buffer or image would be valid in the required stage/layout.
8251 if ((state.memoryDefined
8252 && state.hasBoundBufferMemory
8253 && (((usage & USAGE_VERTEX_BUFFER)
8254 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8255 || ((usage & USAGE_INDEX_BUFFER)
8256 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8257 || ((usage & USAGE_UNIFORM_BUFFER)
8258 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8259 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8260 || ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
8261 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8262 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8263 || ((usage & USAGE_STORAGE_BUFFER)
8264 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8265 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8266 || ((usage & USAGE_STORAGE_TEXEL_BUFFER)
8267 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
8268 || (state.imageDefined
8269 && state.hasBoundImageMemory
8270 && (((usage & USAGE_STORAGE_IMAGE)
8271 && state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8272 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8273 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8274 || ((usage & USAGE_SAMPLED_IMAGE)
8275 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8276 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
8277 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8278 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
8280 ops.push_back(OP_RENDERPASS_BEGIN);
8283 // \note This depends on previous operations and has to be always the
8284 // last command buffer operation check
8285 if (ops.empty() || !state.commandBufferIsEmpty)
8286 ops.push_back(OP_COMMAND_BUFFER_END);
8288 else if (state.stage == STAGE_RENDER_PASS)
// Render-pass stage: each draw variant requires the matching usage bit,
// defined contents, bound memory and a valid read in the needed stage.
8290 if ((usage & USAGE_VERTEX_BUFFER) != 0
8291 && state.memoryDefined
8292 && state.hasBoundBufferMemory
8293 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8295 ops.push_back(OP_RENDER_VERTEX_BUFFER);
8298 if ((usage & USAGE_INDEX_BUFFER) != 0
8299 && state.memoryDefined
8300 && state.hasBoundBufferMemory
8301 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8303 ops.push_back(OP_RENDER_INDEX_BUFFER);
8306 if ((usage & USAGE_UNIFORM_BUFFER) != 0
8307 && state.memoryDefined
8308 && state.hasBoundBufferMemory)
8310 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8311 ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
8313 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8314 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
8317 if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
8318 && state.memoryDefined
8319 && state.hasBoundBufferMemory)
8321 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8322 ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
8324 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8325 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
8328 if ((usage & USAGE_STORAGE_BUFFER) != 0
8329 && state.memoryDefined
8330 && state.hasBoundBufferMemory)
8332 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8333 ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
8335 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8336 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
8339 if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
8340 && state.memoryDefined
8341 && state.hasBoundBufferMemory)
8343 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8344 ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
8346 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8347 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
8350 if ((usage & USAGE_STORAGE_IMAGE) != 0
8351 && state.imageDefined
8352 && state.hasBoundImageMemory
8353 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
8355 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8356 ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
8358 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8359 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
8362 if ((usage & USAGE_SAMPLED_IMAGE) != 0
8363 && state.imageDefined
8364 && state.hasBoundImageMemory
8365 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8366 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
8368 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8369 ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
8371 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8372 ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
8375 if (!state.renderPassIsEmpty)
8376 ops.push_back(OP_RENDERPASS_END);
8379 DE_FATAL("Unknown stage");
8382 void applyOp (State& state, const Memory& memory, Op op, Usage usage)
8387 DE_ASSERT(state.stage == STAGE_HOST);
8388 DE_ASSERT(!state.mapped);
8389 state.mapped = true;
8393 DE_ASSERT(state.stage == STAGE_HOST);
8394 DE_ASSERT(state.mapped);
8395 state.mapped = false;
8399 DE_ASSERT(state.stage == STAGE_HOST);
8400 DE_ASSERT(!state.hostFlushed);
8401 state.hostFlushed = true;
8404 case OP_MAP_INVALIDATE:
8405 DE_ASSERT(state.stage == STAGE_HOST);
8406 DE_ASSERT(!state.hostInvalidated);
8407 state.hostInvalidated = true;
8411 DE_ASSERT(state.stage == STAGE_HOST);
8412 DE_ASSERT(state.hostInvalidated);
8413 state.rng.getUint32();
8417 DE_ASSERT(state.stage == STAGE_HOST);
8418 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8419 state.hostFlushed = false;
8421 state.memoryDefined = true;
8422 state.imageDefined = false;
8423 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8424 state.rng.getUint32();
8428 DE_ASSERT(state.stage == STAGE_HOST);
8429 DE_ASSERT(state.hostInvalidated);
8431 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8432 state.hostFlushed = false;
8434 state.rng.getUint32();
8437 case OP_BUFFER_CREATE:
8438 DE_ASSERT(state.stage == STAGE_HOST);
8439 DE_ASSERT(!state.hasBuffer);
8441 state.hasBuffer = true;
8444 case OP_BUFFER_DESTROY:
8445 DE_ASSERT(state.stage == STAGE_HOST);
8446 DE_ASSERT(state.hasBuffer);
8447 DE_ASSERT(state.hasBoundBufferMemory);
8449 state.hasBuffer = false;
8450 state.hasBoundBufferMemory = false;
8453 case OP_BUFFER_BINDMEMORY:
8454 DE_ASSERT(state.stage == STAGE_HOST);
8455 DE_ASSERT(state.hasBuffer);
8456 DE_ASSERT(!state.hasBoundBufferMemory);
8458 state.hasBoundBufferMemory = true;
8461 case OP_IMAGE_CREATE:
8462 DE_ASSERT(state.stage == STAGE_HOST);
8463 DE_ASSERT(!state.hasImage);
8464 DE_ASSERT(!state.hasBuffer);
8466 state.hasImage = true;
8469 case OP_IMAGE_DESTROY:
8470 DE_ASSERT(state.stage == STAGE_HOST);
8471 DE_ASSERT(state.hasImage);
8472 DE_ASSERT(state.hasBoundImageMemory);
8474 state.hasImage = false;
8475 state.hasBoundImageMemory = false;
8476 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8477 state.imageDefined = false;
8480 case OP_IMAGE_BINDMEMORY:
8481 DE_ASSERT(state.stage == STAGE_HOST);
8482 DE_ASSERT(state.hasImage);
8483 DE_ASSERT(!state.hasBoundImageMemory);
8485 state.hasBoundImageMemory = true;
8488 case OP_IMAGE_TRANSITION_LAYOUT:
8490 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8491 DE_ASSERT(state.hasImage);
8492 DE_ASSERT(state.hasBoundImageMemory);
8494 // \todo [2016-03-09 mika] Support linear tiling and predefined data
8495 const vk::VkImageLayout srcLayout = state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8496 const vk::VkImageLayout dstLayout = getRandomNextLayout(state.rng, usage, srcLayout);
8498 vk::VkPipelineStageFlags dirtySrcStages;
8499 vk::VkAccessFlags dirtySrcAccesses;
8500 vk::VkPipelineStageFlags dirtyDstStages;
8501 vk::VkAccessFlags dirtyDstAccesses;
8503 vk::VkPipelineStageFlags srcStages;
8504 vk::VkAccessFlags srcAccesses;
8505 vk::VkPipelineStageFlags dstStages;
8506 vk::VkAccessFlags dstAccesses;
8508 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8510 // Try masking some random bits
8511 srcStages = dirtySrcStages;
8512 srcAccesses = dirtySrcAccesses;
8514 dstStages = state.cache.getAllowedStages() & state.rng.getUint32();
8515 dstAccesses = state.cache.getAllowedAcceses() & state.rng.getUint32();
8517 // If there are no bits in dst stage mask use all stages
8518 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8521 srcStages = dstStages;
8523 if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8524 state.imageDefined = false;
8526 state.commandBufferIsEmpty = false;
8527 state.imageLayout = dstLayout;
8528 state.memoryDefined = false;
8529 state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8533 case OP_QUEUE_WAIT_FOR_IDLE:
8534 DE_ASSERT(state.stage == STAGE_HOST);
8535 DE_ASSERT(!state.queueIdle);
8537 state.queueIdle = true;
8539 state.cache.waitForIdle();
8542 case OP_DEVICE_WAIT_FOR_IDLE:
8543 DE_ASSERT(state.stage == STAGE_HOST);
8544 DE_ASSERT(!state.deviceIdle);
8546 state.queueIdle = true;
8547 state.deviceIdle = true;
8549 state.cache.waitForIdle();
8552 case OP_COMMAND_BUFFER_BEGIN:
8553 DE_ASSERT(state.stage == STAGE_HOST);
8554 state.stage = STAGE_COMMAND_BUFFER;
8555 state.commandBufferIsEmpty = true;
8556 // Makes host writes visible to command buffer
8557 state.cache.submitCommandBuffer();
8560 case OP_COMMAND_BUFFER_END:
8561 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8562 state.stage = STAGE_HOST;
8563 state.queueIdle = false;
8564 state.deviceIdle = false;
8567 case OP_BUFFER_COPY_FROM_BUFFER:
8568 case OP_BUFFER_COPY_FROM_IMAGE:
8569 case OP_BUFFER_UPDATE:
8570 case OP_BUFFER_FILL:
8571 state.rng.getUint32();
8572 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8574 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8575 state.hostInvalidated = false;
8577 state.commandBufferIsEmpty = false;
8578 state.memoryDefined = true;
8579 state.imageDefined = false;
8580 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8581 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8584 case OP_BUFFER_COPY_TO_BUFFER:
8585 case OP_BUFFER_COPY_TO_IMAGE:
8586 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8588 state.commandBufferIsEmpty = false;
8589 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8592 case OP_IMAGE_BLIT_FROM_IMAGE:
8593 state.rng.getBool();
8595 case OP_IMAGE_COPY_FROM_BUFFER:
8596 case OP_IMAGE_COPY_FROM_IMAGE:
8597 state.rng.getUint32();
8598 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8600 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8601 state.hostInvalidated = false;
8603 state.commandBufferIsEmpty = false;
8604 state.memoryDefined = false;
8605 state.imageDefined = true;
8606 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8609 case OP_IMAGE_BLIT_TO_IMAGE:
8610 state.rng.getBool();
8612 case OP_IMAGE_COPY_TO_BUFFER:
8613 case OP_IMAGE_COPY_TO_IMAGE:
8614 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8616 state.commandBufferIsEmpty = false;
8617 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8620 case OP_PIPELINE_BARRIER_GLOBAL:
8621 case OP_PIPELINE_BARRIER_BUFFER:
8622 case OP_PIPELINE_BARRIER_IMAGE:
8624 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8626 vk::VkPipelineStageFlags dirtySrcStages;
8627 vk::VkAccessFlags dirtySrcAccesses;
8628 vk::VkPipelineStageFlags dirtyDstStages;
8629 vk::VkAccessFlags dirtyDstAccesses;
8631 vk::VkPipelineStageFlags srcStages;
8632 vk::VkAccessFlags srcAccesses;
8633 vk::VkPipelineStageFlags dstStages;
8634 vk::VkAccessFlags dstAccesses;
8636 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8638 // Try masking some random bits
8639 srcStages = dirtySrcStages & state.rng.getUint32();
8640 srcAccesses = dirtySrcAccesses & state.rng.getUint32();
8642 dstStages = dirtyDstStages & state.rng.getUint32();
8643 dstAccesses = dirtyDstAccesses & state.rng.getUint32();
8645 // If there are no bits in stage mask use the original dirty stages
8646 srcStages = srcStages ? srcStages : dirtySrcStages;
8647 dstStages = dstStages ? dstStages : dirtyDstStages;
8650 srcStages = dstStages;
8652 state.commandBufferIsEmpty = false;
8653 state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8657 case OP_RENDERPASS_BEGIN:
8659 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8661 state.renderPassIsEmpty = true;
8662 state.stage = STAGE_RENDER_PASS;
8666 case OP_RENDERPASS_END:
8668 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8670 state.renderPassIsEmpty = true;
8671 state.stage = STAGE_COMMAND_BUFFER;
8675 case OP_RENDER_VERTEX_BUFFER:
8677 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8679 state.renderPassIsEmpty = false;
8680 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8684 case OP_RENDER_INDEX_BUFFER:
8686 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8688 state.renderPassIsEmpty = false;
8689 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8693 case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8694 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8696 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8698 state.renderPassIsEmpty = false;
8699 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8703 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8704 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8706 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8708 state.renderPassIsEmpty = false;
8709 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8713 case OP_RENDER_VERTEX_STORAGE_BUFFER:
8714 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8716 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8718 state.renderPassIsEmpty = false;
8719 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8723 case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8724 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8726 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8728 state.renderPassIsEmpty = false;
8729 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8733 case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
8734 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
8736 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8738 state.renderPassIsEmpty = false;
8739 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8743 case OP_RENDER_VERTEX_STORAGE_IMAGE:
8744 case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8746 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8748 state.renderPassIsEmpty = false;
8749 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8754 DE_FATAL("Unknown op");
// Factory for host-side (outside command buffer) commands: memory map/unmap,
// host cache maintenance, host memory accesses, resource lifetime management
// and queue/device idle waits.
// NOTE(review): the 'rng'/'usage' parameters used below are declared on lines
// elided from this view — confirm against the full source.
8758 de::MovePtr<Command> createHostCommand (Op op,
8761 vk::VkSharingMode sharing)
// Memory mapping and host cache maintenance commands.
8765 case OP_MAP: return de::MovePtr<Command>(new Map());
8766 case OP_UNMAP: return de::MovePtr<Command>(new UnMap());
8768 case OP_MAP_FLUSH: return de::MovePtr<Command>(new Flush());
8769 case OP_MAP_INVALIDATE: return de::MovePtr<Command>(new Invalidate());
// Host memory accesses; arguments are (read, write, seed) for HostMemoryAccess.
8771 case OP_MAP_READ: return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8772 case OP_MAP_WRITE: return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8773 case OP_MAP_MODIFY: return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
// Buffer/image lifetime; usage flags are derived from the test's Usage mask.
8775 case OP_BUFFER_CREATE: return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8776 case OP_BUFFER_DESTROY: return de::MovePtr<Command>(new DestroyBuffer());
8777 case OP_BUFFER_BINDMEMORY: return de::MovePtr<Command>(new BindBufferMemory());
8779 case OP_IMAGE_CREATE: return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8780 case OP_IMAGE_DESTROY: return de::MovePtr<Command>(new DestroyImage());
8781 case OP_IMAGE_BINDMEMORY: return de::MovePtr<Command>(new BindImageMemory());
// Synchronization via idle waits.
8783 case OP_QUEUE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new QueueWaitIdle());
8784 case OP_DEVICE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new DeviceWaitIdle());
8787 DE_FATAL("Unknown op");
8788 return de::MovePtr<Command>(DE_NULL);
// Factory for commands recorded into a command buffer (fills, copies, blits,
// layout transitions and pipeline barriers). Every random draw made here must
// match the draws made by applyOp() for the same op so the test state's rng
// stays in sync (callers assert this).
8792 de::MovePtr<CmdCommand> createCmdCommand (de::Random& rng,
8799 case OP_BUFFER_FILL: return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8800 case OP_BUFFER_UPDATE: return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8801 case OP_BUFFER_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8802 case OP_BUFFER_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8804 case OP_BUFFER_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8805 case OP_BUFFER_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8807 case OP_IMAGE_TRANSITION_LAYOUT:
8809 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8810 DE_ASSERT(state.hasImage);
8811 DE_ASSERT(state.hasBoundImageMemory);
// Mostly transition from the current layout; occasionally (10%) start from
// UNDEFINED to also exercise content-discarding transitions.
8813 const vk::VkImageLayout srcLayout = rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8814 const vk::VkImageLayout dstLayout = getRandomNextLayout(rng, usage, srcLayout);
8816 vk::VkPipelineStageFlags dirtySrcStages;
8817 vk::VkAccessFlags dirtySrcAccesses;
8818 vk::VkPipelineStageFlags dirtyDstStages;
8819 vk::VkAccessFlags dirtyDstAccesses;
8821 vk::VkPipelineStageFlags srcStages;
8822 vk::VkAccessFlags srcAccesses;
8823 vk::VkPipelineStageFlags dstStages;
8824 vk::VkAccessFlags dstAccesses;
8826 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8828 // Try masking some random bits
8829 srcStages = dirtySrcStages;
8830 srcAccesses = dirtySrcAccesses;
8832 dstStages = state.cache.getAllowedStages() & rng.getUint32();
8833 dstAccesses = state.cache.getAllowedAcceses() & rng.getUint32();
8835 // If there are no bits in dst stage mask use all stages
8836 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
// NOTE(review): this assignment looks unconditional in this view — presumably
// guarded by an elided "if (!srcStages)" line; confirm against full source.
8839 srcStages = dstStages;
8841 return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
// Image copies/blits carry the image's current layout for the transfer op.
8844 case OP_IMAGE_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8845 case OP_IMAGE_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8846 case OP_IMAGE_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8847 case OP_IMAGE_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8848 case OP_IMAGE_BLIT_TO_IMAGE:
// Randomly pick 2x upscale or 1:1 blit; this getBool() draw is mirrored in
// applyOp() for OP_IMAGE_BLIT_TO_IMAGE to keep the rng in sync.
8850 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8851 return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8854 case OP_IMAGE_BLIT_FROM_IMAGE:
8856 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8857 return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8860 case OP_PIPELINE_BARRIER_GLOBAL:
8861 case OP_PIPELINE_BARRIER_BUFFER:
8862 case OP_PIPELINE_BARRIER_IMAGE:
8864 vk::VkPipelineStageFlags dirtySrcStages;
8865 vk::VkAccessFlags dirtySrcAccesses;
8866 vk::VkPipelineStageFlags dirtyDstStages;
8867 vk::VkAccessFlags dirtyDstAccesses;
8869 vk::VkPipelineStageFlags srcStages;
8870 vk::VkAccessFlags srcAccesses;
8871 vk::VkPipelineStageFlags dstStages;
8872 vk::VkAccessFlags dstAccesses;
8874 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8876 // Try masking some random bits
8877 srcStages = dirtySrcStages & rng.getUint32();
8878 srcAccesses = dirtySrcAccesses & rng.getUint32();
8880 dstStages = dirtyDstStages & rng.getUint32();
8881 dstAccesses = dirtyDstAccesses & rng.getUint32();
8883 // If there are no bits in stage mask use the original dirty stages
8884 srcStages = srcStages ? srcStages : dirtySrcStages;
8885 dstStages = dstStages ? dstStages : dirtyDstStages;
// NOTE(review): this assignment looks unconditional in this view — presumably
// guarded by an elided "if (!srcStages)" line; confirm against full source.
8888 srcStages = dstStages;
8890 PipelineBarrier::Type type;
8892 if (op == OP_PIPELINE_BARRIER_IMAGE)
8893 type = PipelineBarrier::TYPE_IMAGE;
8894 else if (op == OP_PIPELINE_BARRIER_BUFFER)
8895 type = PipelineBarrier::TYPE_BUFFER;
8896 else if (op == OP_PIPELINE_BARRIER_GLOBAL)
8897 type = PipelineBarrier::TYPE_GLOBAL;
8900 type = PipelineBarrier::TYPE_LAST;
8901 DE_FATAL("Unknown op");
// Image barriers additionally carry the image's current layout.
8904 if (type == PipelineBarrier::TYPE_IMAGE)
8905 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
8907 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
8911 DE_FATAL("Unknown op");
8912 return de::MovePtr<CmdCommand>(DE_NULL);
// Factory for draw commands recorded inside a render pass; each returned
// command renders using the tested memory through one resource/descriptor
// type. (The de::Random parameter is unused — left unnamed on purpose.)
8916 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
8922 case OP_RENDER_VERTEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer());
8923 case OP_RENDER_INDEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
8925 case OP_RENDER_VERTEX_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
8926 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
8928 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
8929 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
8931 case OP_RENDER_VERTEX_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
8932 case OP_RENDER_FRAGMENT_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
8934 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
8935 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
8937 case OP_RENDER_VERTEX_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
8938 case OP_RENDER_FRAGMENT_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
8940 case OP_RENDER_VERTEX_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
8941 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
8944 DE_FATAL("Unknown op");
8945 return de::MovePtr<RenderPassCommand>(DE_NULL);
// Build one SubmitRenderPass command: keeps appending randomly chosen render
// ops (consuming the caller's opNdx/opCount budget) until OP_RENDERPASS_END
// is drawn; each choice is mirrored into 'state' via applyOp().
8949 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory& memory,
8950 de::Random& nextOpRng,
8956 vector<RenderPassCommand*> commands;
8960 for (; opNdx < opCount; opNdx++)
8964 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8966 DE_ASSERT(!ops.empty());
8969 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8971 if (op == OP_RENDERPASS_END)
// Copy the state's rng so we can verify the command factory consumed exactly
// the same random draws as applyOp() does for the same op.
8977 de::Random rng (state.rng);
8979 commands.push_back(createRenderPassCommand(rng, state, op).release());
8980 applyOp(state, memory, op, usage);
8982 DE_ASSERT(state.rng == rng);
// SubmitRenderPass takes ownership of the raw command pointers.
8987 applyOp(state, memory, OP_RENDERPASS_END, usage);
8988 return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
// NOTE(review): cleanup loop — presumably inside an elided catch block that
// frees the commands before rethrowing; confirm against full source.
8992 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
8993 delete commands[commandNdx];
// Build one SubmitCommandBuffer command: appends randomly chosen command
// buffer ops until OP_COMMAND_BUFFER_END is drawn; a nested render pass is
// delegated to createRenderPassCommands().
8999 de::MovePtr<Command> createCmdCommands (const Memory& memory,
9000 de::Random& nextOpRng,
9006 vector<CmdCommand*> commands;
9010 for (; opNdx < opCount; opNdx++)
9014 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9016 DE_ASSERT(!ops.empty());
9019 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9021 if (op == OP_COMMAND_BUFFER_END)
9027 // \note Command needs to know the state before the operation
9028 if (op == OP_RENDERPASS_BEGIN)
9030 applyOp(state, memory, op, usage);
9031 commands.push_back(createRenderPassCommands(memory, nextOpRng, state, usage, opNdx, opCount).release())
// Copy the state's rng so we can verify the command factory consumed exactly
// the same random draws as applyOp() does for the same op.
9035 de::Random rng (state.rng);
9037 commands.push_back(createCmdCommand(rng, state, op, usage).release());
9038 applyOp(state, memory, op, usage);
9040 DE_ASSERT(state.rng == rng);
// SubmitCommandBuffer takes ownership of the raw command pointers.
9047 applyOp(state, memory, OP_COMMAND_BUFFER_END, usage);
9048 return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
// NOTE(review): cleanup loop — presumably inside an elided catch block that
// frees the commands before rethrowing; confirm against full source.
9052 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9053 delete commands[commandNdx];
9059 void createCommands (vector<Command*>& commands,
9061 const Memory& memory,
9063 vk::VkSharingMode sharingMode,
9066 State state (usage, seed);
9067 // Used to select next operation only
9068 de::Random nextOpRng (seed ^ 12930809);
9070 commands.reserve(opCount);
9072 for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9076 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9078 DE_ASSERT(!ops.empty());
9081 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9083 if (op == OP_COMMAND_BUFFER_BEGIN)
9085 applyOp(state, memory, op, usage);
9086 commands.push_back(createCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9090 de::Random rng (state.rng);
9092 commands.push_back(createHostCommand(op, rng, usage, sharingMode).release());
9093 applyOp(state, memory, op, usage);
9095 // Make sure that random generator is in sync
9096 DE_ASSERT(state.rng == rng);
9101 // Clean up resources
9102 if (state.hasBuffer && state.hasImage)
9104 if (!state.queueIdle)
9105 commands.push_back(new QueueWaitIdle());
9107 if (state.hasBuffer)
9108 commands.push_back(new DestroyBuffer());
9111 commands.push_back(new DestroyImage());
// Test instance implemented as a state machine: each iterate() call runs the
// current stage member function and that stage selects the next one.
9115 class MemoryTestInstance : public TestInstance
// Stage functions return true while more iterate() calls are needed.
9119 typedef bool(MemoryTestInstance::*StageFunc)(void);
9121 MemoryTestInstance (::vkt::Context& context, const TestConfig& config);
9122 ~MemoryTestInstance (void);
9124 tcu::TestStatus iterate (void);
// Immutable test configuration and cached device memory properties.
9127 const TestConfig m_config;
9128 const size_t m_iterationCount;
9129 const size_t m_opCount;
9130 const vk::VkPhysicalDeviceMemoryProperties m_memoryProperties;
// Index of the memory type currently under test.
9131 deUint32 m_memoryTypeNdx;
9134 tcu::ResultCollector m_resultCollector;
// Per-iteration resources; recreated for every generated command sequence.
9136 vector<Command*> m_commands;
9137 MovePtr<Memory> m_memory;
9138 MovePtr<Context> m_renderContext;
9139 MovePtr<PrepareContext> m_prepareContext;
// State-machine transitions; each returns whether iteration should continue.
9141 bool nextIteration (void);
9142 bool nextMemoryType (void);
// The stages themselves, run in this order for every iteration.
9144 bool createCommandsAndAllocateMemory (void);
9145 bool prepare (void);
9146 bool execute (void);
9148 void resetResources (void);
// Wait for the device to go idle, then delete all recorded commands and drop
// the prepare context so the next iteration/memory type starts clean.
9151 void MemoryTestInstance::resetResources (void)
9153 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9154 const vk::VkDevice device = m_context.getDevice();
// Commands may still be referenced by in-flight GPU work; wait before freeing.
9156 VK_CHECK(vkd.deviceWaitIdle(device));
9158 for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9160 delete m_commands[commandNdx];
9161 m_commands[commandNdx] = DE_NULL;
9165 m_prepareContext.clear();
// Advance to the next iteration of the current memory type; when all
// iterations are done, fall through to the next memory type.
// NOTE(review): the m_iteration increment is presumably on an elided line.
9169 bool MemoryTestInstance::nextIteration (void)
9173 if (m_iteration < m_iterationCount)
// Restart the stage pipeline for the new iteration.
9176 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9180 return nextMemoryType();
// Advance to the next memory type; returns false (ending the test) once every
// memory type reported by the device has been processed.
9183 bool MemoryTestInstance::nextMemoryType (void)
// Commands must already have been freed (e.g. via resetResources()).
9187 DE_ASSERT(m_commands.empty());
9191 if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
// Restart the stage pipeline for the new memory type.
9194 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
// Log the test configuration and the device's memory heaps/types, then create
// the render context (single universal queue) shared by all iterations.
9205 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
9206 : TestInstance (context)
9208 , m_iterationCount (5)
9210 , m_memoryProperties (vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
9211 , m_memoryTypeNdx (0)
// Start the state machine at command creation for memory type 0.
9213 , m_stage (&MemoryTestInstance::createCommandsAndAllocateMemory)
9214 , m_resultCollector (context.getTestContext().getLog())
9216 , m_memory (DE_NULL)
9218 TestLog& log = context.getTestContext().getLog();
9220 const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
9222 log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
9223 log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
9224 log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
9228 const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
9230 for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
9232 const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
9234 log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
9235 log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
9238 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
9240 const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
9242 log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
9243 log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
// Build the render context around the single universal queue.
9248 const vk::InstanceInterface& vki = context.getInstanceInterface();
9249 const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
9250 const vk::DeviceInterface& vkd = context.getDeviceInterface();
9251 const vk::VkDevice device = context.getDevice();
9252 const vk::VkQueue queue = context.getUniversalQueue();
9253 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
9254 vector<pair<deUint32, vk::VkQueue> > queues;
9256 queues.push_back(std::make_pair(queueFamilyIndex, queue));
9258 m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
// NOTE(review): destructor body is elided from this view — presumably frees
// the remaining commands/contexts; confirm against full source.
9262 MemoryTestInstance::~MemoryTestInstance (void)
// Stage 1: probe what the current memory type supports, allocate the test
// memory and generate the pseudo-random command sequence. Skips to the next
// memory type when nothing is supported or on TestError.
9267 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
9269 const vk::VkDevice device = m_context.getDevice();
9270 TestLog& log = m_context.getTestContext().getLog();
9271 const vk::InstanceInterface& vki = m_context.getInstanceInterface();
9272 const vk::VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
9273 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9274 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
9275 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
9276 "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
9277 const vector<deUint32>& queues = m_renderContext->getQueueFamilies();
9279 DE_ASSERT(m_commands.empty());
// Host read/write usages require a host-visible memory type.
9281 if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
9282 && !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
9284 log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
9286 return nextMemoryType();
// Determine the largest buffer / RGBA8 image this memory type can back.
9292 const vk::VkBufferUsageFlags bufferUsage = usageToBufferUsageFlags(m_config.usage);
9293 const vk::VkImageUsageFlags imageUsage = usageToImageUsageFlags(m_config.usage);
9294 const vk::VkDeviceSize maxBufferSize = bufferUsage != 0
9295 ? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
9297 const IVec2 maxImageSize = imageUsage != 0
9298 ? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
9301 log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
9302 log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
9304 // Skip tests if there are no supported operations
9305 if (maxBufferSize == 0
9306 && maxImageSize[0] == 0
9307 && (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
9309 log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
9311 return nextMemoryType();
// Seed depends on iteration and memory type so every pass is distinct but
// reproducible.
9315 const deUint32 seed = 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount + m_memoryTypeNdx);
9317 m_memory = MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
9319 log << TestLog::Message << "Create commands" << TestLog::EndMessage;
9320 createCommands(m_commands, seed, *m_memory, m_config.usage, m_config.sharing, m_opCount);
9322 m_stage = &MemoryTestInstance::prepare;
// On TestError record the failure and move on to the next memory type.
9326 catch (const tcu::TestError& e)
9328 m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
9329 return nextMemoryType();
// Stage 2: let every generated command allocate the Vulkan objects it needs.
// A prepare failure aborts the whole memory type, not just the iteration.
9334 bool MemoryTestInstance::prepare (void)
9336 TestLog& log = m_context.getTestContext().getLog();
9337 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9338 "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration" + de::toString(m_iteration));
9340 m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9342 DE_ASSERT(!m_commands.empty());
9344 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9346 Command& command = *m_commands[cmdNdx];
9350 command.prepare(*m_prepareContext);
9352 catch (const tcu::TestError& e)
9354 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9355 return nextMemoryType();
9359 m_stage = &MemoryTestInstance::execute;
// Stage 3: run every prepared command, then wait for the device to idle so
// results are observable. An execute failure only skips this iteration.
9363 bool MemoryTestInstance::execute (void)
9365 TestLog& log = m_context.getTestContext().getLog();
9366 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9367 "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9368 ExecuteContext executeContext (*m_renderContext);
9369 const vk::VkDevice device = m_context.getDevice();
9370 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9372 DE_ASSERT(!m_commands.empty());
9374 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9376 Command& command = *m_commands[cmdNdx];
9380 command.execute(executeContext);
9382 catch (const tcu::TestError& e)
9384 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9385 return nextIteration();
// Make all submitted work complete before verification.
9389 VK_CHECK(vkd.deviceWaitIdle(device));
9391 m_stage = &MemoryTestInstance::verify;
// Stage 4: let every command check its observable results against the
// reference state; failures are collected rather than aborting the test.
9395 bool MemoryTestInstance::verify (void)
9397 DE_ASSERT(!m_commands.empty());
9399 TestLog& log = m_context.getTestContext().getLog();
9400 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9401 "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9402 VerifyContext verifyContext (log, m_resultCollector, *m_renderContext, m_config.size);
9404 log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9406 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9408 Command& command = *m_commands[cmdNdx];
9412 command.verify(verifyContext, cmdNdx);
9414 catch (const tcu::TestError& e)
9416 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9417 return nextIteration();
9421 return nextIteration();
9424 tcu::TestStatus MemoryTestInstance::iterate (void)
9426 if ((this->*m_stage)())
9427 return tcu::TestStatus::incomplete();
9429 return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9434 void init (vk::SourceCollections& sources, TestConfig config) const
9436 // Vertex buffer rendering
9437 if (config.usage & USAGE_VERTEX_BUFFER)
9439 const char* const vertexShader =
9441 "layout(location = 0) in highp vec2 a_position;\n"
9442 "void main (void) {\n"
9443 "\tgl_PointSize = 1.0;\n"
9444 "\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9447 sources.glslSources.add("vertex-buffer.vert")
9448 << glu::VertexSource(vertexShader);
9451 // Index buffer rendering
9452 if (config.usage & USAGE_INDEX_BUFFER)
9454 const char* const vertexShader =
9456 "precision highp float;\n"
9457 "void main (void) {\n"
9458 "\tgl_PointSize = 1.0;\n"
9459 "\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9460 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9463 sources.glslSources.add("index-buffer.vert")
9464 << glu::VertexSource(vertexShader);
9467 if (config.usage & USAGE_UNIFORM_BUFFER)
9470 std::ostringstream vertexShader;
9474 "precision highp float;\n"
9475 "layout(set=0, binding=0) uniform Block\n"
9477 "\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9479 "void main (void) {\n"
9480 "\tgl_PointSize = 1.0;\n"
9481 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9482 "\thighp uint val;\n"
9483 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9484 "\t\tval = vecVal.x;\n"
9485 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9486 "\t\tval = vecVal.y;\n"
9487 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9488 "\t\tval = vecVal.z;\n"
9489 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9490 "\t\tval = vecVal.w;\n"
9491 "\tif ((gl_VertexIndex % 2) == 0)\n"
9492 "\t\tval = val & 0xFFFFu;\n"
9494 "\t\tval = val >> 16u;\n"
9495 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9496 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9499 sources.glslSources.add("uniform-buffer.vert")
9500 << glu::VertexSource(vertexShader.str());
9504 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9505 const size_t arrayIntSize = arraySize * 4;
9506 std::ostringstream fragmentShader;
9510 "precision highp float;\n"
9511 "layout(location = 0) out highp vec4 o_color;\n"
9512 "layout(set=0, binding=0) uniform Block\n"
9514 "\thighp uvec4 values[" << arraySize << "];\n"
9516 "layout(push_constant) uniform PushC\n"
9519 "\tuint valuesPerPixel;\n"
9521 "void main (void) {\n"
9522 "\thighp uint id = pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel) + uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9523 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel))\n"
9525 "\thighp uint value = id;\n"
9526 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9528 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % " << arraySize << "u];\n"
9529 "\t\tif ((value % 4u) == 0u)\n"
9530 "\t\t\tvalue = vecVal.x;\n"
9531 "\t\telse if ((value % 4u) == 1u)\n"
9532 "\t\t\tvalue = vecVal.y;\n"
9533 "\t\telse if ((value % 4u) == 2u)\n"
9534 "\t\t\tvalue = vecVal.z;\n"
9535 "\t\telse if ((value % 4u) == 3u)\n"
9536 "\t\t\tvalue = vecVal.w;\n"
9538 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9539 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9542 sources.glslSources.add("uniform-buffer.frag")
9543 << glu::FragmentSource(fragmentShader.str());
9547 if (config.usage & USAGE_STORAGE_BUFFER)
9550 // Vertex storage buffer rendering
9551 const char* const vertexShader =
9553 "precision highp float;\n"
9554 "layout(set=0, binding=0) buffer Block\n"
9556 "\thighp uvec4 values[];\n"
9558 "void main (void) {\n"
9559 "\tgl_PointSize = 1.0;\n"
9560 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9561 "\thighp uint val;\n"
9562 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9563 "\t\tval = vecVal.x;\n"
9564 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9565 "\t\tval = vecVal.y;\n"
9566 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9567 "\t\tval = vecVal.z;\n"
9568 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9569 "\t\tval = vecVal.w;\n"
9570 "\tif ((gl_VertexIndex % 2) == 0)\n"
9571 "\t\tval = val & 0xFFFFu;\n"
9573 "\t\tval = val >> 16u;\n"
9574 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9575 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9578 sources.glslSources.add("storage-buffer.vert")
9579 << glu::VertexSource(vertexShader);
9583 std::ostringstream fragmentShader;
9587 "precision highp float;\n"
9588 "precision highp int;\n"
9589 "layout(location = 0) out highp vec4 o_color;\n"
9590 "layout(set=0, binding=0) buffer Block\n"
9592 "\thighp uvec4 values[];\n"
9594 "layout(push_constant) uniform PushC\n"
9596 "\tuint valuesPerPixel;\n"
9597 "\tuint bufferSize;\n"
9599 "void main (void) {\n"
9600 "\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9601 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9602 "\thighp uint value = id;\n"
9603 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9605 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9606 "\t\tif ((value % 4u) == 0u)\n"
9607 "\t\t\tvalue = vecVal.x;\n"
9608 "\t\telse if ((value % 4u) == 1u)\n"
9609 "\t\t\tvalue = vecVal.y;\n"
9610 "\t\telse if ((value % 4u) == 2u)\n"
9611 "\t\t\tvalue = vecVal.z;\n"
9612 "\t\telse if ((value % 4u) == 3u)\n"
9613 "\t\t\tvalue = vecVal.w;\n"
9615 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9616 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9619 sources.glslSources.add("storage-buffer.frag")
9620 << glu::FragmentSource(fragmentShader.str());
9624 if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9627 // Vertex uniform texel buffer rendering
9628 const char* const vertexShader =
9630 "#extension GL_EXT_texture_buffer : require\n"
9631 "precision highp float;\n"
9632 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9633 "void main (void) {\n"
9634 "\tgl_PointSize = 1.0;\n"
9635 "\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9636 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9637 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9640 sources.glslSources.add("uniform-texel-buffer.vert")
9641 << glu::VertexSource(vertexShader);
9645 // Fragment uniform texel buffer rendering
9646 const char* const fragmentShader =
9648 "#extension GL_EXT_texture_buffer : require\n"
9649 "precision highp float;\n"
9650 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9651 "layout(location = 0) out highp vec4 o_color;\n"
9652 "layout(push_constant) uniform PushC\n"
9655 "\tuint valuesPerPixel;\n"
9656 "\tuint maxTexelCount;\n"
9658 "void main (void) {\n"
9659 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9660 "\thighp uint value = id;\n"
9661 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9663 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9665 "\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9667 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9668 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9671 sources.glslSources.add("uniform-texel-buffer.frag")
9672 << glu::FragmentSource(fragmentShader);
9676 if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9679 // Vertex storage texel buffer rendering
9680 const char* const vertexShader =
9682 "#extension GL_EXT_texture_buffer : require\n"
9683 "precision highp float;\n"
9684 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9685 "out gl_PerVertex {\n"
9686 "\tvec4 gl_Position;\n"
9687 "\tfloat gl_PointSize;\n"
9689 "void main (void) {\n"
9690 "\tgl_PointSize = 1.0;\n"
9691 "\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9692 "\tif (gl_VertexIndex % 2 == 0)\n"
9693 "\t\tval = val & 0xFFFFu;\n"
9695 "\t\tval = val >> 16;\n"
9696 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9697 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9700 sources.glslSources.add("storage-texel-buffer.vert")
9701 << glu::VertexSource(vertexShader);
9704 // Fragment storage texel buffer rendering
9705 const char* const fragmentShader =
9707 "#extension GL_EXT_texture_buffer : require\n"
9708 "precision highp float;\n"
9709 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9710 "layout(location = 0) out highp vec4 o_color;\n"
9711 "layout(push_constant) uniform PushC\n"
9714 "\tuint valuesPerPixel;\n"
9715 "\tuint maxTexelCount;\n"
9718 "void main (void) {\n"
9719 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9720 "\thighp uint value = id;\n"
9721 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9723 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9725 "\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9727 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9728 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9731 sources.glslSources.add("storage-texel-buffer.frag")
9732 << glu::FragmentSource(fragmentShader);
9736 if (config.usage & USAGE_STORAGE_IMAGE)
9739 // Vertex storage image
9740 const char* const vertexShader =
9742 "precision highp float;\n"
9743 "layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9744 "out gl_PerVertex {\n"
9745 "\tvec4 gl_Position;\n"
9746 "\tfloat gl_PointSize;\n"
9748 "void main (void) {\n"
9749 "\tgl_PointSize = 1.0;\n"
9750 "\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9751 "\thighp vec2 pos;\n"
9752 "\tif (gl_VertexIndex % 2 == 0)\n"
9753 "\t\tpos = val.xy;\n"
9755 "\t\tpos = val.zw;\n"
9756 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9759 sources.glslSources.add("storage-image.vert")
9760 << glu::VertexSource(vertexShader);
9763 // Fragment storage image
9764 const char* const fragmentShader =
9766 "#extension GL_EXT_texture_buffer : require\n"
9767 "precision highp float;\n"
9768 "layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9769 "layout(location = 0) out highp vec4 o_color;\n"
9770 "void main (void) {\n"
9771 "\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
9772 "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9773 "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9774 "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9776 "\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
9777 "\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9779 "\to_color = vec4(value) / vec4(255.0);\n"
9782 sources.glslSources.add("storage-image.frag")
9783 << glu::FragmentSource(fragmentShader);
9787 if (config.usage & USAGE_SAMPLED_IMAGE)
9790 // Vertex sampled image
9791 const char* const vertexShader =
9793 "precision highp float;\n"
9794 "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9795 "out gl_PerVertex {\n"
9796 "\tvec4 gl_Position;\n"
9797 "\tfloat gl_PointSize;\n"
9799 "void main (void) {\n"
9800 "\tgl_PointSize = 1.0;\n"
9801 "\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9802 "\thighp vec2 pos;\n"
9803 "\tif (gl_VertexIndex % 2 == 0)\n"
9804 "\t\tpos = val.xy;\n"
9806 "\t\tpos = val.zw;\n"
9807 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9810 sources.glslSources.add("sampled-image.vert")
9811 << glu::VertexSource(vertexShader);
9814 // Fragment sampled image
9815 const char* const fragmentShader =
9817 "#extension GL_EXT_texture_buffer : require\n"
9818 "precision highp float;\n"
9819 "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9820 "layout(location = 0) out highp vec4 o_color;\n"
9821 "void main (void) {\n"
9822 "\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
9823 "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9824 "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9825 "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9827 "\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
9828 "\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9830 "\to_color = vec4(value) / vec4(255.0);\n"
9833 sources.glslSources.add("sampled-image.frag")
9834 << glu::FragmentSource(fragmentShader);
9839 const char* const vertexShader =
9841 "out gl_PerVertex {\n"
9842 "\tvec4 gl_Position;\n"
9844 "precision highp float;\n"
9845 "void main (void) {\n"
9846 "\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
9847 "\t ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
9850 sources.glslSources.add("render-quad.vert")
9851 << glu::VertexSource(vertexShader);
9855 const char* const fragmentShader =
9857 "layout(location = 0) out highp vec4 o_color;\n"
9858 "void main (void) {\n"
9859 "\to_color = vec4(1.0);\n"
9862 sources.glslSources.add("render-white.frag")
9863 << glu::FragmentSource(fragmentShader);
9870 tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
9872 de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
9873 const vk::VkDeviceSize sizes[] =
9880 const Usage usages[] =
9886 USAGE_VERTEX_BUFFER,
9888 USAGE_UNIFORM_BUFFER,
9889 USAGE_UNIFORM_TEXEL_BUFFER,
9890 USAGE_STORAGE_BUFFER,
9891 USAGE_STORAGE_TEXEL_BUFFER,
9892 USAGE_STORAGE_IMAGE,
9895 const Usage readUsages[] =
9899 USAGE_VERTEX_BUFFER,
9901 USAGE_UNIFORM_BUFFER,
9902 USAGE_UNIFORM_TEXEL_BUFFER,
9903 USAGE_STORAGE_BUFFER,
9904 USAGE_STORAGE_TEXEL_BUFFER,
9905 USAGE_STORAGE_IMAGE,
9909 const Usage writeUsages[] =
9915 for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
9917 const Usage writeUsage = writeUsages[writeUsageNdx];
9919 for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
9921 const Usage readUsage = readUsages[readUsageNdx];
9922 const Usage usage = writeUsage | readUsage;
9923 const string usageGroupName (usageToName(usage));
9924 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9926 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9928 const vk::VkDeviceSize size = sizes[sizeNdx];
9929 const string testName (de::toString((deUint64)(size)));
9930 const TestConfig config =
9934 vk::VK_SHARING_MODE_EXCLUSIVE
9937 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
9940 group->addChild(usageGroup.get());
9941 usageGroup.release();
9946 Usage all = (Usage)0;
9948 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
9949 all = all | usages[usageNdx];
9952 const string usageGroupName ("all");
9953 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9955 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9957 const vk::VkDeviceSize size = sizes[sizeNdx];
9958 const string testName (de::toString((deUint64)(size)));
9959 const TestConfig config =
9963 vk::VK_SHARING_MODE_EXCLUSIVE
9966 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
9969 group->addChild(usageGroup.get());
9970 usageGroup.release();
9974 const string usageGroupName ("all_device");
9975 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9977 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9979 const vk::VkDeviceSize size = sizes[sizeNdx];
9980 const string testName (de::toString((deUint64)(size)));
9981 const TestConfig config =
9983 (Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
9985 vk::VK_SHARING_MODE_EXCLUSIVE
9988 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
9991 group->addChild(usageGroup.get());
9992 usageGroup.release();
9996 return group.release();