1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Pipeline barrier tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktMemoryPipelineBarrierTests.hpp"
26 #include "vktTestCaseUtil.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
36 #include "tcuMaybe.hpp"
37 #include "tcuTextureUtil.hpp"
38 #include "tcuTestLog.hpp"
39 #include "tcuResultCollector.hpp"
40 #include "tcuTexture.hpp"
41 #include "tcuImageCompare.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
72 using tcu::ConstPixelBufferAccess;
73 using tcu::PixelBufferAccess;
74 using tcu::TextureFormat;
75 using tcu::TextureLevel;
// Size limits used when selecting resource sizes for the tests.
enum
{
    MAX_UNIFORM_BUFFER_SIZE = 1024,
    MAX_STORAGE_BUFFER_SIZE = (1<<28),
    MAX_SIZE = (128 * 1024)
};
// \todo [mika] Add to utilities
// Integer division that rounds the result up (ceil(a/b) for non-negative operands).
template<typename T>
T divRoundUp (const T& a, const T& b)
{
    return (a / b) + (a % b == 0 ? 0 : 1);
}
99 ALL_PIPELINE_STAGES = vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
100 | vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
101 | vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT
102 | vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
103 | vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
104 | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
105 | vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
106 | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
107 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
108 | vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
109 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
110 | vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
111 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
112 | vk::VK_PIPELINE_STAGE_TRANSFER_BIT
113 | vk::VK_PIPELINE_STAGE_HOST_BIT
118 ALL_ACCESSES = vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT
119 | vk::VK_ACCESS_INDEX_READ_BIT
120 | vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
121 | vk::VK_ACCESS_UNIFORM_READ_BIT
122 | vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
123 | vk::VK_ACCESS_SHADER_READ_BIT
124 | vk::VK_ACCESS_SHADER_WRITE_BIT
125 | vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
126 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
127 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
128 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
129 | vk::VK_ACCESS_TRANSFER_READ_BIT
130 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
131 | vk::VK_ACCESS_HOST_READ_BIT
132 | vk::VK_ACCESS_HOST_WRITE_BIT
133 | vk::VK_ACCESS_MEMORY_READ_BIT
134 | vk::VK_ACCESS_MEMORY_WRITE_BIT
// Bitmask describing how the tested memory object is used.
enum Usage
{
    // Mapped host read and write
    USAGE_HOST_READ = (0x1u<<0),
    USAGE_HOST_WRITE = (0x1u<<1),

    // Copy and other transfer operations
    USAGE_TRANSFER_SRC = (0x1u<<2),
    USAGE_TRANSFER_DST = (0x1u<<3),

    // Buffer usage flags
    USAGE_INDEX_BUFFER = (0x1u<<4),
    USAGE_VERTEX_BUFFER = (0x1u<<5),

    USAGE_UNIFORM_BUFFER = (0x1u<<6),
    USAGE_STORAGE_BUFFER = (0x1u<<7),

    USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
    USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),

    // \todo [2016-03-09 mika] This is probably almost impossible to do
    USAGE_INDIRECT_BUFFER = (0x1u<<10),

    // Texture usage flags
    USAGE_SAMPLED_IMAGE = (0x1u<<11),
    USAGE_STORAGE_IMAGE = (0x1u<<12),
    USAGE_COLOR_ATTACHMENT = (0x1u<<13),
    USAGE_INPUT_ATTACHMENT = (0x1u<<14),
    USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15)
};
168 bool supportsDeviceBufferWrites (Usage usage)
170 if (usage & USAGE_TRANSFER_DST)
173 if (usage & USAGE_STORAGE_BUFFER)
176 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
182 bool supportsDeviceImageWrites (Usage usage)
184 if (usage & USAGE_TRANSFER_DST)
187 if (usage & USAGE_STORAGE_IMAGE)
190 if (usage & USAGE_COLOR_ATTACHMENT)
// Sequential access enums
// Densely numbered counterparts of the vk::VK_ACCESS_* bits, usable as array indices.
enum Access
{
    ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
    ACCESS_INDEX_READ_BIT,
    ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
    ACCESS_UNIFORM_READ_BIT,
    ACCESS_INPUT_ATTACHMENT_READ_BIT,
    ACCESS_SHADER_READ_BIT,
    ACCESS_SHADER_WRITE_BIT,
    ACCESS_COLOR_ATTACHMENT_READ_BIT,
    ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
    ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
    ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
    ACCESS_TRANSFER_READ_BIT,
    ACCESS_TRANSFER_WRITE_BIT,
    ACCESS_HOST_READ_BIT,
    ACCESS_HOST_WRITE_BIT,
    ACCESS_MEMORY_READ_BIT,
    ACCESS_MEMORY_WRITE_BIT,

    ACCESS_LAST
};
// Sequential stage enums
// Densely numbered counterparts of the vk::VK_PIPELINE_STAGE_* bits, usable as array indices.
enum PipelineStage
{
    PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
    PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
    PIPELINESTAGE_DRAW_INDIRECT_BIT,
    PIPELINESTAGE_VERTEX_INPUT_BIT,
    PIPELINESTAGE_VERTEX_SHADER_BIT,
    PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
    PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
    PIPELINESTAGE_GEOMETRY_SHADER_BIT,
    PIPELINESTAGE_FRAGMENT_SHADER_BIT,
    PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
    PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
    PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
    PIPELINESTAGE_COMPUTE_SHADER_BIT,
    PIPELINESTAGE_TRANSFER_BIT,
    PIPELINESTAGE_HOST_BIT,

    PIPELINESTAGE_LAST
};
242 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flags)
246 case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT: return PIPELINESTAGE_TOP_OF_PIPE_BIT;
247 case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT: return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
248 case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT: return PIPELINESTAGE_DRAW_INDIRECT_BIT;
249 case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT: return PIPELINESTAGE_VERTEX_INPUT_BIT;
250 case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT: return PIPELINESTAGE_VERTEX_SHADER_BIT;
251 case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
252 case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
253 case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT: return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
254 case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT: return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
255 case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
256 case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
257 case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT: return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
258 case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT: return PIPELINESTAGE_COMPUTE_SHADER_BIT;
259 case vk::VK_PIPELINE_STAGE_TRANSFER_BIT: return PIPELINESTAGE_TRANSFER_BIT;
260 case vk::VK_PIPELINE_STAGE_HOST_BIT: return PIPELINESTAGE_HOST_BIT;
263 DE_FATAL("Unknown pipeline stage flags");
264 return PIPELINESTAGE_LAST;
268 Usage operator| (Usage a, Usage b)
270 return (Usage)((deUint32)a | (deUint32)b);
273 Usage operator& (Usage a, Usage b)
275 return (Usage)((deUint32)a & (deUint32)b);
278 string usageToName (Usage usage)
283 const char* const name;
286 { USAGE_HOST_READ, "host_read" },
287 { USAGE_HOST_WRITE, "host_write" },
289 { USAGE_TRANSFER_SRC, "transfer_src" },
290 { USAGE_TRANSFER_DST, "transfer_dst" },
292 { USAGE_INDEX_BUFFER, "index_buffer" },
293 { USAGE_VERTEX_BUFFER, "vertex_buffer" },
294 { USAGE_UNIFORM_BUFFER, "uniform_buffer" },
295 { USAGE_STORAGE_BUFFER, "storage_buffer" },
296 { USAGE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer" },
297 { USAGE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer" },
298 { USAGE_INDIRECT_BUFFER, "indirect_buffer" },
299 { USAGE_SAMPLED_IMAGE, "image_sampled" },
300 { USAGE_STORAGE_IMAGE, "storage_image" },
301 { USAGE_COLOR_ATTACHMENT, "color_attachment" },
302 { USAGE_INPUT_ATTACHMENT, "input_attachment" },
303 { USAGE_DEPTH_STENCIL_ATTACHMENT, "depth_stencil_attachment" },
306 std::ostringstream stream;
309 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
311 if (usage & usageNames[usageNdx].usage)
318 stream << usageNames[usageNdx].name;
325 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
327 vk::VkBufferUsageFlags flags = 0;
329 if (usage & USAGE_TRANSFER_SRC)
330 flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
332 if (usage & USAGE_TRANSFER_DST)
333 flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
335 if (usage & USAGE_INDEX_BUFFER)
336 flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
338 if (usage & USAGE_VERTEX_BUFFER)
339 flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
341 if (usage & USAGE_INDIRECT_BUFFER)
342 flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
344 if (usage & USAGE_UNIFORM_BUFFER)
345 flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
347 if (usage & USAGE_STORAGE_BUFFER)
348 flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
350 if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
351 flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
353 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
354 flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
359 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
361 vk::VkImageUsageFlags flags = 0;
363 if (usage & USAGE_TRANSFER_SRC)
364 flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
366 if (usage & USAGE_TRANSFER_DST)
367 flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
369 if (usage & USAGE_SAMPLED_IMAGE)
370 flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
372 if (usage & USAGE_STORAGE_IMAGE)
373 flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
375 if (usage & USAGE_COLOR_ATTACHMENT)
376 flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
378 if (usage & USAGE_INPUT_ATTACHMENT)
379 flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
381 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
382 flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
387 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
389 vk::VkPipelineStageFlags flags = 0;
391 if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
392 flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
394 if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
395 flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
397 if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
398 flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
400 if (usage & USAGE_INDIRECT_BUFFER)
401 flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
404 (USAGE_UNIFORM_BUFFER
405 | USAGE_STORAGE_BUFFER
406 | USAGE_UNIFORM_TEXEL_BUFFER
407 | USAGE_STORAGE_TEXEL_BUFFER
408 | USAGE_SAMPLED_IMAGE
409 | USAGE_STORAGE_IMAGE))
411 flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
412 | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
413 | vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
414 | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
415 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
416 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
419 if (usage & USAGE_INPUT_ATTACHMENT)
420 flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
422 if (usage & USAGE_COLOR_ATTACHMENT)
423 flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
425 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
427 flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
428 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
434 vk::VkAccessFlags usageToAccessFlags (Usage usage)
436 vk::VkAccessFlags flags = 0;
438 if (usage & USAGE_HOST_READ)
439 flags |= vk::VK_ACCESS_HOST_READ_BIT;
441 if (usage & USAGE_HOST_WRITE)
442 flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
444 if (usage & USAGE_TRANSFER_SRC)
445 flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
447 if (usage & USAGE_TRANSFER_DST)
448 flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
450 if (usage & USAGE_INDEX_BUFFER)
451 flags |= vk::VK_ACCESS_INDEX_READ_BIT;
453 if (usage & USAGE_VERTEX_BUFFER)
454 flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
456 if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
457 flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
459 if (usage & USAGE_SAMPLED_IMAGE)
460 flags |= vk::VK_ACCESS_SHADER_READ_BIT;
462 if (usage & (USAGE_STORAGE_BUFFER
463 | USAGE_STORAGE_TEXEL_BUFFER
464 | USAGE_STORAGE_IMAGE))
465 flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
467 if (usage & USAGE_INDIRECT_BUFFER)
468 flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
470 if (usage & USAGE_COLOR_ATTACHMENT)
471 flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
473 if (usage & USAGE_INPUT_ATTACHMENT)
474 flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
476 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
477 flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
478 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
// NOTE(review): fragment of a parameter struct — the enclosing "struct ... {"
// header and closing "};" were lost in extraction, so the struct's name is not
// visible here. The members appear to describe a test configuration: a byte
// size and a queue sharing mode — TODO confirm against the full file.
486 vk::VkDeviceSize size;
487 vk::VkSharingMode sharing;
490 vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface& vkd,
492 vk::VkCommandPool pool,
493 vk::VkCommandBufferLevel level)
495 const vk::VkCommandBufferInheritanceInfo inheritInfo =
497 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
506 const vk::VkCommandBufferBeginInfo beginInfo =
508 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
511 (level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
514 vk::Move<vk::VkCommandBuffer> commandBuffer (allocateCommandBuffer(vkd, device, pool, level));
516 vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
518 return commandBuffer;
521 vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface& vkd,
523 vk::VkDeviceSize size,
524 vk::VkBufferUsageFlags usage,
525 vk::VkSharingMode sharingMode,
526 const vector<deUint32>& queueFamilies)
528 const vk::VkBufferCreateInfo createInfo =
530 vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
537 (deUint32)queueFamilies.size(),
541 return vk::createBuffer(vkd, device, &createInfo);
544 vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface& vkd,
546 vk::VkDeviceSize size,
547 deUint32 memoryTypeIndex)
549 const vk::VkMemoryAllocateInfo alloc =
551 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
558 return vk::allocateMemory(vkd, device, &alloc);
561 vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface& vki,
562 const vk::DeviceInterface& vkd,
563 vk::VkPhysicalDevice physicalDevice,
566 vk::VkMemoryPropertyFlags properties)
568 const vk::VkMemoryRequirements memoryRequirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
569 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
570 deUint32 memoryTypeIndex;
572 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
574 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
575 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
579 const vk::VkMemoryAllocateInfo allocationInfo =
581 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
583 memoryRequirements.size,
586 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
588 VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
592 catch (const vk::Error& error)
594 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
595 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
597 // Try next memory type/heap if out of memory
601 // Throw all other errors forward
608 TCU_FAIL("Failed to allocate memory for buffer");
611 vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface& vki,
612 const vk::DeviceInterface& vkd,
613 vk::VkPhysicalDevice physicalDevice,
616 vk::VkMemoryPropertyFlags properties)
618 const vk::VkMemoryRequirements memoryRequirements = vk::getImageMemoryRequirements(vkd, device, image);
619 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
620 deUint32 memoryTypeIndex;
622 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
624 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
625 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
629 const vk::VkMemoryAllocateInfo allocationInfo =
631 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
633 memoryRequirements.size,
636 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
638 VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
642 catch (const vk::Error& error)
644 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
645 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
647 // Try next memory type/heap if out of memory
651 // Throw all other errors forward
658 TCU_FAIL("Failed to allocate memory for image");
661 void queueRun (const vk::DeviceInterface& vkd,
663 vk::VkCommandBuffer commandBuffer)
665 const vk::VkSubmitInfo submitInfo =
667 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
672 (const vk::VkPipelineStageFlags*)DE_NULL,
681 VK_CHECK(vkd.queueSubmit(queue, 1, &submitInfo, 0));
682 VK_CHECK(vkd.queueWaitIdle(queue));
685 void* mapMemory (const vk::DeviceInterface& vkd,
687 vk::VkDeviceMemory memory,
688 vk::VkDeviceSize size)
692 VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
697 class ReferenceMemory
700 ReferenceMemory (size_t size);
702 void set (size_t pos, deUint8 val);
703 deUint8 get (size_t pos) const;
704 bool isDefined (size_t pos) const;
706 void setDefined (size_t offset, size_t size, const void* data);
707 void setUndefined (size_t offset, size_t size);
708 void setData (size_t offset, size_t size, const void* data);
710 size_t getSize (void) const { return m_data.size(); }
713 vector<deUint8> m_data;
714 vector<deUint64> m_defined;
717 ReferenceMemory::ReferenceMemory (size_t size)
719 , m_defined (size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
723 void ReferenceMemory::set (size_t pos, deUint8 val)
725 DE_ASSERT(pos < m_data.size());
728 m_defined[pos / 64] |= 0x1ull << (pos % 64);
731 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
733 const deUint8* data = (const deUint8*)data_;
735 DE_ASSERT(offset < m_data.size());
736 DE_ASSERT(offset + size <= m_data.size());
738 // \todo [2016-03-09 mika] Optimize
739 for (size_t pos = 0; pos < size; pos++)
741 m_data[offset + pos] = data[pos];
742 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
746 void ReferenceMemory::setUndefined (size_t offset, size_t size)
748 // \todo [2016-03-09 mika] Optimize
749 for (size_t pos = 0; pos < size; pos++)
750 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
753 deUint8 ReferenceMemory::get (size_t pos) const
755 DE_ASSERT(pos < m_data.size());
756 DE_ASSERT(isDefined(pos));
760 bool ReferenceMemory::isDefined (size_t pos) const
762 DE_ASSERT(pos < m_data.size());
764 return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
770 Memory (const vk::InstanceInterface& vki,
771 const vk::DeviceInterface& vkd,
772 vk::VkPhysicalDevice physicalDevice,
774 vk::VkDeviceSize size,
775 deUint32 memoryTypeIndex,
776 vk::VkDeviceSize maxBufferSize,
777 deInt32 maxImageWidth,
778 deInt32 maxImageHeight);
780 vk::VkDeviceSize getSize (void) const { return m_size; }
781 vk::VkDeviceSize getMaxBufferSize (void) const { return m_maxBufferSize; }
782 bool getSupportBuffers (void) const { return m_maxBufferSize > 0; }
784 deInt32 getMaxImageWidth (void) const { return m_maxImageWidth; }
785 deInt32 getMaxImageHeight (void) const { return m_maxImageHeight; }
786 bool getSupportImages (void) const { return m_maxImageWidth > 0; }
788 const vk::VkMemoryType& getMemoryType (void) const { return m_memoryType; }
789 deUint32 getMemoryTypeIndex (void) const { return m_memoryTypeIndex; }
790 vk::VkDeviceMemory getMemory (void) const { return *m_memory; }
793 const vk::VkDeviceSize m_size;
794 const deUint32 m_memoryTypeIndex;
795 const vk::VkMemoryType m_memoryType;
796 const vk::Unique<vk::VkDeviceMemory> m_memory;
797 const vk::VkDeviceSize m_maxBufferSize;
798 const deInt32 m_maxImageWidth;
799 const deInt32 m_maxImageHeight;
802 vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface& vki,
803 vk::VkPhysicalDevice device,
804 deUint32 memoryTypeIndex)
806 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);
808 DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);
810 return memoryProperties.memoryTypes[memoryTypeIndex];
813 vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface& vkd,
816 vk::VkBufferUsageFlags usage,
817 vk::VkSharingMode sharingMode,
818 const vector<deUint32>& queueFamilies,
820 vk::VkDeviceSize memorySize,
821 deUint32 memoryTypeIndex)
823 vk::VkDeviceSize lastSuccess = 0;
824 vk::VkDeviceSize currentSize = memorySize / 2;
827 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
828 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
830 if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
834 for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
836 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
837 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
839 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
841 lastSuccess = currentSize;
842 currentSize += stepSize;
845 currentSize -= stepSize;
854 // Round size down maximum W * H * 4, where W and H < 4096
855 vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
857 const vk::VkDeviceSize maxTextureSize = 4096;
858 vk::VkDeviceSize maxTexelCount = size / 4;
859 vk::VkDeviceSize bestW = de::max(maxTexelCount, maxTextureSize);
860 vk::VkDeviceSize bestH = maxTexelCount / bestW;
862 // \todo [2016-03-09 mika] Could probably be faster?
863 for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
865 const vk::VkDeviceSize h = maxTexelCount / w;
867 if (bestW * bestH < w * h)
874 return bestW * bestH * 4;
877 // Find RGBA8 image size that has exactly "size" of number of bytes.
878 // "size" must be W * H * 4 where W and H < 4096
879 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
881 const vk::VkDeviceSize maxTextureSize = 4096;
882 vk::VkDeviceSize texelCount = size / 4;
884 DE_ASSERT((size % 4) == 0);
886 // \todo [2016-03-09 mika] Could probably be faster?
887 for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
889 const vk::VkDeviceSize h = texelCount / w;
891 if ((texelCount % w) == 0 && h < maxTextureSize)
892 return IVec2((int)w, (int)h);
895 DE_FATAL("Invalid size");
896 return IVec2(-1, -1);
899 IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface& vkd,
902 vk::VkImageUsageFlags usage,
903 vk::VkSharingMode sharingMode,
904 const vector<deUint32>& queueFamilies,
906 vk::VkDeviceSize memorySize,
907 deUint32 memoryTypeIndex)
909 IVec2 lastSuccess (0);
913 const deUint32 texelCount = (deUint32)(memorySize / 4);
914 const deUint32 width = (deUint32)deFloatSqrt((float)texelCount);
915 const deUint32 height = texelCount / width;
917 currentSize[0] = deMaxu32(width, height);
918 currentSize[1] = deMinu32(width, height);
921 for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
923 const vk::VkImageCreateInfo createInfo =
925 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
929 vk::VK_IMAGE_TYPE_2D,
930 vk::VK_FORMAT_R8G8B8A8_UNORM,
932 (deUint32)currentSize[0],
933 (deUint32)currentSize[1],
937 vk::VK_SAMPLE_COUNT_1_BIT,
938 vk::VK_IMAGE_TILING_OPTIMAL,
941 (deUint32)queueFamilies.size(),
943 vk::VK_IMAGE_LAYOUT_UNDEFINED
945 const vk::Unique<vk::VkImage> image (vk::createImage(vkd, device, &createInfo));
946 const vk::VkMemoryRequirements requirements (vk::getImageMemoryRequirements(vkd, device, *image));
948 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
950 lastSuccess = currentSize;
951 currentSize[0] += stepSize;
952 currentSize[1] += stepSize;
956 currentSize[0] -= stepSize;
957 currentSize[1] -= stepSize;
967 Memory::Memory (const vk::InstanceInterface& vki,
968 const vk::DeviceInterface& vkd,
969 vk::VkPhysicalDevice physicalDevice,
971 vk::VkDeviceSize size,
972 deUint32 memoryTypeIndex,
973 vk::VkDeviceSize maxBufferSize,
974 deInt32 maxImageWidth,
975 deInt32 maxImageHeight)
977 , m_memoryTypeIndex (memoryTypeIndex)
978 , m_memoryType (getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
979 , m_memory (allocMemory(vkd, device, size, memoryTypeIndex))
980 , m_maxBufferSize (maxBufferSize)
981 , m_maxImageWidth (maxImageWidth)
982 , m_maxImageHeight (maxImageHeight)
989 Context (const vk::InstanceInterface& vki,
990 const vk::DeviceInterface& vkd,
991 vk::VkPhysicalDevice physicalDevice,
994 deUint32 queueFamilyIndex,
995 const vector<pair<deUint32, vk::VkQueue> >& queues,
996 const vk::ProgramCollection<vk::ProgramBinary>& binaryCollection)
999 , m_physicalDevice (physicalDevice)
1002 , m_queueFamilyIndex (queueFamilyIndex)
1004 , m_commandPool (createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
1005 , m_binaryCollection (binaryCollection)
1007 for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
1008 m_queueFamilies.push_back(m_queues[queueNdx].first);
1011 const vk::InstanceInterface& getInstanceInterface (void) const { return m_vki; }
1012 vk::VkPhysicalDevice getPhysicalDevice (void) const { return m_physicalDevice; }
1013 vk::VkDevice getDevice (void) const { return m_device; }
1014 const vk::DeviceInterface& getDeviceInterface (void) const { return m_vkd; }
1015 vk::VkQueue getQueue (void) const { return m_queue; }
1016 deUint32 getQueueFamily (void) const { return m_queueFamilyIndex; }
1017 const vector<pair<deUint32, vk::VkQueue> >& getQueues (void) const { return m_queues; }
1018 const vector<deUint32> getQueueFamilies (void) const { return m_queueFamilies; }
1019 vk::VkCommandPool getCommandPool (void) const { return *m_commandPool; }
1020 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_binaryCollection; }
1023 const vk::InstanceInterface& m_vki;
1024 const vk::DeviceInterface& m_vkd;
1025 const vk::VkPhysicalDevice m_physicalDevice;
1026 const vk::VkDevice m_device;
1027 const vk::VkQueue m_queue;
1028 const deUint32 m_queueFamilyIndex;
1029 const vector<pair<deUint32, vk::VkQueue> > m_queues;
1030 const vk::Unique<vk::VkCommandPool> m_commandPool;
1031 const vk::ProgramCollection<vk::ProgramBinary>& m_binaryCollection;
1032 vector<deUint32> m_queueFamilies;
1035 class PrepareContext
1038 PrepareContext (const Context& context,
1039 const Memory& memory)
1040 : m_context (context)
1045 const Memory& getMemory (void) const { return m_memory; }
1046 const Context& getContext (void) const { return m_context; }
1047 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
1049 void setBuffer (vk::Move<vk::VkBuffer> buffer,
1050 vk::VkDeviceSize size)
1052 DE_ASSERT(!m_currentImage);
1053 DE_ASSERT(!m_currentBuffer);
1055 m_currentBuffer = buffer;
1056 m_currentBufferSize = size;
1059 vk::VkBuffer getBuffer (void) const { return *m_currentBuffer; }
1060 vk::VkDeviceSize getBufferSize (void) const
1062 DE_ASSERT(m_currentBuffer);
1063 return m_currentBufferSize;
1066 void releaseBuffer (void) { m_currentBuffer.disown(); }
1068 void setImage (vk::Move<vk::VkImage> image,
1069 vk::VkImageLayout layout,
1070 vk::VkDeviceSize memorySize,
1074 DE_ASSERT(!m_currentImage);
1075 DE_ASSERT(!m_currentBuffer);
1077 m_currentImage = image;
1078 m_currentImageMemorySize = memorySize;
1079 m_currentImageLayout = layout;
1080 m_currentImageWidth = width;
1081 m_currentImageHeight = height;
1084 void setImageLayout (vk::VkImageLayout layout)
1086 DE_ASSERT(m_currentImage);
1087 m_currentImageLayout = layout;
1090 vk::VkImage getImage (void) const { return *m_currentImage; }
1091 deInt32 getImageWidth (void) const
1093 DE_ASSERT(m_currentImage);
1094 return m_currentImageWidth;
1096 deInt32 getImageHeight (void) const
1098 DE_ASSERT(m_currentImage);
1099 return m_currentImageHeight;
1101 vk::VkDeviceSize getImageMemorySize (void) const
1103 DE_ASSERT(m_currentImage);
1104 return m_currentImageMemorySize;
1107 void releaseImage (void) { m_currentImage.disown(); }
1109 vk::VkImageLayout getImageLayout (void) const
1111 DE_ASSERT(m_currentImage);
1112 return m_currentImageLayout;
1116 const Context& m_context;
1117 const Memory& m_memory;
1119 vk::Move<vk::VkBuffer> m_currentBuffer;
1120 vk::VkDeviceSize m_currentBufferSize;
1122 vk::Move<vk::VkImage> m_currentImage;
1123 vk::VkDeviceSize m_currentImageMemorySize;
1124 vk::VkImageLayout m_currentImageLayout;
1125 deInt32 m_currentImageWidth;
1126 deInt32 m_currentImageHeight;
1129 class ExecuteContext
1132 ExecuteContext (const Context& context)
1133 : m_context (context)
1137 const Context& getContext (void) const { return m_context; }
1138 void setMapping (void* ptr) { m_mapping = ptr; }
1139 void* getMapping (void) const { return m_mapping; }
1142 const Context& m_context;
1149 VerifyContext (TestLog& log,
1150 tcu::ResultCollector& resultCollector,
1151 const Context& context,
1152 vk::VkDeviceSize size)
1154 , m_resultCollector (resultCollector)
1155 , m_context (context)
1156 , m_reference ((size_t)size)
1160 const Context& getContext (void) const { return m_context; }
1161 TestLog& getLog (void) const { return m_log; }
1162 tcu::ResultCollector& getResultCollector (void) const { return m_resultCollector; }
1164 ReferenceMemory& getReference (void) { return m_reference; }
1165 TextureLevel& getReferenceImage (void) { return m_referenceImage;}
1169 tcu::ResultCollector& m_resultCollector;
1170 const Context& m_context;
1171 ReferenceMemory m_reference;
1172 TextureLevel m_referenceImage;
1178 // Constructor should allocate all non-vulkan resources.
1179 virtual ~Command (void) {}
1181 // Get name of the command
1182 virtual const char* getName (void) const = 0;
1184 // Log prepare operations
1185 virtual void logPrepare (TestLog&, size_t) const {}
1186 // Log executed operations
1187 virtual void logExecute (TestLog&, size_t) const {}
1189 // Prepare should allocate all vulkan resources and resources that require
1190 // that buffer or memory has been already allocated. This should build all
1191 // command buffers etc.
1192 virtual void prepare (PrepareContext&) {}
1194 // Execute command. Write or read mapped memory, submit commands to queue
1196 virtual void execute (ExecuteContext&) {}
1198 // Verify that results are correct.
1199 virtual void verify (VerifyContext&, size_t) {}
1202 // Allow only inheritance
1207 Command (const Command&);
1208 Command& operator& (const Command&);
1211 class Map : public Command
1216 const char* getName (void) const { return "Map"; }
1219 void logExecute (TestLog& log, size_t commandIndex) const
1221 log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1224 void prepare (PrepareContext& context)
1226 m_memory = context.getMemory().getMemory();
1227 m_size = context.getMemory().getSize();
1230 void execute (ExecuteContext& context)
1232 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1233 const vk::VkDevice device = context.getContext().getDevice();
1235 context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1239 vk::VkDeviceMemory m_memory;
1240 vk::VkDeviceSize m_size;
// Command that unmaps the tested memory allocation and clears the mapping
// pointer in the execute context so later commands cannot use a stale pointer.
1243 class UnMap : public Command
1248 const char* getName (void) const { return "UnMap"; }
1250 void logExecute (TestLog& log, size_t commandIndex) const
1252 log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1255 void prepare (PrepareContext& context)
1257 m_memory = context.getMemory().getMemory();
1260 void execute (ExecuteContext& context)
1262 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1263 const vk::VkDevice device = context.getContext().getDevice();
1265 vkd.unmapMemory(device, m_memory);
1266 context.setMapping(DE_NULL);
1270 vk::VkDeviceMemory m_memory;
// Command that invalidates the whole mapped memory range so subsequent host
// reads observe device writes (required for non-coherent memory).
1273 class Invalidate : public Command
1276 Invalidate (void) {}
1277 ~Invalidate (void) {}
1278 const char* getName (void) const { return "Invalidate"; }
1280 void logExecute (TestLog& log, size_t commandIndex) const
1282 log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1285 void prepare (PrepareContext& context)
1287 m_memory = context.getMemory().getMemory();
1288 m_size = context.getMemory().getSize();
1291 void execute (ExecuteContext& context)
1293 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1294 const vk::VkDevice device = context.getContext().getDevice();
// Invalidate offset 0 .. m_size, i.e. the entire allocation.
1296 vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1300 vk::VkDeviceMemory m_memory;
1301 vk::VkDeviceSize m_size;
// Command that flushes the whole mapped memory range so the device observes
// preceding host writes (required for non-coherent memory).
1304 class Flush : public Command
1309 const char* getName (void) const { return "Flush"; }
1311 void logExecute (TestLog& log, size_t commandIndex) const
1313 log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1316 void prepare (PrepareContext& context)
1318 m_memory = context.getMemory().getMemory();
1319 m_size = context.getMemory().getSize();
1322 void execute (ExecuteContext& context)
1324 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1325 const vk::VkDevice device = context.getContext().getDevice();
// Flush offset 0 .. m_size, i.e. the entire allocation.
1327 vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1331 vk::VkDeviceMemory m_memory;
1332 vk::VkDeviceSize m_size;
1335 // Host memory reads and writes
// Reads and/or writes every byte of the mapped allocation through the host
// pointer. Writes are seeded-PRNG-driven so verify() can replay the exact
// same sequence with the same seed and update/check the reference memory.
1336 class HostMemoryAccess : public Command
1339 HostMemoryAccess (bool read, bool write, deUint32 seed);
1340 ~HostMemoryAccess (void) {}
1341 const char* getName (void) const { return "HostMemoryAccess"; }
1343 void logExecute (TestLog& log, size_t commandIndex) const;
1344 void prepare (PrepareContext& context);
1345 void execute (ExecuteContext& context);
1346 void verify (VerifyContext& context, size_t commandIndex);
1351 const deUint32 m_seed;
// Bytes read back during execute(); compared against the reference in verify().
1354 vector<deUint8> m_readData;
1357 HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1364 void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1366 log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "") << ", seed: " << m_seed << TestLog::EndMessage;
1369 void HostMemoryAccess::prepare (PrepareContext& context)
1371 m_size = (size_t)context.getMemory().getSize();
1374 m_readData.resize(m_size, 0);
1377 void HostMemoryAccess::execute (ExecuteContext& context)
1379 de::Random rng (m_seed);
1380 deUint8* const ptr = (deUint8*)context.getMapping();
// Read-modify-write: record each byte read, then write it back XORed with a
// seeded random mask. verify() replays the same mask stream.
1382 if (m_read && m_write)
1384 for (size_t pos = 0; pos < m_size; pos++)
1386 const deUint8 mask = rng.getUint8();
1387 const deUint8 value = ptr[pos];
1389 m_readData[pos] = value;
1390 ptr[pos] = value ^ mask;
// Read-only: just capture the current contents for verification.
1395 for (size_t pos = 0; pos < m_size; pos++)
1397 const deUint8 value = ptr[pos];
1399 m_readData[pos] = value;
// Write-only: fill with seeded random bytes (write statement elided from
// this view).
1404 for (size_t pos = 0; pos < m_size; pos++)
1406 const deUint8 value = rng.getUint8();
1412 DE_FATAL("Host memory access without read or write.");
1415 void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
1417 tcu::ResultCollector& resultCollector = context.getResultCollector();
1418 ReferenceMemory& reference = context.getReference();
// Replay the exact PRNG stream used by execute() — same seed, same draw order.
1419 de::Random rng (m_seed);
1421 if (m_read && m_write)
1423 for (size_t pos = 0; pos < m_size; pos++)
1425 const deUint8 mask = rng.getUint8();
1426 const deUint8 value = m_readData[pos];
// Only bytes with defined reference contents can be checked.
1428 if (reference.isDefined(pos))
1430 if (value != reference.get(pos))
1432 resultCollector.fail(
1433 de::toString(commandIndex) + ":" + getName()
1434 + " Result differs from reference, Expected: "
1435 + de::toString(tcu::toHex<8>(reference.get(pos)))
1437 + de::toString(tcu::toHex<8>(value))
1439 + de::toString(pos));
// Mirror the write done in execute(): reference byte gets XORed with the mask.
1443 reference.set(pos, reference.get(pos) ^ mask);
// Read-only: check captured bytes against the reference; nothing to update.
1449 for (size_t pos = 0; pos < m_size; pos++)
1451 const deUint8 value = m_readData[pos];
1453 if (reference.isDefined(pos))
1455 if (value != reference.get(pos))
1457 resultCollector.fail(
1458 de::toString(commandIndex) + ":" + getName()
1459 + " Result differs from reference, Expected: "
1460 + de::toString(tcu::toHex<8>(reference.get(pos)))
1462 + de::toString(tcu::toHex<8>(value))
1464 + de::toString(pos));
// Write-only: the reference simply becomes the seeded random byte stream.
1472 for (size_t pos = 0; pos < m_size; pos++)
1474 const deUint8 value = rng.getUint8();
1476 reference.set(pos, value);
1480 DE_FATAL("Host memory access without read or write.");
// Command that creates the tested VkBuffer with the given usage and sharing
// mode, sized to the largest buffer the tested memory allocation can back.
1483 class CreateBuffer : public Command
1486 CreateBuffer (vk::VkBufferUsageFlags usage,
1487 vk::VkSharingMode sharing);
1488 ~CreateBuffer (void) {}
1489 const char* getName (void) const { return "CreateBuffer"; }
1491 void logPrepare (TestLog& log, size_t commandIndex) const;
1492 void prepare (PrepareContext& context);
1495 const vk::VkBufferUsageFlags m_usage;
1496 const vk::VkSharingMode m_sharing;
1499 CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags usage,
1500 vk::VkSharingMode sharing)
1502 , m_sharing (sharing)
1506 void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1508 log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1511 void CreateBuffer::prepare (PrepareContext& context)
1513 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1514 const vk::VkDevice device = context.getContext().getDevice();
1515 const vk::VkDeviceSize bufferSize = context.getMemory().getMaxBufferSize();
1516 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Hand ownership of the new buffer (and its size) to the prepare context.
1518 context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
// Command that destroys the context's buffer. prepare() takes over ownership
// (and detaches the handle from the context) so the actual vkDestroyBuffer
// can be issued later, during execute().
1521 class DestroyBuffer : public Command
1524 DestroyBuffer (void);
1525 ~DestroyBuffer (void) {}
1526 const char* getName (void) const { return "DestroyBuffer"; }
1528 void logExecute (TestLog& log, size_t commandIndex) const;
1529 void prepare (PrepareContext& context);
1530 void execute (ExecuteContext& context);
1533 vk::Move<vk::VkBuffer> m_buffer;
1536 DestroyBuffer::DestroyBuffer (void)
1540 void DestroyBuffer::prepare (PrepareContext& context)
// Wrap the raw handle in a Move<> owner and tell the context it no longer
// owns the buffer.
1542 m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1543 context.releaseBuffer();
1546 void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
1548 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1551 void DestroyBuffer::execute (ExecuteContext& context)
1553 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1554 const vk::VkDevice device = context.getContext().getDevice();
// disown() so Move<> does not try to destroy the handle a second time.
1556 vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
// Command that binds the tested memory allocation to the context's buffer at
// offset 0.
1559 class BindBufferMemory : public Command
1562 BindBufferMemory (void) {}
1563 ~BindBufferMemory (void) {}
1564 const char* getName (void) const { return "BindBufferMemory"; }
1566 void logPrepare (TestLog& log, size_t commandIndex) const;
1567 void prepare (PrepareContext& context);
1570 void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1572 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1575 void BindBufferMemory::prepare (PrepareContext& context)
1577 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1578 const vk::VkDevice device = context.getContext().getDevice();
1580 VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
// Command that creates the tested 2D RGBA8 optimal-tiling image, sized to the
// largest dimensions the tested memory allocation can back, and resets the
// reference image accordingly in verify().
1583 class CreateImage : public Command
1586 CreateImage (vk::VkImageUsageFlags usage,
1587 vk::VkSharingMode sharing);
1588 ~CreateImage (void) {}
1589 const char* getName (void) const { return "CreateImage"; }
1591 void logPrepare (TestLog& log, size_t commandIndex) const;
1592 void prepare (PrepareContext& context);
1593 void verify (VerifyContext& context, size_t commandIndex);
1596 const vk::VkImageUsageFlags m_usage;
1597 const vk::VkSharingMode m_sharing;
1598 deInt32 m_imageWidth;
1599 deInt32 m_imageHeight;
1602 CreateImage::CreateImage (vk::VkImageUsageFlags usage,
1603 vk::VkSharingMode sharing)
1605 , m_sharing (sharing)
1609 void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
1611 log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage) << TestLog::EndMessage;
1614 void CreateImage::prepare (PrepareContext& context)
1616 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1617 const vk::VkDevice device = context.getContext().getDevice();
1618 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1620 m_imageWidth = context.getMemory().getMaxImageWidth();
1621 m_imageHeight = context.getMemory().getMaxImageHeight();
// 2D, single mip / single layer, 1 sample, optimal tiling, starts UNDEFINED.
// (Several initializer fields are elided from this view of the source.)
1624 const vk::VkImageCreateInfo createInfo =
1626 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1630 vk::VK_IMAGE_TYPE_2D,
1631 vk::VK_FORMAT_R8G8B8A8_UNORM,
1633 (deUint32)m_imageWidth,
1634 (deUint32)m_imageHeight,
1638 vk::VK_SAMPLE_COUNT_1_BIT,
1639 vk::VK_IMAGE_TILING_OPTIMAL,
1642 (deUint32)queueFamilies.size(),
1644 vk::VK_IMAGE_LAYOUT_UNDEFINED
1646 vk::Move<vk::VkImage> image (createImage(vkd, device, &createInfo));
1647 const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, *image);
1649 context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
1653 void CreateImage::verify (VerifyContext& context, size_t)
// A freshly created image has no defined contents; reset the reference image
// to a new RGBA8 level of the created size.
1655 context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
// Command that destroys the context's image. Mirrors DestroyBuffer: prepare()
// takes ownership and detaches the handle, execute() issues vkDestroyImage.
1658 class DestroyImage : public Command
1661 DestroyImage (void);
1662 ~DestroyImage (void) {}
1663 const char* getName (void) const { return "DestroyImage"; }
1665 void logExecute (TestLog& log, size_t commandIndex) const;
1666 void prepare (PrepareContext& context);
1667 void execute (ExecuteContext& context);
1670 vk::Move<vk::VkImage> m_image;
1673 DestroyImage::DestroyImage (void)
1677 void DestroyImage::prepare (PrepareContext& context)
1679 m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL))
1680 context.releaseImage();
1684 void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
1686 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1689 void DestroyImage::execute (ExecuteContext& context)
1691 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1692 const vk::VkDevice device = context.getContext().getDevice();
// disown() so the Move<> wrapper does not double-destroy the handle.
1694 vkd.destroyImage(device, m_image.disown(), DE_NULL);
// Command that binds the tested memory allocation to the context's image at
// offset 0.
1697 class BindImageMemory : public Command
1700 BindImageMemory (void) {}
1701 ~BindImageMemory (void) {}
1702 const char* getName (void) const { return "BindImageMemory"; }
1704 void logPrepare (TestLog& log, size_t commandIndex) const;
1705 void prepare (PrepareContext& context);
1708 void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1710 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1713 void BindImageMemory::prepare (PrepareContext& context)
1715 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1716 const vk::VkDevice device = context.getContext().getDevice();
1718 VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1721 class QueueWaitIdle : public Command
1724 QueueWaitIdle (void) {}
1725 ~QueueWaitIdle (void) {}
1726 const char* getName (void) const { return "QueuetWaitIdle"; }
1728 void logExecute (TestLog& log, size_t commandIndex) const;
1729 void execute (ExecuteContext& context);
1732 void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1734 log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
// Block until all previously submitted work on the queue has completed.
1737 void QueueWaitIdle::execute (ExecuteContext& context)
1739 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1740 const vk::VkQueue queue = context.getContext().getQueue();
1742 VK_CHECK(vkd.queueWaitIdle(queue));
// Command that blocks until the whole device is idle (vkDeviceWaitIdle).
1745 class DeviceWaitIdle : public Command
1748 DeviceWaitIdle (void) {}
1749 ~DeviceWaitIdle (void) {}
1750 const char* getName (void) const { return "DeviceWaitIdle"; }
1752 void logExecute (TestLog& log, size_t commandIndex) const;
1753 void execute (ExecuteContext& context);
1756 void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1758 log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1761 void DeviceWaitIdle::execute (ExecuteContext& context)
1763 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1764 const vk::VkDevice device = context.getContext().getDevice();
1766 VK_CHECK(vkd.deviceWaitIdle(device));
// Read-only view handed to CmdCommand::submit(): pairs the prepare context
// with the command buffer currently being recorded. (Class header is above
// this chunk / elided from this view.)
1772 SubmitContext (const PrepareContext& context,
1773 const vk::VkCommandBuffer commandBuffer)
1774 : m_context (context)
1775 , m_commandBuffer (commandBuffer)
1779 const Memory& getMemory (void) const { return m_context.getMemory(); }
1780 const Context& getContext (void) const { return m_context.getContext(); }
1781 vk::VkCommandBuffer getCommandBuffer (void) const { return m_commandBuffer; }
1783 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
1784 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
1786 vk::VkImage getImage (void) const { return m_context.getImage(); }
1787 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
1788 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
1791 const PrepareContext& m_context;
1792 const vk::VkCommandBuffer m_commandBuffer;
// Abstract base class for commands recorded into a command buffer. prepare()
// allocates resources, submit() records into the command buffer, verify()
// checks results; the log* methods record each phase. (Class header is
// elided from this view.)
1798 virtual ~CmdCommand (void) {}
1799 virtual const char* getName (void) const = 0;
1801 // Log things that are done during prepare
1802 virtual void logPrepare (TestLog&, size_t) const {}
1803 // Log submitted calls etc.
1804 virtual void logSubmit (TestLog&, size_t) const {}
1806 // Allocate vulkan resources and prepare for submit.
1807 virtual void prepare (PrepareContext&) {}
1809 // Submit commands to command buffer.
1810 virtual void submit (SubmitContext&) {}
1813 virtual void verify (VerifyContext&, size_t) {}
// Host-side command that owns a list of CmdCommands: prepare() records them
// all into a freshly allocated primary command buffer, execute() submits that
// buffer to the queue, and the log*/verify methods fan out to each
// sub-command. Takes ownership of (and deletes) the CmdCommand pointers.
1816 class SubmitCommandBuffer : public Command
1819 SubmitCommandBuffer (const vector<CmdCommand*>& commands);
1820 ~SubmitCommandBuffer (void);
1822 const char* getName (void) const { return "SubmitCommandBuffer"; }
1823 void logExecute (TestLog& log, size_t commandIndex) const;
1824 void logPrepare (TestLog& log, size_t commandIndex) const;
1826 // Allocate command buffer and submit commands to command buffer
1827 void prepare (PrepareContext& context);
1828 void execute (ExecuteContext& context);
1830 // Verify that results are correct.
1831 void verify (VerifyContext& context, size_t commandIndex);
1834 vector<CmdCommand*> m_commands;
1835 vk::Move<vk::VkCommandBuffer> m_commandBuffer;
1838 SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
1839 : m_commands (commands)
1843 SubmitCommandBuffer::~SubmitCommandBuffer (void)
1845 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1846 delete m_commands[cmdNdx];
1849 void SubmitCommandBuffer::prepare (PrepareContext& context)
1851 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1852 const vk::VkDevice device = context.getContext().getDevice();
1853 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
1855 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
// First let every sub-command allocate its resources...
1857 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1859 CmdCommand& command = *m_commands[cmdNdx];
1861 command.prepare(context);
// ...then record each of them into the command buffer, in order.
1865 SubmitContext submitContext (context, *m_commandBuffer);
1867 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1869 CmdCommand& command = *m_commands[cmdNdx];
1871 command.submit(submitContext);
1874 VK_CHECK(vkd.endCommandBuffer(*m_commandBuffer));
1878 void SubmitCommandBuffer::execute (ExecuteContext& context)
1880 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1881 const vk::VkCommandBuffer cmd = *m_commandBuffer;
1882 const vk::VkQueue queue = context.getContext().getQueue();
// Submit with no wait/signal semaphores; several initializer fields are
// elided from this view of the source.
1883 const vk::VkSubmitInfo submit =
1885 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1890 (const vk::VkPipelineStageFlags*)DE_NULL,
// Last argument 0 = no fence.
// NOTE(review): the vkQueueSubmit result is not wrapped in VK_CHECK here,
// unlike the other Vulkan calls in this file — confirm this is intentional.
1899 vkd.queueSubmit(queue, 1, &submit, 0);
1902 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1904 const string sectionName (de::toString(commandIndex) + ":" + getName());
1905 const tcu::ScopedLogSection section (context.getLog(), sectionName, sectionName);
// Sub-commands are verified with their own index within this buffer.
1907 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1908 m_commands[cmdNdx]->verify(context, cmdNdx);
1911 void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1913 const string sectionName (de::toString(commandIndex) + ":" + getName());
1914 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1916 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1917 m_commands[cmdNdx]->logPrepare(log, cmdNdx);
1920 void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1922 const string sectionName (de::toString(commandIndex) + ":" + getName());
1923 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1925 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1926 m_commands[cmdNdx]->logSubmit(log, cmdNdx);
// CmdCommand recording a vkCmdPipelineBarrier. Depending on m_type (an enum
// declared in lines elided from this view: global / buffer / image) it emits
// a VkMemoryBarrier, a VkBufferMemoryBarrier covering the context's buffer,
// or a VkImageMemoryBarrier covering the context's image.
1929 class PipelineBarrier : public CmdCommand
1939 PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1940 const vk::VkAccessFlags srcAccesses,
1941 const vk::VkPipelineStageFlags dstStages,
1942 const vk::VkAccessFlags dstAccesses,
// For image barriers: optional layout the image is transitioned to/kept in.
1944 const tcu::Maybe<vk::VkImageLayout> imageLayout);
1945 ~PipelineBarrier (void) {}
1946 const char* getName (void) const { return "PipelineBarrier"; }
1948 void logSubmit (TestLog& log, size_t commandIndex) const;
1949 void submit (SubmitContext& context);
1952 const vk::VkPipelineStageFlags m_srcStages;
1953 const vk::VkAccessFlags m_srcAccesses;
1954 const vk::VkPipelineStageFlags m_dstStages;
1955 const vk::VkAccessFlags m_dstAccesses;
1957 const tcu::Maybe<vk::VkImageLayout> m_imageLayout;
1960 PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1961 const vk::VkAccessFlags srcAccesses,
1962 const vk::VkPipelineStageFlags dstStages,
1963 const vk::VkAccessFlags dstAccesses,
1965 const tcu::Maybe<vk::VkImageLayout> imageLayout)
1966 : m_srcStages (srcStages)
1967 , m_srcAccesses (srcAccesses)
1968 , m_dstStages (dstStages)
1969 , m_dstAccesses (dstAccesses)
1971 , m_imageLayout (imageLayout)
1975 void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
1977 log << TestLog::Message << commandIndex << ":" << getName()
1978 << " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
1979 : m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
1980 : "Image pipeline barrier")
1981 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
1982 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
// Record the barrier matching m_type. Several struct initializer fields and
// the switch/case framing are elided from this view of the source.
1985 void PipelineBarrier::submit (SubmitContext& context)
1987 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1988 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
// Global memory barrier: one VkMemoryBarrier, no buffer/image barriers.
1994 const vk::VkMemoryBarrier barrier =
1996 vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
2003 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
// Buffer barrier over the context's buffer; no queue family transfer.
2009 const vk::VkBufferMemoryBarrier barrier =
2011 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2017 VK_QUEUE_FAMILY_IGNORED,
2018 VK_QUEUE_FAMILY_IGNORED,
2020 context.getBuffer(),
2025 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
// Image barrier over the color aspect of the context's image.
2031 const vk::VkImageMemoryBarrier barrier =
2033 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2042 VK_QUEUE_FAMILY_IGNORED,
2043 VK_QUEUE_FAMILY_IGNORED,
2047 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2053 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2058 DE_FATAL("Unknown pipeline barrier type");
// CmdCommand that records an image layout transition (VkImageMemoryBarrier)
// from srcLayout to dstLayout. prepare() updates the tracked layout in the
// context; verify() marks the backing memory undefined since contents after
// a layout transition are not guaranteed byte-wise.
2062 class ImageTransition : public CmdCommand
2065 ImageTransition (vk::VkPipelineStageFlags srcStages,
2066 vk::VkAccessFlags srcAccesses,
2068 vk::VkPipelineStageFlags dstStages,
2069 vk::VkAccessFlags dstAccesses,
2071 vk::VkImageLayout srcLayout,
2072 vk::VkImageLayout dstLayout);
2074 ~ImageTransition (void) {}
2075 const char* getName (void) const { return "ImageTransition"; }
2077 void prepare (PrepareContext& context);
2078 void logSubmit (TestLog& log, size_t commandIndex) const;
2079 void submit (SubmitContext& context);
2080 void verify (VerifyContext& context, size_t);
2083 const vk::VkPipelineStageFlags m_srcStages;
2084 const vk::VkAccessFlags m_srcAccesses;
2085 const vk::VkPipelineStageFlags m_dstStages;
2086 const vk::VkAccessFlags m_dstAccesses;
2087 const vk::VkImageLayout m_srcLayout;
2088 const vk::VkImageLayout m_dstLayout;
// Captured in prepare(); used by verify() to invalidate the reference bytes.
2090 vk::VkDeviceSize m_imageMemorySize;
2093 ImageTransition::ImageTransition (vk::VkPipelineStageFlags srcStages,
2094 vk::VkAccessFlags srcAccesses,
2096 vk::VkPipelineStageFlags dstStages,
2097 vk::VkAccessFlags dstAccesses,
2099 vk::VkImageLayout srcLayout,
2100 vk::VkImageLayout dstLayout)
2101 : m_srcStages (srcStages)
2102 , m_srcAccesses (srcAccesses)
2103 , m_dstStages (dstStages)
2104 , m_dstAccesses (dstAccesses)
2105 , m_srcLayout (srcLayout)
2106 , m_dstLayout (dstLayout)
2110 void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2112 log << TestLog::Message << commandIndex << ":" << getName()
2113 << " Image transition pipeline barrier"
2114 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2115 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2116 << ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2119 void ImageTransition::prepare (PrepareContext& context)
// Transition is valid only if the tracked layout matches srcLayout, or
// either side is UNDEFINED.
2121 DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);
2123 context.setImageLayout(m_dstLayout);
2124 m_imageMemorySize = context.getImageMemorySize();
2127 void ImageTransition::submit (SubmitContext& context)
2129 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2130 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
// Color aspect, no queue family transfer; several initializer fields are
// elided from this view of the source.
2131 const vk::VkImageMemoryBarrier barrier =
2133 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2142 VK_QUEUE_FAMILY_IGNORED,
2143 VK_QUEUE_FAMILY_IGNORED,
2147 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2153 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2156 void ImageTransition::verify (VerifyContext& context, size_t)
// After a layout transition the raw memory contents are unspecified.
2158 context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
// CmdCommand recording vkCmdFillBuffer: fills the context's buffer with a
// repeated 32-bit value, truncated to a multiple of 4 bytes as the API
// requires, and mirrors the fill into the reference memory byte-by-byte.
2161 class FillBuffer : public CmdCommand
2164 FillBuffer (deUint32 value) : m_value(value) {}
2165 ~FillBuffer (void) {}
2166 const char* getName (void) const { return "FillBuffer"; }
2168 void logSubmit (TestLog& log, size_t commandIndex) const;
2169 void submit (SubmitContext& context);
2170 void verify (VerifyContext& context, size_t commandIndex);
2173 const deUint32 m_value;
// Actual filled size (buffer size rounded down to a multiple of 4).
2174 vk::VkDeviceSize m_bufferSize;
2177 void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2179 log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2182 void FillBuffer::submit (SubmitContext& context)
2184 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2185 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2186 const vk::VkBuffer buffer = context.getBuffer();
2187 const vk::VkDeviceSize sizeMask = ~(0x3ull); // \note Round down to multiple of 4
2189 m_bufferSize = sizeMask & context.getBufferSize();
2190 vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2193 void FillBuffer::verify (VerifyContext& context, size_t)
2195 ReferenceMemory& reference = context.getReference();
// Expand the 32-bit fill value into per-byte expectations; byte order within
// each word depends on host endianness (#else/#endif elided from this view).
2197 for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2199 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2200 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2202 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
// CmdCommand recording vkCmdUpdateBuffer: writes seeded random data over the
// whole buffer in 64KiB chunks (the data is passed inline in the command
// buffer). verify() regenerates the identical byte stream from the same seed
// and applies it to the reference memory.
2207 class UpdateBuffer : public CmdCommand
2210 UpdateBuffer (deUint32 seed) : m_seed(seed) {}
2211 ~UpdateBuffer (void) {}
2212 const char* getName (void) const { return "UpdateBuffer"; }
2214 void logSubmit (TestLog& log, size_t commandIndex) const;
2215 void submit (SubmitContext& context);
2216 void verify (VerifyContext& context, size_t commandIndex);
2219 const deUint32 m_seed;
2220 vk::VkDeviceSize m_bufferSize;
2223 void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2225 log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2228 void UpdateBuffer::submit (SubmitContext& context)
2230 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2231 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2232 const vk::VkBuffer buffer = context.getBuffer();
2233 const size_t blockSize = 65536;
2234 std::vector<deUint8> data (blockSize, 0);
2235 de::Random rng (m_seed);
2237 m_bufferSize = context.getBufferSize();
// Full blocks first; the final (possibly partial) block uses the remainder.
2239 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2241 for (size_t ndx = 0; ndx < data.size(); ndx++)
2242 data[ndx] = rng.getUint8();
2244 if (m_bufferSize - updated > blockSize)
2245 vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2247 vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
2251 void UpdateBuffer::verify (VerifyContext& context, size_t)
2253 ReferenceMemory& reference = context.getReference();
2254 const size_t blockSize = 65536;
2255 vector<deUint8> data (blockSize, 0);
// Must draw bytes from the PRNG in exactly the same order as submit() did.
2256 de::Random rng (m_seed);
2258 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2260 for (size_t ndx = 0; ndx < data.size(); ndx++)
2261 data[ndx] = rng.getUint8();
2263 if (m_bufferSize - updated > blockSize)
2264 reference.setData(updated, blockSize, &data[0]);
2266 reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
// CmdCommand that copies the tested buffer into a host-visible destination
// buffer allocated in prepare(). verify() inserts a transfer-to-host barrier,
// maps the destination and compares every defined byte against the reference.
2270 class BufferCopyToBuffer : public CmdCommand
2273 BufferCopyToBuffer (void) {}
2274 ~BufferCopyToBuffer (void) {}
2275 const char* getName (void) const { return "BufferCopyToBuffer"; }
2277 void logPrepare (TestLog& log, size_t commandIndex) const;
2278 void prepare (PrepareContext& context);
2279 void logSubmit (TestLog& log, size_t commandIndex) const;
2280 void submit (SubmitContext& context);
2281 void verify (VerifyContext& context, size_t commandIndex);
2284 vk::VkDeviceSize m_bufferSize;
2285 vk::Move<vk::VkBuffer> m_dstBuffer;
2286 vk::Move<vk::VkDeviceMemory> m_memory;
2289 void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2291 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2294 void BufferCopyToBuffer::prepare (PrepareContext& context)
2296 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2297 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2298 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2299 const vk::VkDevice device = context.getContext().getDevice();
2300 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2302 m_bufferSize = context.getBufferSize();
// Destination must be host-visible so verify() can read it back.
2304 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2305 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2308 void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2310 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2313 void BufferCopyToBuffer::submit (SubmitContext& context)
2315 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2316 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
// Single full-buffer copy region (offset/size fields elided from this view).
2317 const vk::VkBufferCopy range =
2323 vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2326 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2328 tcu::ResultCollector& resultCollector (context.getResultCollector());
2329 ReferenceMemory& reference (context.getReference());
2330 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2331 const vk::VkDevice device = context.getContext().getDevice();
2332 const vk::VkQueue queue = context.getContext().getQueue();
2333 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2334 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// Make the transfer write visible to host reads before mapping.
2335 const vk::VkBufferMemoryBarrier barrier =
2337 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2340 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2341 vk::VK_ACCESS_HOST_READ_BIT,
2343 VK_QUEUE_FAMILY_IGNORED,
2344 VK_QUEUE_FAMILY_IGNORED,
2350 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2352 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2353 queueRun(vkd, queue, *commandBuffer);
2356 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2359 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2362 const deUint8* const data = (const deUint8*)ptr;
// Only bytes with defined reference contents are comparable.
2364 for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2366 if (reference.isDefined(pos))
2368 if (data[pos] != reference.get(pos))
2370 resultCollector.fail(
2371 de::toString(commandIndex) + ":" + getName()
2372 + " Result differs from reference, Expected: "
2373 + de::toString(tcu::toHex<8>(reference.get(pos)))
2375 + de::toString(tcu::toHex<8>(data[pos]))
2377 + de::toString(pos));
2384 vkd.unmapMemory(device, *m_memory);
2387 context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
// Command that fills the test buffer by copying from a freshly created,
// seeded-random source buffer.
2391 class BufferCopyFromBuffer : public CmdCommand
2394 BufferCopyFromBuffer (deUint32 seed) : m_seed(seed) {}
2395 ~BufferCopyFromBuffer (void) {}
2396 const char* getName (void) const { return "BufferCopyFromBuffer"; }
2398 void logPrepare (TestLog& log, size_t commandIndex) const;
2399 void prepare (PrepareContext& context);
2400 void logSubmit (TestLog& log, size_t commandIndex) const;
2401 void submit (SubmitContext& context);
2402 void verify (VerifyContext& context, size_t commandIndex);
// Seed for the deterministic source data; replayed again in verify().
2405 const deUint32 m_seed;
2406 vk::VkDeviceSize m_bufferSize;
2407 vk::Move<vk::VkBuffer> m_srcBuffer;
2408 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do.
2411 void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2413 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
// Allocate a host-visible source buffer and fill it with seeded random bytes.
2416 void BufferCopyFromBuffer::prepare (PrepareContext& context)
2418 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2419 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2420 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2421 const vk::VkDevice device = context.getContext().getDevice();
2422 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2424 m_bufferSize = context.getBufferSize();
2425 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2426 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2429 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2430 de::Random rng (m_seed);
2433 deUint8* const data = (deUint8*)ptr;
2435 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2436 data[ndx] = rng.getUint8();
// Flush before unmap: memory may be non-coherent.
2439 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2440 vkd.unmapMemory(device, *m_memory);
2444 void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2446 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
// Record the copy from the prepared source buffer into the test buffer.
2449 void BufferCopyFromBuffer::submit (SubmitContext& context)
2451 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2452 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2453 const vk::VkBufferCopy range =
2459 vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
// Replay the RNG to update the reference memory with the copied bytes.
2462 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2464 ReferenceMemory& reference (context.getReference());
2465 de::Random rng (m_seed);
2467 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2468 reference.set(ndx, rng.getUint8());
// Command that copies the test buffer into a newly created RGBA8 image.
2471 class BufferCopyToImage : public CmdCommand
2474 BufferCopyToImage (void) {}
2475 ~BufferCopyToImage (void) {}
2476 const char* getName (void) const { return "BufferCopyToImage"; }
2478 void logPrepare (TestLog& log, size_t commandIndex) const;
2479 void prepare (PrepareContext& context);
2480 void logSubmit (TestLog& log, size_t commandIndex) const;
2481 void submit (SubmitContext& context);
2482 void verify (VerifyContext& context, size_t commandIndex);
2485 deInt32 m_imageWidth;
2486 deInt32 m_imageHeight;
2487 vk::Move<vk::VkImage> m_dstImage;
2488 vk::Move<vk::VkDeviceMemory> m_memory;
2491 void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2493 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
// Create the destination image (sized so that width*height*4 == buffer size)
// and transition it to TRANSFER_DST_OPTIMAL.
2496 void BufferCopyToImage::prepare (PrepareContext& context)
2498 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2499 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2500 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2501 const vk::VkDevice device = context.getContext().getDevice();
2502 const vk::VkQueue queue = context.getContext().getQueue();
2503 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2504 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2505 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2507 m_imageWidth = imageSize[0];
2508 m_imageHeight = imageSize[1];
2511 const vk::VkImageCreateInfo createInfo =
2513 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2517 vk::VK_IMAGE_TYPE_2D,
2518 vk::VK_FORMAT_R8G8B8A8_UNORM,
2520 (deUint32)m_imageWidth,
2521 (deUint32)m_imageHeight,
2524 1, 1, // mipLevels, arrayLayers
2525 vk::VK_SAMPLE_COUNT_1_BIT,
2527 vk::VK_IMAGE_TILING_OPTIMAL,
2528 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2529 vk::VK_SHARING_MODE_EXCLUSIVE,
2531 (deUint32)queueFamilies.size(),
2533 vk::VK_IMAGE_LAYOUT_UNDEFINED
2536 m_dstImage = vk::createImage(vkd, device, &createInfo);
2539 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// One-time layout transition UNDEFINED -> TRANSFER_DST_OPTIMAL.
2542 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2543 const vk::VkImageMemoryBarrier barrier =
2545 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2549 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2551 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2552 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2554 VK_QUEUE_FAMILY_IGNORED,
2555 VK_QUEUE_FAMILY_IGNORED,
2559 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2561 1, // Mip level count
2567 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2569 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2570 queueRun(vkd, queue, *commandBuffer);
2574 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2576 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
// Record the buffer-to-image copy of the whole test buffer into the
// prepared destination image (already in TRANSFER_DST_OPTIMAL).
2579 void BufferCopyToImage::submit (SubmitContext& context)
2581 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2582 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2583 const vk::VkBufferImageCopy region =
2588 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2595 (deUint32)m_imageWidth,
2596 (deUint32)m_imageHeight,
// Fixed mojibake: "&region" had been corrupted to "(R)ion" by HTML-entity
// mangling of "&reg".
2601 vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
// Verify step: read the destination image back into a host-visible buffer
// (via TRANSFER_SRC layout) and compare each defined byte against the
// reference memory model.
2604 void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
2606 tcu::ResultCollector& resultCollector (context.getResultCollector());
2607 ReferenceMemory& reference (context.getReference());
2608 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2609 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2610 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2611 const vk::VkDevice device = context.getContext().getDevice();
2612 const vk::VkQueue queue = context.getContext().getQueue();
2613 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2614 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2615 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2616 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2617 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Image barrier: TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL so the image
// can be read by the image-to-buffer copy.
2619 const vk::VkImageMemoryBarrier imageBarrier =
2621 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2624 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2625 vk::VK_ACCESS_TRANSFER_READ_BIT,
2627 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2628 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2630 VK_QUEUE_FAMILY_IGNORED,
2631 VK_QUEUE_FAMILY_IGNORED,
2635 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2637 1, // Mip level count
// Buffer barrier: make the readback buffer's transfer write host-readable.
2642 const vk::VkBufferMemoryBarrier bufferBarrier =
2644 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2647 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2648 vk::VK_ACCESS_HOST_READ_BIT,
2650 VK_QUEUE_FAMILY_IGNORED,
2651 VK_QUEUE_FAMILY_IGNORED,
2657 const vk::VkBufferImageCopy region =
2662 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2669 (deUint32)m_imageWidth,
2670 (deUint32)m_imageHeight,
2675 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
// Fixed mojibake: "&region" had been corrupted to "(R)ion".
2676 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
2677 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2680 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2681 queueRun(vkd, queue, *commandBuffer);
// Map, invalidate (possibly non-coherent memory) and compare byte-by-byte.
2684 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2686 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2689 const deUint8* const data = (const deUint8*)ptr;
2691 for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
2693 if (reference.isDefined(pos))
2695 if (data[pos] != reference.get(pos))
2697 resultCollector.fail(
2698 de::toString(commandIndex) + ":" + getName()
2699 + " Result differs from reference, Expected: "
2700 + de::toString(tcu::toHex<8>(reference.get(pos)))
2702 + de::toString(tcu::toHex<8>(data[pos]))
2704 + de::toString(pos));
2711 vkd.unmapMemory(device, *memory);
// Command that fills the test buffer by copying out of a seeded-random
// RGBA8 source image.
2715 class BufferCopyFromImage : public CmdCommand
2718 BufferCopyFromImage (deUint32 seed) : m_seed(seed) {}
2719 ~BufferCopyFromImage (void) {}
2720 const char* getName (void) const { return "BufferCopyFromImage"; }
2722 void logPrepare (TestLog& log, size_t commandIndex) const;
2723 void prepare (PrepareContext& context);
2724 void logSubmit (TestLog& log, size_t commandIndex) const;
2725 void submit (SubmitContext& context);
2726 void verify (VerifyContext& context, size_t commandIndex);
// Seed for the deterministic image contents; replayed again in verify().
2729 const deUint32 m_seed;
2730 deInt32 m_imageWidth;
2731 deInt32 m_imageHeight;
2732 vk::Move<vk::VkImage> m_srcImage;
2733 vk::Move<vk::VkDeviceMemory> m_memory;
2736 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2738 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
// Create the source image, upload seeded random bytes into it via a staging
// buffer, and leave it in TRANSFER_SRC_OPTIMAL for the later copy.
2741 void BufferCopyFromImage::prepare (PrepareContext& context)
2743 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2744 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2745 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2746 const vk::VkDevice device = context.getContext().getDevice();
2747 const vk::VkQueue queue = context.getContext().getQueue();
2748 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2749 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2750 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2752 m_imageWidth = imageSize[0];
2753 m_imageHeight = imageSize[1];
2756 const vk::VkImageCreateInfo createInfo =
2758 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2762 vk::VK_IMAGE_TYPE_2D,
2763 vk::VK_FORMAT_R8G8B8A8_UNORM,
2765 (deUint32)m_imageWidth,
2766 (deUint32)m_imageHeight,
2769 1, 1, // mipLevels, arrayLayers
2770 vk::VK_SAMPLE_COUNT_1_BIT,
2772 vk::VK_IMAGE_TILING_OPTIMAL,
2773 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2774 vk::VK_SHARING_MODE_EXCLUSIVE,
2776 (deUint32)queueFamilies.size(),
2778 vk::VK_IMAGE_LAYOUT_UNDEFINED
2781 m_srcImage = vk::createImage(vkd, device, &createInfo);
2784 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
// Host-visible staging buffer used to upload the random content.
2787 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2788 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2789 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> TRANSFER_DST_OPTIMAL before the upload copy.
2790 const vk::VkImageMemoryBarrier preImageBarrier =
2792 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2796 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2798 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2799 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2801 VK_QUEUE_FAMILY_IGNORED,
2802 VK_QUEUE_FAMILY_IGNORED,
2806 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2808 1, // Mip level count
// TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after the upload copy.
2813 const vk::VkImageMemoryBarrier postImageBarrier =
2815 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2818 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2821 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2822 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2824 VK_QUEUE_FAMILY_IGNORED,
2825 VK_QUEUE_FAMILY_IGNORED,
2829 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2831 1, // Mip level count
2836 const vk::VkBufferImageCopy region =
2841 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2848 (deUint32)m_imageWidth,
2849 (deUint32)m_imageHeight,
2855 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2856 de::Random rng (m_seed);
2859 deUint8* const data = (deUint8*)ptr;
2861 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2862 data[ndx] = rng.getUint8();
// Flush before unmap: memory may be non-coherent.
2865 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2866 vkd.unmapMemory(device, *memory);
2869 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
// Fixed mojibake: "&region" had been corrupted to "(R)ion".
2870 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2871 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2873 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2874 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record.
2878 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2880 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
// Record the image-to-buffer copy from the prepared source image into the
// test buffer.
2883 void BufferCopyFromImage::submit (SubmitContext& context)
2885 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2886 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2887 const vk::VkBufferImageCopy region =
2892 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2899 (deUint32)m_imageWidth,
2900 (deUint32)m_imageHeight,
// Fixed mojibake: "&region" had been corrupted to "(R)ion".
2905 vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
// Replay the RNG used in prepare() to update the reference memory with the
// bytes that were copied into the buffer.
2908 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2910 ReferenceMemory& reference (context.getReference());
2911 de::Random rng (m_seed);
2913 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2914 reference.set(ndx, rng.getUint8());
// Command that copies the test image into a newly created host-visible buffer.
2917 class ImageCopyToBuffer : public CmdCommand
2920 ImageCopyToBuffer (vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
2921 ~ImageCopyToBuffer (void) {}
// Fixed copy-paste defect: getName() returned "BufferCopyToImage", which
// mislabeled this command in logs and failure messages.
2922 const char* getName (void) const { return "ImageCopyToBuffer"; }
2924 void logPrepare (TestLog& log, size_t commandIndex) const;
2925 void prepare (PrepareContext& context);
2926 void logSubmit (TestLog& log, size_t commandIndex) const;
2927 void submit (SubmitContext& context);
2928 void verify (VerifyContext& context, size_t commandIndex);
// Layout the test image is expected to be in when submit() records the copy.
2931 vk::VkImageLayout m_imageLayout;
2932 vk::VkDeviceSize m_bufferSize;
2933 vk::Move<vk::VkBuffer> m_dstBuffer;
2934 vk::Move<vk::VkDeviceMemory> m_memory;
2935 vk::VkDeviceSize m_imageMemorySize;
2936 deInt32 m_imageWidth;
2937 deInt32 m_imageHeight;
2940 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2942 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
// Allocate a host-visible destination buffer sized for the whole RGBA8 image.
2945 void ImageCopyToBuffer::prepare (PrepareContext& context)
2947 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2948 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2949 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2950 const vk::VkDevice device = context.getContext().getDevice();
2951 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2953 m_imageWidth = context.getImageWidth();
2954 m_imageHeight = context.getImageHeight();
// 4 bytes per pixel (VK_FORMAT_R8G8B8A8_UNORM).
2955 m_bufferSize = 4 * m_imageWidth * m_imageHeight;
2956 m_imageMemorySize = context.getImageMemorySize();
2957 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2958 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2961 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2963 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
// Record the copy of the test image (in m_imageLayout) into the prepared
// destination buffer.
2966 void ImageCopyToBuffer::submit (SubmitContext& context)
2968 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2969 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2970 const vk::VkBufferImageCopy region =
2975 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2982 (deUint32)m_imageWidth,
2983 (deUint32)m_imageHeight,
// Fixed mojibake: "&region" had been corrupted to "(R)ion".
2988 vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
// Verify step: make the copied buffer host-readable, then compare it as an
// RGBA8 image against the reference image.
2991 void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2993 tcu::ResultCollector& resultCollector (context.getResultCollector());
2994 ReferenceMemory& reference (context.getReference());
2995 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2996 const vk::VkDevice device = context.getContext().getDevice();
2997 const vk::VkQueue queue = context.getContext().getQueue();
2998 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2999 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// Buffer barrier: TRANSFER_WRITE -> HOST_READ before mapping.
3000 const vk::VkBufferMemoryBarrier barrier =
3002 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3005 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3006 vk::VK_ACCESS_HOST_READ_BIT,
3008 VK_QUEUE_FAMILY_IGNORED,
3009 VK_QUEUE_FAMILY_IGNORED,
3015 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3017 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3018 queueRun(vkd, queue, *commandBuffer);
// The byte-level reference no longer tracks the image contents; comparison
// below is done against the reference image instead.
3020 reference.setUndefined(0, (size_t)m_imageMemorySize);
3022 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3023 const ConstPixelBufferAccess referenceImage (context.getReferenceImage().getAccess());
3024 const ConstPixelBufferAccess resultImage (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3026 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
// Exact comparison: zero threshold on all channels.
3028 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3029 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3031 vkd.unmapMemory(device, *m_memory);
// Command that fills the test image by copying from a seeded-random
// host-visible buffer.
3035 class ImageCopyFromBuffer : public CmdCommand
3038 ImageCopyFromBuffer (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3039 ~ImageCopyFromBuffer (void) {}
3040 const char* getName (void) const { return "ImageCopyFromBuffer"; }
3042 void logPrepare (TestLog& log, size_t commandIndex) const;
3043 void prepare (PrepareContext& context);
3044 void logSubmit (TestLog& log, size_t commandIndex) const;
3045 void submit (SubmitContext& context);
3046 void verify (VerifyContext& context, size_t commandIndex);
// Seed for the deterministic buffer contents; replayed again in verify().
3049 const deUint32 m_seed;
3050 const vk::VkImageLayout m_imageLayout;
3051 deInt32 m_imageWidth;
3052 deInt32 m_imageHeight;
3053 vk::VkDeviceSize m_imageMemorySize;
3054 vk::VkDeviceSize m_bufferSize;
3055 vk::Move<vk::VkBuffer> m_srcBuffer;
3056 vk::Move<vk::VkDeviceMemory> m_memory;
3059 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3061 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
// Allocate a host-visible source buffer sized for the whole test image and
// fill it with seeded random bytes.
3064 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3066 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3067 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3068 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3069 const vk::VkDevice device = context.getContext().getDevice();
3070 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Fixed swapped accessors: width was taken from getImageHeight() and height
// from getImageWidth(), unlike the sibling ImageCopyToBuffer::prepare /
// ImageCopyFromImage::prepare. For a non-square image the copy extent would
// not match the target image and verify() would write the reference image
// out of bounds.
3072 m_imageWidth = context.getImageWidth();
3073 m_imageHeight = context.getImageHeight();
3074 m_imageMemorySize = context.getImageMemorySize();
// 4 bytes per pixel (VK_FORMAT_R8G8B8A8_UNORM).
3075 m_bufferSize = m_imageWidth * m_imageHeight * 4;
3076 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3077 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3080 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3081 de::Random rng (m_seed);
3084 deUint8* const data = (deUint8*)ptr;
3086 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3087 data[ndx] = rng.getUint8();
// Flush before unmap: memory may be non-coherent.
3090 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3091 vkd.unmapMemory(device, *m_memory);
// Log what submit() will record.
3095 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3097 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
// Record the buffer-to-image copy from the prepared source buffer into the
// test image (expected to be in m_imageLayout).
3100 void ImageCopyFromBuffer::submit (SubmitContext& context)
3102 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3103 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3104 const vk::VkBufferImageCopy region =
3109 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3116 (deUint32)m_imageWidth,
3117 (deUint32)m_imageHeight,
// Fixed mojibake: "&region" had been corrupted to "(R)ion".
3122 vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
// Replay the RNG from prepare() to rebuild the expected image contents in
// the reference image; the byte-level reference becomes undefined.
3125 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3127 ReferenceMemory& reference (context.getReference());
3128 de::Random rng (m_seed);
3130 reference.setUndefined(0, (size_t)m_imageMemorySize);
3133 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
// Consume bytes in the same r,g,b,a order they were written to the buffer.
3135 for (deInt32 y = 0; y < m_imageHeight; y++)
3136 for (deInt32 x = 0; x < m_imageWidth; x++)
3138 const deUint8 r8 = rng.getUint8();
3139 const deUint8 g8 = rng.getUint8();
3140 const deUint8 b8 = rng.getUint8();
3141 const deUint8 a8 = rng.getUint8();
3143 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
// Command that fills the test image by copying from a seeded-random source image.
3148 class ImageCopyFromImage : public CmdCommand
3151 ImageCopyFromImage (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3152 ~ImageCopyFromImage (void) {}
3153 const char* getName (void) const { return "ImageCopyFromImage"; }
3155 void logPrepare (TestLog& log, size_t commandIndex) const;
3156 void prepare (PrepareContext& context);
3157 void logSubmit (TestLog& log, size_t commandIndex) const;
3158 void submit (SubmitContext& context);
3159 void verify (VerifyContext& context, size_t commandIndex);
// Seed for the deterministic source-image contents; replayed in verify().
3162 const deUint32 m_seed;
3163 const vk::VkImageLayout m_imageLayout;
3164 deInt32 m_imageWidth;
3165 deInt32 m_imageHeight;
3166 vk::VkDeviceSize m_imageMemorySize;
3167 vk::Move<vk::VkImage> m_srcImage;
3168 vk::Move<vk::VkDeviceMemory> m_memory;
3171 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3173 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
// Create the source image with the same dimensions as the test image, upload
// seeded random content via a staging buffer, and leave the image in
// TRANSFER_SRC_OPTIMAL for the later image-to-image copy.
3176 void ImageCopyFromImage::prepare (PrepareContext& context)
3178 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3179 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3180 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3181 const vk::VkDevice device = context.getContext().getDevice();
3182 const vk::VkQueue queue = context.getContext().getQueue();
3183 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3184 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3186 m_imageWidth = context.getImageWidth();
3187 m_imageHeight = context.getImageHeight();
3188 m_imageMemorySize = context.getImageMemorySize();
3191 const vk::VkImageCreateInfo createInfo =
3193 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3197 vk::VK_IMAGE_TYPE_2D,
3198 vk::VK_FORMAT_R8G8B8A8_UNORM,
3200 (deUint32)m_imageWidth,
3201 (deUint32)m_imageHeight,
3204 1, 1, // mipLevels, arrayLayers
3205 vk::VK_SAMPLE_COUNT_1_BIT,
3207 vk::VK_IMAGE_TILING_OPTIMAL,
3208 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3209 vk::VK_SHARING_MODE_EXCLUSIVE,
3211 (deUint32)queueFamilies.size(),
3213 vk::VK_IMAGE_LAYOUT_UNDEFINED
3216 m_srcImage = vk::createImage(vkd, device, &createInfo);
3219 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
// Host-visible staging buffer used to upload the random content.
3222 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3223 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3224 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> TRANSFER_DST_OPTIMAL before the upload copy.
3225 const vk::VkImageMemoryBarrier preImageBarrier =
3227 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3231 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3233 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3234 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3236 VK_QUEUE_FAMILY_IGNORED,
3237 VK_QUEUE_FAMILY_IGNORED,
3241 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3243 1, // Mip level count
// TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after the upload copy.
3248 const vk::VkImageMemoryBarrier postImageBarrier =
3250 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3253 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3256 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3257 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3259 VK_QUEUE_FAMILY_IGNORED,
3260 VK_QUEUE_FAMILY_IGNORED,
3264 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3266 1, // Mip level count
3271 const vk::VkBufferImageCopy region =
3276 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3283 (deUint32)m_imageWidth,
3284 (deUint32)m_imageHeight,
3290 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3291 de::Random rng (m_seed);
3294 deUint8* const data = (deUint8*)ptr;
3296 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3297 data[ndx] = rng.getUint8();
// Flush before unmap: memory may be non-coherent.
3300 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3301 vkd.unmapMemory(device, *memory);
3304 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
// Fixed mojibake: "&region" had been corrupted to "(R)ion".
3305 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3306 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3308 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3309 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record.
3313 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3315 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
// Record the image-to-image copy from the prepared source image into the
// test image (expected to be in m_imageLayout).
3318 void ImageCopyFromImage::submit (SubmitContext& context)
3320 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3321 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3322 const vk::VkImageCopy region =
3325 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3333 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3340 (deUint32)m_imageWidth,
3341 (deUint32)m_imageHeight,
// Fixed mojibake: "&region" had been corrupted to "(R)ion".
3346 vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
3349 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3351 ReferenceMemory& reference (context.getReference());
3352 de::Random rng (m_seed);
3354 reference.setUndefined(0, (size_t)m_imageMemorySize);
3357 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3359 for (deInt32 y = 0; y < m_imageHeight; y++)
3360 for (deInt32 x = 0; x < m_imageWidth; x++)
3362 const deUint8 r8 = rng.getUint8();
3363 const deUint8 g8 = rng.getUint8();
3364 const deUint8 b8 = rng.getUint8();
3365 const deUint8 a8 = rng.getUint8();
3367 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3372 class ImageCopyToImage : public CmdCommand
3375 ImageCopyToImage (vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
3376 ~ImageCopyToImage (void) {}
3377 const char* getName (void) const { return "ImageCopyToImage"; }
3379 void logPrepare (TestLog& log, size_t commandIndex) const;
3380 void prepare (PrepareContext& context);
3381 void logSubmit (TestLog& log, size_t commandIndex) const;
3382 void submit (SubmitContext& context);
3383 void verify (VerifyContext& context, size_t commandIndex);
3386 const vk::VkImageLayout m_imageLayout;
3387 deInt32 m_imageWidth;
3388 deInt32 m_imageHeight;
3389 vk::VkDeviceSize m_imageMemorySize;
3390 vk::Move<vk::VkImage> m_dstImage;
3391 vk::Move<vk::VkDeviceMemory> m_memory;
3394 void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3396 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
// Creates the destination image (same size/format as the test image), binds
// memory to it, and transitions it UNDEFINED -> TRANSFER_DST_OPTIMAL with a
// one-off command buffer so submit() can copy into it.
// NOTE(review): struct initializers below are missing fields/braces due to
// line extraction; code left byte-identical.
3399 void ImageCopyToImage::prepare (PrepareContext& context)
3401 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3402 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3403 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3404 const vk::VkDevice device = context.getContext().getDevice();
3405 const vk::VkQueue queue = context.getContext().getQueue();
3406 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3407 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Cache dimensions for submit()/verify().
3409 m_imageWidth = context.getImageWidth();
3410 m_imageHeight = context.getImageHeight();
3411 m_imageMemorySize = context.getImageMemorySize();
// Destination image: 2D RGBA8, optimal tiling, usable as both transfer src and dst.
3414 const vk::VkImageCreateInfo createInfo =
3416 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3420 vk::VK_IMAGE_TYPE_2D,
3421 vk::VK_FORMAT_R8G8B8A8_UNORM,
3423 (deUint32)m_imageWidth,
3424 (deUint32)m_imageHeight,
3427 1, 1, // mipLevels, arrayLayers
3428 vk::VK_SAMPLE_COUNT_1_BIT,
3430 vk::VK_IMAGE_TILING_OPTIMAL,
3431 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3432 vk::VK_SHARING_MODE_EXCLUSIVE,
3434 (deUint32)queueFamilies.size(),
3436 vk::VK_IMAGE_LAYOUT_UNDEFINED
3439 m_dstImage = vk::createImage(vkd, device, &createInfo);
3442 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// Layout transition UNDEFINED -> TRANSFER_DST_OPTIMAL, executed immediately.
3445 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3446 const vk::VkImageMemoryBarrier barrier =
3448 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3452 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3454 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3455 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3457 VK_QUEUE_FAMILY_IGNORED,
3458 VK_QUEUE_FAMILY_IGNORED,
3462 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3464 1, // Mip level count
3470 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
// queueRun submits and waits for completion, so the transition is done before submit().
3472 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3473 queueRun(vkd, queue, *commandBuffer);
// Logs the command that submit() records.
3477 void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3479 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
3482 void ImageCopyToImage::submit (SubmitContext& context)
3484 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3485 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3486 const vk::VkImageCopy region =
3489 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3497 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3504 (deUint32)m_imageWidth,
3505 (deUint32)m_imageHeight,
3510 vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
// Reads m_dstImage back into a host-visible buffer (with the required
// transfer->transfer and transfer->host barriers) and compares the pixels
// against the reference image with zero threshold.
3513 void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
3515 tcu::ResultCollector& resultCollector (context.getResultCollector());
3516 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3517 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3518 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3519 const vk::VkDevice device = context.getContext().getDevice();
3520 const vk::VkQueue queue = context.getContext().getQueue();
3521 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3522 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3523 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// 4 bytes per pixel (RGBA8) readback buffer.
3524 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3525 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Make the copy's writes visible and move dst image to TRANSFER_SRC_OPTIMAL.
3527 const vk::VkImageMemoryBarrier imageBarrier =
3529 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3532 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3533 vk::VK_ACCESS_TRANSFER_READ_BIT,
3535 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3536 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3538 VK_QUEUE_FAMILY_IGNORED,
3539 VK_QUEUE_FAMILY_IGNORED,
3543 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3545 1, // Mip level count
// Make the buffer copy visible to the host before mapping.
3550 const vk::VkBufferMemoryBarrier bufferBarrier =
3552 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3555 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3556 vk::VK_ACCESS_HOST_READ_BIT,
3558 VK_QUEUE_FAMILY_IGNORED,
3559 VK_QUEUE_FAMILY_IGNORED,
3564 const vk::VkBufferImageCopy region =
3569 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3576 (deUint32)m_imageWidth,
3577 (deUint32)m_imageHeight,
3582 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
// NOTE(review): "®ion" below is a mis-encoded "&region" — fix when restoring this line.
3583 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, ®ion);
3584 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3587 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3588 queueRun(vkd, queue, *commandBuffer);
// Map, invalidate (memory may be non-coherent), then compare pixel-exact.
3591 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3593 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3596 const deUint8* const data = (const deUint8*)ptr;
3597 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
3598 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3600 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3601 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3604 vkd.unmapMemory(device, *memory);
// Test command that fills a source image with seeded random pixels and blits
// it onto the shared test image (1:1 or 2x upscale, nearest filtering).
// NOTE(review): lines are non-contiguous (extraction dropped braces/access
// specifiers); only comments are added here.
3614 class ImageBlitFromImage : public CmdCommand
3617 ImageBlitFromImage (deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
3618 ~ImageBlitFromImage (void) {}
3619 const char* getName (void) const { return "ImageBlitFromImage"; }
3621 void logPrepare (TestLog& log, size_t commandIndex) const;
3622 void prepare (PrepareContext& context);
3623 void logSubmit (TestLog& log, size_t commandIndex) const;
3624 void submit (SubmitContext& context);
3625 void verify (VerifyContext& context, size_t commandIndex);
// Seed drives the deterministic random fill so verify() can regenerate it.
3628 const deUint32 m_seed;
3629 const BlitScale m_scale;
3630 const vk::VkImageLayout m_imageLayout;
3631 deInt32 m_imageWidth;
3632 deInt32 m_imageHeight;
3633 vk::VkDeviceSize m_imageMemorySize;
// Source image is half-size in each dimension when m_scale == BLIT_SCALE_20.
3634 deInt32 m_srcImageWidth;
3635 deInt32 m_srcImageHeight;
3636 vk::Move<vk::VkImage> m_srcImage;
3637 vk::Move<vk::VkDeviceMemory> m_memory;
// Logs what prepare() will do for this command.
3640 void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3642 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
// Creates the source image (full or half size depending on m_scale), fills a
// staging buffer with seeded random bytes, uploads it into the image and
// transitions the image to TRANSFER_SRC_OPTIMAL ready for the blit.
// NOTE(review): struct initializers below are missing fields/braces due to
// line extraction; code left byte-identical.
3645 void ImageBlitFromImage::prepare (PrepareContext& context)
3647 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3648 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3649 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3650 const vk::VkDevice device = context.getContext().getDevice();
3651 const vk::VkQueue queue = context.getContext().getQueue();
3652 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3653 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3655 m_imageWidth = context.getImageWidth();
3656 m_imageHeight = context.getImageHeight();
3657 m_imageMemorySize = context.getImageMemorySize();
// Source dimensions: equal for 1:1 blit, halved for the 2x upscale blit.
3659 if (m_scale == BLIT_SCALE_10)
3661 m_srcImageWidth = m_imageWidth;
3662 m_srcImageHeight = m_imageHeight;
3664 else if (m_scale == BLIT_SCALE_20)
3666 m_srcImageWidth = m_imageWidth / 2;
3667 m_srcImageHeight = m_imageHeight / 2;
3670 DE_FATAL("Unsupported scale");
3673 const vk::VkImageCreateInfo createInfo =
3675 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3679 vk::VK_IMAGE_TYPE_2D,
3680 vk::VK_FORMAT_R8G8B8A8_UNORM,
3682 (deUint32)m_srcImageWidth,
3683 (deUint32)m_srcImageHeight,
3686 1, 1, // mipLevels, arrayLayers
3687 vk::VK_SAMPLE_COUNT_1_BIT,
3689 vk::VK_IMAGE_TILING_OPTIMAL,
3690 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3691 vk::VK_SHARING_MODE_EXCLUSIVE,
3693 (deUint32)queueFamilies.size(),
3695 vk::VK_IMAGE_LAYOUT_UNDEFINED
3698 m_srcImage = vk::createImage(vkd, device, &createInfo);
3701 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
// Host-visible staging buffer (4 bytes/pixel) used only inside this function.
3704 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3705 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3706 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> TRANSFER_DST_OPTIMAL before the upload...
3707 const vk::VkImageMemoryBarrier preImageBarrier =
3709 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3713 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3715 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3716 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3718 VK_QUEUE_FAMILY_IGNORED,
3719 VK_QUEUE_FAMILY_IGNORED,
3723 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3725 1, // Mip level count
// ...then TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after it.
3730 const vk::VkImageMemoryBarrier postImageBarrier =
3732 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3735 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3738 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3739 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3741 VK_QUEUE_FAMILY_IGNORED,
3742 VK_QUEUE_FAMILY_IGNORED,
3746 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3748 1, // Mip level count
3753 const vk::VkBufferImageCopy region =
3758 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3765 (deUint32)m_srcImageWidth,
3766 (deUint32)m_srcImageHeight,
// Fill staging memory with deterministic random bytes; verify() regenerates
// the same sequence from m_seed.
3772 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
3773 de::Random rng (m_seed);
3776 deUint8* const data = (deUint8*)ptr;
3778 for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
3779 data[ndx] = rng.getUint8();
// Flush (memory may be non-coherent) before the GPU reads it.
3782 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_srcImageWidth * m_srcImageHeight);
3783 vkd.unmapMemory(device, *memory);
3786 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
// NOTE(review): "®ion" below is a mis-encoded "&region" — fix when restoring this line.
3787 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
3788 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3790 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3791 queueRun(vkd, queue, *commandBuffer);
// Logs the command that submit() records, including the scale variant.
3795 void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3797 log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
3800 void ImageBlitFromImage::submit (SubmitContext& context)
3802 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3803 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3804 const vk::VkImageBlit region =
3808 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3824 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3838 vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, ®ion, vk::VK_FILTER_NEAREST);
// Updates the reference: regenerates the seeded random source pixels and
// writes the expected blit result into the reference image. Raw memory
// content is marked undefined because optimal tiling is opaque.
3841 void ImageBlitFromImage::verify (VerifyContext& context, size_t)
3843 ReferenceMemory& reference (context.getReference());
3844 de::Random rng (m_seed);
3846 reference.setUndefined(0, (size_t)m_imageMemorySize);
3849 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
// 1:1 blit: destination pixels are exactly the regenerated random stream.
3851 if (m_scale == BLIT_SCALE_10)
3853 for (deInt32 y = 0; y < m_imageHeight; y++)
3854 for (deInt32 x = 0; x < m_imageWidth; x++)
3856 const deUint8 r8 = rng.getUint8();
3857 const deUint8 g8 = rng.getUint8();
3858 const deUint8 b8 = rng.getUint8();
3859 const deUint8 a8 = rng.getUint8();
3861 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
// 2x upscale: rebuild the half-size source, then sample it with the same
// center-of-texel nearest rule the blit uses.
3864 else if (m_scale == BLIT_SCALE_20)
3866 tcu::TextureLevel source (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
3867 const float xscale = ((float)m_srcImageWidth) / (float)m_imageWidth;
3868 const float yscale = ((float)m_srcImageHeight) / (float)m_imageHeight;
3870 for (deInt32 y = 0; y < m_srcImageHeight; y++)
3871 for (deInt32 x = 0; x < m_srcImageWidth; x++)
3873 const deUint8 r8 = rng.getUint8();
3874 const deUint8 g8 = rng.getUint8();
3875 const deUint8 b8 = rng.getUint8();
3876 const deUint8 a8 = rng.getUint8();
3878 source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
3881 for (deInt32 y = 0; y < m_imageHeight; y++)
3882 for (deInt32 x = 0; x < m_imageWidth; x++)
3883 refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
3886 DE_FATAL("Unsupported scale");
// Test command that blits the shared test image into a freshly allocated
// destination image (1:1 or 2x upscale) and verifies the result by readback.
// NOTE(review): lines are non-contiguous (extraction dropped braces/access
// specifiers); only comments are added here.
3890 class ImageBlitToImage : public CmdCommand
3893 ImageBlitToImage (BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
3894 ~ImageBlitToImage (void) {}
3895 const char* getName (void) const { return "ImageBlitToImage"; }
3897 void logPrepare (TestLog& log, size_t commandIndex) const;
3898 void prepare (PrepareContext& context);
3899 void logSubmit (TestLog& log, size_t commandIndex) const;
3900 void submit (SubmitContext& context);
3901 void verify (VerifyContext& context, size_t commandIndex);
3904 const BlitScale m_scale;
3905 const vk::VkImageLayout m_imageLayout;
3906 deInt32 m_imageWidth;
3907 deInt32 m_imageHeight;
3908 vk::VkDeviceSize m_imageMemorySize;
// Destination is double-size in each dimension when m_scale == BLIT_SCALE_20.
3909 deInt32 m_dstImageWidth;
3910 deInt32 m_dstImageHeight;
3911 vk::Move<vk::VkImage> m_dstImage;
3912 vk::Move<vk::VkDeviceMemory> m_memory;
// Logs what prepare() will do for this command.
3915 void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3917 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
// Creates the destination image (full or double size depending on m_scale),
// binds memory, and transitions it UNDEFINED -> TRANSFER_DST_OPTIMAL so
// submit() can blit into it.
// NOTE(review): struct initializers below are missing fields/braces due to
// line extraction; code left byte-identical.
3920 void ImageBlitToImage::prepare (PrepareContext& context)
3922 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3923 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3924 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3925 const vk::VkDevice device = context.getContext().getDevice();
3926 const vk::VkQueue queue = context.getContext().getQueue();
3927 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3928 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3930 m_imageWidth = context.getImageWidth();
3931 m_imageHeight = context.getImageHeight();
3932 m_imageMemorySize = context.getImageMemorySize();
// Destination dimensions: equal for 1:1 blit, doubled for 2x upscale.
3934 if (m_scale == BLIT_SCALE_10)
3936 m_dstImageWidth = context.getImageWidth();
3937 m_dstImageHeight = context.getImageHeight();
3939 else if (m_scale == BLIT_SCALE_20)
3941 m_dstImageWidth = context.getImageWidth() * 2;
3942 m_dstImageHeight = context.getImageHeight() * 2;
// NOTE(review): "Unsupportd" is a typo ("Unsupported") in this fatal-error
// string; harmless at runtime, fix upstream.
3945 DE_FATAL("Unsupportd blit scale");
3948 const vk::VkImageCreateInfo createInfo =
3950 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3954 vk::VK_IMAGE_TYPE_2D,
3955 vk::VK_FORMAT_R8G8B8A8_UNORM,
3957 (deUint32)m_dstImageWidth,
3958 (deUint32)m_dstImageHeight,
3961 1, 1, // mipLevels, arrayLayers
3962 vk::VK_SAMPLE_COUNT_1_BIT,
3964 vk::VK_IMAGE_TILING_OPTIMAL,
3965 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3966 vk::VK_SHARING_MODE_EXCLUSIVE,
3968 (deUint32)queueFamilies.size(),
3970 vk::VK_IMAGE_LAYOUT_UNDEFINED
3973 m_dstImage = vk::createImage(vkd, device, &createInfo);
3976 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// One-off layout transition UNDEFINED -> TRANSFER_DST_OPTIMAL.
3979 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3980 const vk::VkImageMemoryBarrier barrier =
3982 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3986 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3988 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3989 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3991 VK_QUEUE_FAMILY_IGNORED,
3992 VK_QUEUE_FAMILY_IGNORED,
3996 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3998 1, // Mip level count
4004 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
4006 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4007 queueRun(vkd, queue, *commandBuffer);
// Logs the command that submit() records, including the scale variant.
4011 void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
4013 log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
4016 void ImageBlitToImage::submit (SubmitContext& context)
4018 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4019 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4020 const vk::VkImageBlit region =
4024 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4040 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4054 vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion, vk::VK_FILTER_NEAREST);
// Reads m_dstImage back into a host-visible buffer and compares it against
// the reference: directly for 1:1, or against a nearest-upscaled (x/2, y/2)
// expansion of the reference for the 2x case.
4057 void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
4059 tcu::ResultCollector& resultCollector (context.getResultCollector());
4060 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4061 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4062 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4063 const vk::VkDevice device = context.getContext().getDevice();
4064 const vk::VkQueue queue = context.getContext().getQueue();
4065 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4066 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4067 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4068 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4069 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Make the blit's writes visible; TRANSFER_DST -> TRANSFER_SRC for readback.
4071 const vk::VkImageMemoryBarrier imageBarrier =
4073 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4076 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4077 vk::VK_ACCESS_TRANSFER_READ_BIT,
4079 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4080 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4082 VK_QUEUE_FAMILY_IGNORED,
4083 VK_QUEUE_FAMILY_IGNORED,
4087 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4089 1, // Mip level count
// Make the buffer copy visible to the host before mapping.
4094 const vk::VkBufferMemoryBarrier bufferBarrier =
4096 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4099 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4100 vk::VK_ACCESS_HOST_READ_BIT,
4102 VK_QUEUE_FAMILY_IGNORED,
4103 VK_QUEUE_FAMILY_IGNORED,
4108 const vk::VkBufferImageCopy region =
4113 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4120 (deUint32)m_dstImageWidth,
4121 (deUint32)m_dstImageHeight,
4126 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
// NOTE(review): "®ion" below is a mis-encoded "&region" — fix when restoring this line.
4127 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, ®ion);
4128 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4131 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4132 queueRun(vkd, queue, *commandBuffer);
4135 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);
4137 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_dstImageWidth * m_dstImageHeight);
// 1:1 blit: compare directly against the reference image.
4139 if (m_scale == BLIT_SCALE_10)
4141 const deUint8* const data = (const deUint8*)ptr;
4142 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4143 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4145 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4146 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
// 2x blit: expand the reference by nearest sampling (x/2, y/2) and compare.
4148 else if (m_scale == BLIT_SCALE_20)
4150 const deUint8* const data = (const deUint8*)ptr;
4151 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4152 tcu::TextureLevel reference (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);
4155 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4157 for (deInt32 y = 0; y < m_dstImageHeight; y++)
4158 for (deInt32 x = 0; x < m_dstImageWidth; x++)
4160 reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
4164 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4165 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4168 DE_FATAL("Unknown scale");
4170 vkd.unmapMemory(device, *memory);
// Thin wrapper around PrepareContext that additionally exposes the render
// pass, framebuffer and target dimensions to RenderPassCommand::prepare().
// NOTE(review): lines are non-contiguous (extraction dropped braces/access
// specifiers); only comments are added here.
4174 class PrepareRenderPassContext
4177 PrepareRenderPassContext (PrepareContext& context,
4178 vk::VkRenderPass renderPass,
4179 vk::VkFramebuffer framebuffer,
4180 deInt32 targetWidth,
4181 deInt32 targetHeight)
4182 : m_context (context)
4183 , m_renderPass (renderPass)
4184 , m_framebuffer (framebuffer)
4185 , m_targetWidth (targetWidth)
4186 , m_targetHeight (targetHeight)
// Accessors below simply forward to the wrapped PrepareContext.
4190 const Memory& getMemory (void) const { return m_context.getMemory(); }
4191 const Context& getContext (void) const { return m_context.getContext(); }
4192 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
4194 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
4195 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
4197 vk::VkImage getImage (void) const { return m_context.getImage(); }
4198 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
4199 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
4200 vk::VkImageLayout getImageLayout (void) const { return m_context.getImageLayout(); }
// Render-target specifics owned by this wrapper (not the inner context).
4202 deInt32 getTargetWidth (void) const { return m_targetWidth; }
4203 deInt32 getTargetHeight (void) const { return m_targetHeight; }
4205 vk::VkRenderPass getRenderPass (void) const { return m_renderPass; }
4208 PrepareContext& m_context;
4209 const vk::VkRenderPass m_renderPass;
4210 const vk::VkFramebuffer m_framebuffer;
4211 const deInt32 m_targetWidth;
4212 const deInt32 m_targetHeight;
// Wrapper around VerifyContext that adds a reference render target
// (RGBA8 TextureLevel of the framebuffer size) for render-pass commands to
// draw their expected output into.
4215 class VerifyRenderPassContext
4218 VerifyRenderPassContext (VerifyContext& context,
4219 deInt32 targetWidth,
4220 deInt32 targetHeight)
4221 : m_context (context)
4222 , m_referenceTarget (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
4226 const Context& getContext (void) const { return m_context.getContext(); }
4227 TestLog& getLog (void) const { return m_context.getLog(); }
4228 tcu::ResultCollector& getResultCollector (void) const { return m_context.getResultCollector(); }
// Reference image for the color attachment of the current render pass.
4230 TextureLevel& getReferenceTarget (void) { return m_referenceTarget; }
4232 ReferenceMemory& getReference (void) { return m_context.getReference(); }
4233 TextureLevel& getReferenceImage (void) { return m_context.getReferenceImage();}
4236 VerifyContext& m_context;
4237 TextureLevel m_referenceTarget;
// Abstract base for commands recorded inside a render pass. All hooks have
// empty defaults so subclasses override only what they need.
4240 class RenderPassCommand
4243 virtual ~RenderPassCommand (void) {}
4244 virtual const char* getName (void) const = 0;
4246 // Log things that are done during prepare
4247 virtual void logPrepare (TestLog&, size_t) const {}
4248 // Log submitted calls etc.
4249 virtual void logSubmit (TestLog&, size_t) const {}
4251 // Allocate vulkan resources and prepare for submit.
4252 virtual void prepare (PrepareRenderPassContext&) {}
4254 // Submit commands to command buffer.
4255 virtual void submit (SubmitContext&) {}
// Update reference state / compare results after execution.
4258 virtual void verify (VerifyRenderPassContext&, size_t) {}
// CmdCommand that owns a list of RenderPassCommands and runs them inside a
// single render pass with a 256x256 RGBA8 color target. Takes ownership of
// (and deletes) the given commands.
// NOTE(review): lines are non-contiguous (extraction dropped braces/access
// specifiers); only comments are added here.
4261 class SubmitRenderPass : public CmdCommand
4264 SubmitRenderPass (const vector<RenderPassCommand*>& commands);
4265 ~SubmitRenderPass (void);
4266 const char* getName (void) const { return "SubmitRenderPass"; }
4268 void logPrepare (TestLog&, size_t) const;
4269 void logSubmit (TestLog&, size_t) const;
4271 void prepare (PrepareContext&);
4272 void submit (SubmitContext&);
4274 void verify (VerifyContext&, size_t);
// Fixed framebuffer dimensions (set to 256x256 in the constructor).
4277 const deInt32 m_targetWidth;
4278 const deInt32 m_targetHeight;
4279 vk::Move<vk::VkRenderPass> m_renderPass;
4280 vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
4281 de::MovePtr<vk::Allocation> m_colorTargetMemory2;
4282 vk::Move<vk::VkImage> m_colorTarget;
4283 vk::Move<vk::VkImageView> m_colorTargetView;
4284 vk::Move<vk::VkFramebuffer> m_framebuffer;
// Owned child commands, deleted in the destructor.
4285 vector<RenderPassCommand*> m_commands;
// Takes ownership of the command pointers; target size is fixed at 256x256.
4288 SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
4289 : m_targetWidth (256)
4290 , m_targetHeight (256)
4291 , m_commands (commands)
// Deletes the owned RenderPassCommands.
4295 SubmitRenderPass::~SubmitRenderPass()
4297 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4298 delete m_commands[cmdNdx];
// Opens a log section for this command and forwards logPrepare to children.
4301 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4303 const string sectionName (de::toString(commandIndex) + ":" + getName());
4304 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4306 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4308 RenderPassCommand& command = *m_commands[cmdNdx];
4309 command.logPrepare(log, cmdNdx);
// Opens a log section for this command and forwards logSubmit to children.
4313 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4315 const string sectionName (de::toString(commandIndex) + ":" + getName());
4316 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4318 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4320 RenderPassCommand& command = *m_commands[cmdNdx];
4321 command.logSubmit(log, cmdNdx);
// Builds the render pass objects: single-subpass render pass with one RGBA8
// color attachment (cleared on load, final layout TRANSFER_SRC_OPTIMAL), the
// color target image + view, and the framebuffer. Finally forwards prepare()
// to each child command through a PrepareRenderPassContext.
// NOTE(review): three separate locals named `createInfo` appear below — in
// the full source each lives in its own scope block; braces were dropped by
// line extraction. Code left byte-identical.
4325 void SubmitRenderPass::prepare (PrepareContext& context)
4327 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4328 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4329 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4330 const vk::VkDevice device = context.getContext().getDevice();
4331 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4333 const vk::VkAttachmentReference colorAttachments[] =
4335 { 0, vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL }
4337 const vk::VkSubpassDescription subpass =
4340 vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
4345 DE_LENGTH_OF_ARRAY(colorAttachments),
// Color attachment: cleared at load, stored, ends in TRANSFER_SRC_OPTIMAL
// so verify() can read it back without another transition.
4352 const vk::VkAttachmentDescription attachment =
4355 vk::VK_FORMAT_R8G8B8A8_UNORM,
4356 vk::VK_SAMPLE_COUNT_1_BIT,
4358 vk::VK_ATTACHMENT_LOAD_OP_CLEAR,
4359 vk::VK_ATTACHMENT_STORE_OP_STORE,
4361 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
4362 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,
4364 vk::VK_IMAGE_LAYOUT_UNDEFINED,
4365 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
// Color target image creation.
4368 const vk::VkImageCreateInfo createInfo =
4370 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4374 vk::VK_IMAGE_TYPE_2D,
4375 vk::VK_FORMAT_R8G8B8A8_UNORM,
4376 { (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
4379 vk::VK_SAMPLE_COUNT_1_BIT,
4380 vk::VK_IMAGE_TILING_OPTIMAL,
4381 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4382 vk::VK_SHARING_MODE_EXCLUSIVE,
4383 (deUint32)queueFamilies.size(),
4385 vk::VK_IMAGE_LAYOUT_UNDEFINED
4388 m_colorTarget = vk::createImage(vkd, device, &createInfo);
4391 m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);
// Image view for the color target.
4394 const vk::VkImageViewCreateInfo createInfo =
4396 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
4401 vk::VK_IMAGE_VIEW_TYPE_2D,
4402 vk::VK_FORMAT_R8G8B8A8_UNORM,
4404 vk::VK_COMPONENT_SWIZZLE_R,
4405 vk::VK_COMPONENT_SWIZZLE_G,
4406 vk::VK_COMPONENT_SWIZZLE_B,
4407 vk::VK_COMPONENT_SWIZZLE_A
4410 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4418 m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
// Render pass creation.
4421 const vk::VkRenderPassCreateInfo createInfo =
4423 vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
4437 m_renderPass = vk::createRenderPass(vkd, device, &createInfo);
// Framebuffer wrapping the color target view.
4441 const vk::VkImageView imageViews[] =
4445 const vk::VkFramebufferCreateInfo createInfo =
4447 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
4452 DE_LENGTH_OF_ARRAY(imageViews),
4454 (deUint32)m_targetWidth,
4455 (deUint32)m_targetHeight,
4459 m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
// Let each child command allocate its resources against this render pass.
4463 PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);
4465 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4467 RenderPassCommand& command = *m_commands[cmdNdx];
4468 command.prepare(renderpassContext);
// Begins the render pass (clearing the target to opaque black), records every
// child command inline, then ends the render pass.
4473 void SubmitRenderPass::submit (SubmitContext& context)
4475 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4476 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4477 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.0f, 0.0f, 0.0f, 1.0f);
4479 const vk::VkRenderPassBeginInfo beginInfo =
4481 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
// Render area covers the whole framebuffer.
4487 { { 0, 0 }, { (deUint32)m_targetWidth, (deUint32)m_targetHeight } },
4492 vkd.cmdBeginRenderPass(commandBuffer, &beginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
4494 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4496 RenderPassCommand& command = *m_commands[cmdNdx];
4498 command.submit(context);
4501 vkd.cmdEndRenderPass(commandBuffer);
// Verify the rendered result against a CPU-side reference image.
//
// Builds the reference by replaying every command's verify() into a cleared
// reference target, then reads the GPU color target back into a host-visible
// buffer (barrier -> copy -> barrier -> host read) and compares the two with
// a zero-tolerance per-pixel comparison. Failures are reported through the
// shared ResultCollector under a "<commandIndex>:<name>" section.
//
// BUGFIX: the copy command previously passed "®ion" — the '&reg' prefix of
// "&region" had been swallowed by HTML-entity decoding (mojibake). Restored
// the intended address-of expression so vkCmdCopyImageToBuffer receives the
// VkBufferImageCopy region.
//
// NOTE(review): several initializer fields of the barriers/region fall on
// lines elided from this listing; the old/new layouts both read
// TRANSFER_SRC_OPTIMAL here — presumably the render pass's finalLayout is
// already TRANSFER_SRC_OPTIMAL, so the barrier only publishes the attachment
// write. Confirm against the render-pass creation code.
4504 void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
4506 TestLog& log (context.getLog());
4507 tcu::ResultCollector& resultCollector (context.getResultCollector());
4508 const string sectionName (de::toString(commandIndex) + ":" + getName());
4509 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4510 VerifyRenderPassContext verifyContext (context, m_targetWidth, m_targetHeight);
// Reference starts from the same clear color used at vkCmdBeginRenderPass.
4512 tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4514 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4516 RenderPassCommand& command = *m_commands[cmdNdx];
4517 command.verify(verifyContext, cmdNdx);
4521 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4522 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4523 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4524 const vk::VkDevice device = context.getContext().getDevice();
4525 const vk::VkQueue queue = context.getContext().getQueue();
4526 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4527 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4528 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// 4 bytes/pixel (RGBA8) readback buffer, host visible for the CPU compare.
4529 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4530 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Make the color-attachment writes visible to the transfer read.
4532 const vk::VkImageMemoryBarrier imageBarrier =
4534 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4537 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4538 vk::VK_ACCESS_TRANSFER_READ_BIT,
4540 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4541 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4543 VK_QUEUE_FAMILY_IGNORED,
4544 VK_QUEUE_FAMILY_IGNORED,
4548 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4550 1, // Mip level count
// Make the transfer write visible to the host read after queue completion.
4555 const vk::VkBufferMemoryBarrier bufferBarrier =
4557 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4560 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4561 vk::VK_ACCESS_HOST_READ_BIT,
4563 VK_QUEUE_FAMILY_IGNORED,
4564 VK_QUEUE_FAMILY_IGNORED,
4569 const vk::VkBufferImageCopy region =
4574 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4581 (deUint32)m_targetWidth,
4582 (deUint32)m_targetHeight,
4587 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4588 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4589 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
// Submit and wait for completion before mapping.
4592 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4593 queueRun(vkd, queue, *commandBuffer);
4596 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
// Memory may be non-coherent; invalidate before the CPU reads it.
4598 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_targetWidth * m_targetHeight);
4601 const deUint8* const data = (const deUint8*)ptr;
4602 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
4603 const ConstPixelBufferAccess& refAccess (verifyContext.getReferenceTarget().getAccess());
// Zero threshold: rendering here is exact (flat white points on black).
4605 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4606 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4609 vkd.unmapMemory(device, *memory);
// RAII bundle for the objects created by createPipelineWithResources().
// vk::Move<> members destroy their handles automatically, so no explicit
// cleanup is required by the owner.
4614 struct PipelineResources
// Graphics pipeline used by the render commands.
4616 vk::Move<vk::VkPipeline> pipeline;
// Layout for the (optional) single descriptor set; null when no bindings.
4617 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
// Pipeline layout referencing descriptorSetLayout and push-constant ranges.
4618 vk::Move<vk::VkPipelineLayout> pipelineLayout;
// Build a complete graphics pipeline (plus descriptor-set layout and pipeline
// layout) with fixed state shared by all render-pass commands in this file:
// full-viewport rendering, no culling, no blending, 1x multisampling, and a
// pass-through depth/stencil state. Results are returned via 'resources'.
//
// - bindings may be empty, in which case no descriptor-set layout is created
//   and the pipeline layout has zero set layouts.
// - vertexBindingDescriptions/vertexAttributeDescriptions may be empty for
//   pipelines that fetch vertex data from descriptors instead of VI state.
//
// NOTE(review): many initializer fields (pNext, flags, depth/stencil enables,
// dynamic state, etc.) fall on lines elided from this listing.
4621 void createPipelineWithResources (const vk::DeviceInterface& vkd,
4622 const vk::VkDevice device,
4623 const vk::VkRenderPass renderPass,
4624 const deUint32 subpass,
4625 const vk::VkShaderModule& vertexShaderModule,
4626 const vk::VkShaderModule& fragmentShaderModule,
4627 const deUint32 viewPortWidth,
4628 const deUint32 viewPortHeight,
4629 const vector<vk::VkVertexInputBindingDescription>& vertexBindingDescriptions,
4630 const vector<vk::VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
4631 const vector<vk::VkDescriptorSetLayoutBinding>& bindings,
4632 const vk::VkPrimitiveTopology topology,
4633 deUint32 pushConstantRangeCount,
4634 const vk::VkPushConstantRange* pushConstantRanges,
4635 PipelineResources& resources)
// Descriptor-set layout: only created when the caller supplies bindings.
4637 if (!bindings.empty())
4639 const vk::VkDescriptorSetLayoutCreateInfo createInfo =
4641 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
4645 (deUint32)bindings.size(),
4646 bindings.empty() ? DE_NULL : &bindings[0]
4649 resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
// Pipeline layout: references the set layout above if one was created.
4653 const vk::VkDescriptorSetLayout descriptorSetLayout_ = *resources.descriptorSetLayout;
4654 const vk::VkPipelineLayoutCreateInfo createInfo =
4656 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
4660 resources.descriptorSetLayout ? 1u : 0u,
4661 resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,
4663 pushConstantRangeCount,
4667 resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
// Shader stages: one vertex + one fragment stage, no specialization info.
4671 const vk::VkPipelineShaderStageCreateInfo shaderStages[] =
4674 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4677 vk::VK_SHADER_STAGE_VERTEX_BIT,
4683 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4686 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
4687 fragmentShaderModule,
// Depth/stencil: compare ops are ALWAYS and all stencil ops KEEP, i.e. the
// state is a pass-through for both faces.
4692 const vk::VkPipelineDepthStencilStateCreateInfo depthStencilState =
4694 vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
4699 vk::VK_COMPARE_OP_ALWAYS,
4703 vk::VK_STENCIL_OP_KEEP,
4704 vk::VK_STENCIL_OP_KEEP,
4705 vk::VK_STENCIL_OP_KEEP,
4706 vk::VK_COMPARE_OP_ALWAYS,
4712 vk::VK_STENCIL_OP_KEEP,
4713 vk::VK_STENCIL_OP_KEEP,
4714 vk::VK_STENCIL_OP_KEEP,
4715 vk::VK_COMPARE_OP_ALWAYS,
// Vertex input state taken verbatim from the caller's descriptions.
4723 const vk::VkPipelineVertexInputStateCreateInfo vertexInputState =
4725 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
4729 (deUint32)vertexBindingDescriptions.size(),
4730 vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],
4732 (deUint32)vertexAttributeDescriptions.size(),
4733 vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
4735 const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyState =
4737 vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
// Static viewport/scissor covering the whole target.
4743 const vk::VkViewport viewports[] =
4745 { 0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f }
4747 const vk::VkRect2D scissors[] =
4749 { { 0, 0 }, { (deUint32)viewPortWidth, (deUint32)viewPortHeight } }
4751 const vk::VkPipelineViewportStateCreateInfo viewportState =
4753 vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
4756 DE_LENGTH_OF_ARRAY(viewports),
4758 DE_LENGTH_OF_ARRAY(scissors),
// Rasterization: filled polygons, no culling, CCW front faces.
4761 const vk::VkPipelineRasterizationStateCreateInfo rasterState =
4763 vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
4769 vk::VK_POLYGON_MODE_FILL,
4770 vk::VK_CULL_MODE_NONE,
4771 vk::VK_FRONT_FACE_COUNTER_CLOCKWISE,
// Single-sample rendering; all sample-mask bits enabled.
4778 const vk::VkSampleMask sampleMask = ~0u;
4779 const vk::VkPipelineMultisampleStateCreateInfo multisampleState =
4781 vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
4785 vk::VK_SAMPLE_COUNT_1_BIT,
// Color blend: ONE/ZERO ADD with full write mask — effectively replace.
4792 const vk::VkPipelineColorBlendAttachmentState attachments[] =
4796 vk::VK_BLEND_FACTOR_ONE,
4797 vk::VK_BLEND_FACTOR_ZERO,
4798 vk::VK_BLEND_OP_ADD,
4799 vk::VK_BLEND_FACTOR_ONE,
4800 vk::VK_BLEND_FACTOR_ZERO,
4801 vk::VK_BLEND_OP_ADD,
4802 (vk::VK_COLOR_COMPONENT_R_BIT|
4803 vk::VK_COLOR_COMPONENT_G_BIT|
4804 vk::VK_COLOR_COMPONENT_B_BIT|
4805 vk::VK_COLOR_COMPONENT_A_BIT)
4808 const vk::VkPipelineColorBlendStateCreateInfo colorBlendState =
4810 vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
4815 vk::VK_LOGIC_OP_COPY,
4816 DE_LENGTH_OF_ARRAY(attachments),
4818 { 0.0f, 0.0f, 0.0f, 0.0f }
// Assemble the pipeline from the states above.
4820 const vk::VkGraphicsPipelineCreateInfo createInfo =
4822 vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
4826 DE_LENGTH_OF_ARRAY(shaderStages),
4830 &inputAssemblyState,
4838 *resources.pipelineLayout,
4845 resources.pipeline = vk::createGraphicsPipeline(vkd, device, 0, &createInfo);
// Render-pass command that binds the test buffer as a 16-bit index buffer and
// draws one point per index. The vertex shader ("index-buffer.vert")
// presumably converts each index value into a framebuffer position — verify()
// mirrors this by decoding each index's low byte as x and high byte as y.
// TODO(review): confirm the decode against the shader source.
4849 class RenderIndexBuffer : public RenderPassCommand
4852 RenderIndexBuffer (void) {}
4853 ~RenderIndexBuffer (void) {}
4855 const char* getName (void) const { return "RenderIndexBuffer"; }
4856 void logPrepare (TestLog&, size_t) const;
4857 void logSubmit (TestLog&, size_t) const;
4858 void prepare (PrepareRenderPassContext&);
4859 void submit (SubmitContext& context);
4860 void verify (VerifyRenderPassContext&, size_t);
// Pipeline/layout objects created in prepare().
4863 PipelineResources m_resources;
// Size of the buffer under test, captured in prepare() for use in verify().
4864 vk::VkDeviceSize m_bufferSize;
// Log what prepare() will do for this command.
4867 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4869 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
4872 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4874 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
// Build the point-list pipeline (no vertex input, no descriptors) and record
// the buffer size for verification.
4877 void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4879 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4880 const vk::VkDevice device = context.getContext().getDevice();
4881 const vk::VkRenderPass renderPass = context.getRenderPass();
4882 const deUint32 subpass = 0;
4883 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4884 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4886 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4887 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4888 m_bufferSize = context.getBufferSize();
// Bind the buffer as a UINT16 index buffer and draw bufferSize/2 indices.
4891 void RenderIndexBuffer::submit (SubmitContext& context)
4893 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4894 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4896 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4897 vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
// Two bytes per 16-bit index.
4898 vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
// Mark in the reference image every pixel the draw is expected to touch:
// each 16-bit buffer element yields a white point at (low byte, high byte).
4901 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4903 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4905 const deUint8 x = context.getReference().get(pos * 2);
4906 const deUint8 y = context.getReference().get((pos * 2) + 1);
4908 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that binds the test buffer as a vertex buffer of
// R8G8_UNORM attributes and draws one point per two-byte element. verify()
// decodes each element as an (x, y) pixel position, matching the index-buffer
// variant above.
4912 class RenderVertexBuffer : public RenderPassCommand
4915 RenderVertexBuffer (void) {}
4916 ~RenderVertexBuffer (void) {}
4918 const char* getName (void) const { return "RenderVertexBuffer"; }
4919 void logPrepare (TestLog&, size_t) const;
4920 void logSubmit (TestLog&, size_t) const;
4921 void prepare (PrepareRenderPassContext&);
4922 void submit (SubmitContext& context);
4923 void verify (VerifyRenderPassContext&, size_t);
// Pipeline/layout objects created in prepare().
4926 PipelineResources m_resources;
// Size of the buffer under test, captured in prepare() for use in verify().
4927 vk::VkDeviceSize m_bufferSize;
// Log what prepare() will do for this command.
4930 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4932 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
4935 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4937 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
// Build a point-list pipeline with a single R8G8_UNORM per-vertex attribute
// sourced from binding 0, and record the buffer size for verification.
// NOTE(review): binding stride and attribute location/offset fields fall on
// lines elided from this listing.
4940 void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4942 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4943 const vk::VkDevice device = context.getContext().getDevice();
4944 const vk::VkRenderPass renderPass = context.getRenderPass();
4945 const deUint32 subpass = 0;
4946 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4947 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4949 vector<vk::VkVertexInputAttributeDescription> vertexAttributeDescriptions;
4950 vector<vk::VkVertexInputBindingDescription> vertexBindingDescriptions;
4953 const vk::VkVertexInputBindingDescription vertexBindingDescription =
4957 vk::VK_VERTEX_INPUT_RATE_VERTEX
4960 vertexBindingDescriptions.push_back(vertexBindingDescription);
4963 const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
4967 vk::VK_FORMAT_R8G8_UNORM,
4971 vertexAttributeDescriptions.push_back(vertexAttributeDescription);
4973 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4974 vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4976 m_bufferSize = context.getBufferSize();
// Bind the buffer at offset 0 and draw bufferSize/2 vertices (2 bytes each).
4979 void RenderVertexBuffer::submit (SubmitContext& context)
4981 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4982 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4983 const vk::VkDeviceSize offset = 0;
4984 const vk::VkBuffer buffer = context.getBuffer();
4986 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4987 vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
4988 vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0);
// Mark in the reference image every pixel the draw is expected to touch:
// each two-byte element yields a white point at (byte0, byte1).
4991 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
4993 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4995 const deUint8 x = context.getReference().get(pos * 2);
4996 const deUint8 y = context.getReference().get((pos * 2) + 1);
4998 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as a
// uniform buffer. Because a UBO range is limited to MAX_UNIFORM_BUFFER_SIZE
// bytes, the buffer is sliced into ceil(size / MAX_UNIFORM_BUFFER_SIZE)
// descriptor sets, each drawn separately in submit().
5002 class RenderVertexUniformBuffer : public RenderPassCommand
5005 RenderVertexUniformBuffer (void) {}
5006 ~RenderVertexUniformBuffer (void);
5008 const char* getName (void) const { return "RenderVertexUniformBuffer"; }
5009 void logPrepare (TestLog&, size_t) const;
5010 void logSubmit (TestLog&, size_t) const;
5011 void prepare (PrepareRenderPassContext&);
5012 void submit (SubmitContext& context);
5013 void verify (VerifyRenderPassContext&, size_t);
// Pipeline/layout objects created in prepare().
5016 PipelineResources m_resources;
// Pool owns the sets below; FREE_DESCRIPTOR_SET flag is used at creation.
5017 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// One descriptor set per MAX_UNIFORM_BUFFER_SIZE-byte slice of the buffer.
5018 vector<vk::VkDescriptorSet> m_descriptorSets;
// Size of the buffer under test, captured in prepare().
5020 vk::VkDeviceSize m_bufferSize;
// Destructor body is empty in this listing; sets are reclaimed when the pool
// (a Move<> member) is destroyed.
5023 RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
// Log what prepare() will do for this command.
5027 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5029 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
5032 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5034 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Create the pipeline with one UNIFORM_BUFFER binding, then allocate and
// write one descriptor set per buffer slice.
5037 void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
5039 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5040 const vk::VkDevice device = context.getContext().getDevice();
5041 const vk::VkRenderPass renderPass = context.getRenderPass();
5042 const deUint32 subpass = 0;
5043 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
5044 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5045 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5047 m_bufferSize = context.getBufferSize();
5050 const vk::VkDescriptorSetLayoutBinding binding =
5053 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5055 vk::VK_SHADER_STAGE_VERTEX_BIT,
5059 bindings.push_back(binding);
5062 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5063 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor per MAX_UNIFORM_BUFFER_SIZE-byte slice (round up).
5066 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
5067 const vk::VkDescriptorPoolSize poolSizes =
5069 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5072 const vk::VkDescriptorPoolCreateInfo createInfo =
5074 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5076 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5083 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5084 m_descriptorSets.resize(descriptorCount);
5087 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5089 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5090 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5092 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): ownership passes to m_descriptorSets / the pool.
5100 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// Bind this slice: offset is the slice start; range is the remainder for
// the final (possibly short) slice, otherwise the full slice size.
5103 const vk::VkDescriptorBufferInfo bufferInfo =
5105 context.getBuffer(),
5106 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
5107 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5108 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5109 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5111 const vk::VkWriteDescriptorSet write =
5113 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5115 m_descriptorSets[descriptorSetNdx],
5119 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5125 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Draw each slice: bind its descriptor set and emit size/2 points (the
// shader reads two bytes per point — same (x, y) decode as verify()).
5130 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5132 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5133 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5135 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5137 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5139 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5140 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5141 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5142 const deUint32 count = (deUint32)(size / 2);
5144 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5145 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
// Mirror submit() on the CPU: for every two-byte element of every slice,
// paint a white point at (byte0, byte1) in the reference image.
5149 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5151 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5153 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5154 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5155 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5156 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5157 const size_t count = size / 2;
5159 for (size_t pos = 0; pos < count; pos++)
5161 const deUint8 x = context.getReference().get(offset + pos * 2);
5162 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5164 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as a
// uniform *texel* buffer of R16_UINT texels. A texel-buffer view is limited
// to maxTexelBufferElements texels (2 bytes each), so the buffer is sliced
// into ceil(size / (maxTexels * 2)) views/descriptor sets.
// NOTE(review): unlike the other commands, the VkBufferView handles are raw
// (created with createBufferView, destroyed manually in the destructor), so
// m_vkd/m_device must be valid by destruction time — their assignment falls
// on lines elided from this listing (presumably at the top of prepare();
// confirm).
5169 class RenderVertexUniformTexelBuffer : public RenderPassCommand
5172 RenderVertexUniformTexelBuffer (void) {}
5173 ~RenderVertexUniformTexelBuffer (void);
5175 const char* getName (void) const { return "RenderVertexUniformTexelBuffer"; }
5176 void logPrepare (TestLog&, size_t) const;
5177 void logSubmit (TestLog&, size_t) const;
5178 void prepare (PrepareRenderPassContext&);
5179 void submit (SubmitContext& context);
5180 void verify (VerifyRenderPassContext&, size_t);
// Pipeline/layout objects created in prepare().
5183 PipelineResources m_resources;
5184 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5185 vector<vk::VkDescriptorSet> m_descriptorSets;
// Raw handles — destroyed explicitly in the destructor below.
5186 vector<vk::VkBufferView> m_bufferViews;
// Device interface/handle cached for the manual view destruction.
5188 const vk::DeviceInterface* m_vkd;
5189 vk::VkDevice m_device;
5190 vk::VkDeviceSize m_bufferSize;
// Device limit: maxTexelBufferElements, queried in prepare().
5191 deUint32 m_maxUniformTexelCount;
// Destroy every buffer view that was successfully created; null out the
// handles so double-destruction is impossible.
5194 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5196 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5198 if (!!m_bufferViews[bufferViewNdx])
5200 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5201 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
// Log what prepare() will do for this command.
5206 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5208 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
5211 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5213 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Create the pipeline with one UNIFORM_TEXEL_BUFFER binding, then for every
// slice create an R16_UINT buffer view and a descriptor set referencing it.
5216 void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
5218 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5219 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5220 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5221 const vk::VkDevice device = context.getContext().getDevice();
5222 const vk::VkRenderPass renderPass = context.getRenderPass();
5223 const deUint32 subpass = 0;
5224 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
5225 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5226 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5230 m_bufferSize = context.getBufferSize();
// Each R16 texel is 2 bytes; this limit caps texels per view.
5231 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5234 const vk::VkDescriptorSetLayoutBinding binding =
5237 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5239 vk::VK_SHADER_STAGE_VERTEX_BIT,
5243 bindings.push_back(binding);
5246 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5247 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor/view per maxTexelCount*2-byte slice (round up).
5250 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
5251 const vk::VkDescriptorPoolSize poolSizes =
5253 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5256 const vk::VkDescriptorPoolCreateInfo createInfo =
5258 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5260 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5267 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
// Pre-size with null handles so the destructor can safely skip gaps.
5268 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5269 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5272 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Texels in this slice: remainder for the last slice, else the maximum.
5274 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5275 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5276 : m_maxUniformTexelCount * 2) / 2;
5277 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5278 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5280 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5288 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// View over this slice of the buffer, interpreted as R16_UINT texels.
5291 const vk::VkBufferViewCreateInfo createInfo =
5293 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5297 context.getBuffer(),
5298 vk::VK_FORMAT_R16_UINT,
5299 descriptorSetNdx * m_maxUniformTexelCount * 2,
5303 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5307 const vk::VkWriteDescriptorSet write =
5309 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5311 m_descriptorSets[descriptorSetNdx],
5315 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5318 &m_bufferViews[descriptorSetNdx]
5321 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Draw each slice: bind its descriptor set and emit one point per texel.
5326 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5328 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5329 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5331 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5333 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5335 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5336 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5337 : m_maxUniformTexelCount * 2) / 2;
5339 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5340 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
// Mirror submit() on the CPU: each 16-bit texel paints a white point at
// (low byte, high byte) in the reference image.
5344 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5346 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5348 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 2;
5349 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5350 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5351 : m_maxUniformTexelCount * 2) / 2;
5353 for (size_t pos = 0; pos < (size_t)count; pos++)
5355 const deUint8 x = context.getReference().get(offset + pos * 2);
5356 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5358 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as a
// storage buffer. Slicing follows the uniform-buffer variant, but with the
// much larger MAX_STORAGE_BUFFER_SIZE (1<<28) per descriptor.
5363 class RenderVertexStorageBuffer : public RenderPassCommand
5366 RenderVertexStorageBuffer (void) {}
5367 ~RenderVertexStorageBuffer (void);
5369 const char* getName (void) const { return "RenderVertexStorageBuffer"; }
5370 void logPrepare (TestLog&, size_t) const;
5371 void logSubmit (TestLog&, size_t) const;
5372 void prepare (PrepareRenderPassContext&);
5373 void submit (SubmitContext& context);
5374 void verify (VerifyRenderPassContext&, size_t);
// Pipeline/layout objects created in prepare().
5377 PipelineResources m_resources;
5378 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// One descriptor set per MAX_STORAGE_BUFFER_SIZE-byte slice of the buffer.
5379 vector<vk::VkDescriptorSet> m_descriptorSets;
// Size of the buffer under test, captured in prepare().
5381 vk::VkDeviceSize m_bufferSize;
// Destructor body is empty in this listing; the Move<> pool reclaims sets.
5384 RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
// Log what prepare() will do for this command.
5388 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5390 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
5393 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5395 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Create the pipeline with one STORAGE_BUFFER binding, then allocate and
// write one descriptor set per buffer slice.
5398 void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
5400 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5401 const vk::VkDevice device = context.getContext().getDevice();
5402 const vk::VkRenderPass renderPass = context.getRenderPass();
5403 const deUint32 subpass = 0;
5404 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
5405 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5406 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5408 m_bufferSize = context.getBufferSize();
5411 const vk::VkDescriptorSetLayoutBinding binding =
5414 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5416 vk::VK_SHADER_STAGE_VERTEX_BIT,
5420 bindings.push_back(binding);
5423 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5424 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor per MAX_STORAGE_BUFFER_SIZE-byte slice (round up).
5427 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
5428 const vk::VkDescriptorPoolSize poolSizes =
5430 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5433 const vk::VkDescriptorPoolCreateInfo createInfo =
5435 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5437 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5444 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5445 m_descriptorSets.resize(descriptorCount);
5448 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5450 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5451 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5453 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): ownership passes to m_descriptorSets / the pool.
5461 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// Bind this slice; de::min clamps the final slice to the remainder.
5464 const vk::VkDescriptorBufferInfo bufferInfo =
5466 context.getBuffer(),
5467 descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
5468 de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
5470 const vk::VkWriteDescriptorSet write =
5472 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5474 m_descriptorSets[descriptorSetNdx],
5478 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5484 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Draw each slice: bind its descriptor set and emit size/2 points.
5489 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5491 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5492 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5494 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5496 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5498 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5499 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5500 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5502 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5503 vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
// Mirror submit() on the CPU: each two-byte element of every slice paints a
// white point at (byte0, byte1) in the reference image.
5507 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5509 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5511 const size_t offset = descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5512 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5513 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5514 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5516 for (size_t pos = 0; pos < size / 2; pos++)
5518 const deUint8 x = context.getReference().get(offset + pos * 2);
5519 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5521 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that reads the test buffer as a storage texel buffer
// (R32_UINT views) from the vertex shader and plots the decoded coordinates.
// The buffer is split across several views/descriptor sets because a single
// view is limited by maxTexelBufferElements (see prepare()).
5526 class RenderVertexStorageTexelBuffer : public RenderPassCommand
5529 RenderVertexStorageTexelBuffer (void) {}
5530 ~RenderVertexStorageTexelBuffer (void);
5532 const char* getName (void) const { return "RenderVertexStorageTexelBuffer"; }
5533 void logPrepare (TestLog&, size_t) const;
5534 void logSubmit (TestLog&, size_t) const;
5535 void prepare (PrepareRenderPassContext&);
5536 void submit (SubmitContext& context);
5537 void verify (VerifyRenderPassContext&, size_t);
5540 PipelineResources m_resources;
5541 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// Raw (non-RAII) handles: descriptor sets are freed with the pool, but the
// buffer views must be destroyed manually in the destructor.
5542 vector<vk::VkDescriptorSet> m_descriptorSets;
5543 vector<vk::VkBufferView> m_bufferViews;
// Cached in prepare() so the destructor can destroy the buffer views.
5545 const vk::DeviceInterface* m_vkd;
5546 vk::VkDevice m_device;
5547 vk::VkDeviceSize m_bufferSize;
5548 deUint32 m_maxStorageTexelCount;
// Destroy the manually-created buffer views; they are raw handles, not
// vk::Move<> wrappers, so no automatic cleanup happens for them.
5551 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5553 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5555 if (!!m_bufferViews[bufferViewNdx])
5557 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
// Null the handle so a double-destroy is impossible.
5558 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
// Log the prepare step for this command. The message names the actual
// descriptor type used (storage *texel* buffer, see prepare()), fixing a
// copy-paste from the plain storage-buffer command.
5563 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5565 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
// Log the submit step for this command. Message corrected to say "storage
// texel buffer" — the descriptor bound in submit() is
// VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, not a plain storage buffer.
5568 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5570 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
// Build everything needed to render from the buffer as a storage texel buffer:
// pipeline (point list, vertex shader reads the texel buffer), one descriptor
// pool, and one descriptor set + R32_UINT buffer view per maxTexelBufferElements
// sized chunk of the buffer.
// NOTE(review): several initializer-field lines are elided in this capture.
5573 void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
5575 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5576 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5577 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5578 const vk::VkDevice device = context.getContext().getDevice();
5579 const vk::VkRenderPass renderPass = context.getRenderPass();
5580 const deUint32 subpass = 0;
5581 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
5582 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5583 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Cache values used by submit()/verify() and the destructor.
5587 m_bufferSize = context.getBufferSize();
5588 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5591 const vk::VkDescriptorSetLayoutBinding binding =
5594 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5596 vk::VK_SHADER_STAGE_VERTEX_BIT,
5600 bindings.push_back(binding);
5603 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5604 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor per maxTexelBufferElements * 4 bytes (R32_UINT texels).
5607 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
5608 const vk::VkDescriptorPoolSize poolSizes =
5610 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5613 const vk::VkDescriptorPoolCreateInfo createInfo =
5615 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5617 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5624 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5625 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5626 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5629 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5631 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5632 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5634 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): ownership transfers to the raw-handle vector; the pool frees it.
5642 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5645 const vk::VkBufferViewCreateInfo createInfo =
5647 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5651 context.getBuffer(),
5652 vk::VK_FORMAT_R32_UINT,
// Offset/range in bytes: each view covers one chunk, the last view is clamped
// to the remaining buffer size.
5653 descriptorSetNdx * m_maxStorageTexelCount * 4,
5654 (deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
5657 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5661 const vk::VkWriteDescriptorSet write =
5663 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5665 m_descriptorSets[descriptorSetNdx],
5669 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5672 &m_bufferViews[descriptorSetNdx]
5675 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record the draws: one vkCmdDraw per descriptor set, drawing one point per
// byte pair in the chunk covered by that set's buffer view.
5680 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5682 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5683 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5685 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5687 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Vertex count = bytes in this chunk / 2 (two coordinate bytes per point);
// chunk size is m_maxStorageTexelCount * 4 bytes except the last remainder.
5689 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5690 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5691 : m_maxStorageTexelCount * 4) / 2;
5693 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5694 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
// Reference computation mirroring submit(): set the pixel at every (x,y)
// byte pair the shader would have read, chunk by chunk.
5698 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5700 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5702 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
// Same point count as submitted for this chunk (bytes / 2).
5703 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5704 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5705 : m_maxStorageTexelCount * 4) / 2;
// Sanity checks that the chunk bounds stay inside the reference buffer.
5707 DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5708 DE_ASSERT(context.getReference().getSize() > offset);
5709 DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5711 for (size_t pos = 0; pos < (size_t)count; pos++)
5713 const deUint8 x = context.getReference().get(offset + pos * 2);
5714 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5716 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that reads the test image as a storage image from the
// vertex shader; each texel's RGBA channels are decoded into two pixel
// coordinates that get rendered white (see submit()/verify()).
5721 class RenderVertexStorageImage : public RenderPassCommand
5724 RenderVertexStorageImage (void) {}
5725 ~RenderVertexStorageImage (void);
5727 const char* getName (void) const { return "RenderVertexStorageImage"; }
5728 void logPrepare (TestLog&, size_t) const;
5729 void logSubmit (TestLog&, size_t) const;
5730 void prepare (PrepareRenderPassContext&);
5731 void submit (SubmitContext& context);
5732 void verify (VerifyRenderPassContext&, size_t);
// All Vulkan objects are RAII vk::Move<> handles, so the destructor is trivial.
5735 PipelineResources m_resources;
5736 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5737 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5738 vk::Move<vk::VkImageView> m_imageView;
5741 RenderVertexStorageImage::~RenderVertexStorageImage (void)
// Log the prepare step (pipeline creation) for the test-case log.
5745 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5747 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
// Log the submit step (the actual draw) for the test-case log.
5750 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5752 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
// Create pipeline, descriptor pool/set, and an RGBA8 image view over the test
// image, then write the view into the descriptor set as a storage image.
// NOTE(review): several initializer-field lines are elided in this capture.
5755 void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
5757 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5758 const vk::VkDevice device = context.getContext().getDevice();
5759 const vk::VkRenderPass renderPass = context.getRenderPass();
5760 const deUint32 subpass = 0;
5761 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
5762 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5763 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Single storage-image binding visible to the vertex stage.
5766 const vk::VkDescriptorSetLayoutBinding binding =
5769 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5771 vk::VK_SHADER_STAGE_VERTEX_BIT,
5775 bindings.push_back(binding);
5778 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5779 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5782 const vk::VkDescriptorPoolSize poolSizes =
5784 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5787 const vk::VkDescriptorPoolCreateInfo createInfo =
5789 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5791 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5798 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5802 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5803 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5805 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5813 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 view over the test image's color aspect.
5816 const vk::VkImageViewCreateInfo createInfo =
5818 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5823 vk::VK_IMAGE_VIEW_TYPE_2D,
5824 vk::VK_FORMAT_R8G8B8A8_UNORM,
5825 vk::makeComponentMappingRGBA(),
5827 vk::VK_IMAGE_ASPECT_COLOR_BIT,
5835 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Point the descriptor at the view, using whatever layout the image is
// currently in according to the test context.
5839 const vk::VkDescriptorImageInfo imageInfo =
5843 context.getImageLayout()
5845 const vk::VkWriteDescriptorSet write =
5847 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5853 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5859 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record the draw: two points per image texel (each RGBA texel encodes two
// (x,y) coordinate pairs — see verify()).
5864 void RenderVertexStorageImage::submit (SubmitContext& context)
5866 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5867 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5869 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5871 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5872 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
// Reference computation: each reference-image texel yields two pixels —
// (r,g) for even vertex indices and (b,a) for odd ones — painted white.
// NOTE(review): the even/odd branch lines appear elided in this capture.
5875 void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5877 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5879 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
5880 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5883 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5885 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
// Render-pass command that samples the test image via a combined image
// sampler in the vertex shader and plots the decoded coordinates; structure
// parallels RenderVertexStorageImage plus a sampler.
5889 class RenderVertexSampledImage : public RenderPassCommand
5892 RenderVertexSampledImage (void) {}
5893 ~RenderVertexSampledImage (void);
5895 const char* getName (void) const { return "RenderVertexSampledImage"; }
5896 void logPrepare (TestLog&, size_t) const;
5897 void logSubmit (TestLog&, size_t) const;
5898 void prepare (PrepareRenderPassContext&);
5899 void submit (SubmitContext& context);
5900 void verify (VerifyRenderPassContext&, size_t);
// All vk::Move<> RAII handles — no manual cleanup needed in the destructor.
5903 PipelineResources m_resources;
5904 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5905 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5906 vk::Move<vk::VkImageView> m_imageView;
5907 vk::Move<vk::VkSampler> m_sampler;
5910 RenderVertexSampledImage::~RenderVertexSampledImage (void)
// Log the prepare step (pipeline creation) for the test-case log.
5914 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5916 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
// Log the submit step (the actual draw) for the test-case log.
5919 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5921 log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
// Create pipeline, descriptor pool/set, an RGBA8 image view, and a nearest-
// filter clamp-to-edge sampler, then write view+sampler into the descriptor
// set as a combined image sampler.
// NOTE(review): several initializer-field lines are elided in this capture.
5924 void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
5926 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5927 const vk::VkDevice device = context.getContext().getDevice();
5928 const vk::VkRenderPass renderPass = context.getRenderPass();
5929 const deUint32 subpass = 0;
5930 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
5931 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5932 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Single combined-image-sampler binding visible to the vertex stage.
5935 const vk::VkDescriptorSetLayoutBinding binding =
5938 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5940 vk::VK_SHADER_STAGE_VERTEX_BIT,
5944 bindings.push_back(binding);
5947 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5948 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5951 const vk::VkDescriptorPoolSize poolSizes =
5953 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5956 const vk::VkDescriptorPoolCreateInfo createInfo =
5958 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5960 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5967 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5971 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5972 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5974 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5982 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 view over the test image's color aspect.
5985 const vk::VkImageViewCreateInfo createInfo =
5987 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5992 vk::VK_IMAGE_VIEW_TYPE_2D,
5993 vk::VK_FORMAT_R8G8B8A8_UNORM,
5994 vk::makeComponentMappingRGBA(),
5996 vk::VK_IMAGE_ASPECT_COLOR_BIT,
6004 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Nearest filtering + clamp-to-edge so texel fetches are exact (no blending),
// which the byte-exact verify() step depends on.
6008 const vk::VkSamplerCreateInfo createInfo =
6010 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
6014 vk::VK_FILTER_NEAREST,
6015 vk::VK_FILTER_NEAREST,
6017 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
6018 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6019 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6020 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6025 vk::VK_COMPARE_OP_ALWAYS,
6028 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
6032 m_sampler = vk::createSampler(vkd, device, &createInfo);
6036 const vk::VkDescriptorImageInfo imageInfo =
6040 context.getImageLayout()
6042 const vk::VkWriteDescriptorSet write =
6044 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6050 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
6056 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record the draw: two points per image texel, matching verify() below.
6061 void RenderVertexSampledImage::submit (SubmitContext& context)
6063 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6064 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6066 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6068 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
6069 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
// Reference computation: identical to RenderVertexStorageImage::verify —
// each texel's (r,g) and (b,a) channel pairs become white pixels.
// NOTE(review): the even/odd branch lines appear elided in this capture.
6072 void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
6074 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
6076 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
6077 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
6080 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
6082 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
// Render-pass command that reads the buffer from the fragment shader as a
// uniform buffer, split into MAX_UNIFORM_BUFFER_SIZE chunks (one descriptor
// set each); draws a full-screen quad per chunk with push-constant call params.
6086 class RenderFragmentUniformBuffer : public RenderPassCommand
6089 RenderFragmentUniformBuffer (void) {}
6090 ~RenderFragmentUniformBuffer (void);
6092 const char* getName (void) const { return "RenderFragmentUniformBuffer"; }
6093 void logPrepare (TestLog&, size_t) const;
6094 void logSubmit (TestLog&, size_t) const;
6095 void prepare (PrepareRenderPassContext&);
6096 void submit (SubmitContext& context);
6097 void verify (VerifyRenderPassContext&, size_t);
6100 PipelineResources m_resources;
6101 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// Raw handles; freed when the pool is destroyed.
6102 vector<vk::VkDescriptorSet> m_descriptorSets;
// Clamped buffer size and target dimensions cached by prepare().
6104 vk::VkDeviceSize m_bufferSize;
6105 size_t m_targetWidth;
6106 size_t m_targetHeight;
6109 RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
// Log the prepare step (pipeline creation) for the test-case log.
6113 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6115 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
// Log the submit step (the actual draws) for the test-case log.
6118 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6120 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Build pipeline (full-screen quad, fragment shader reads a uniform buffer),
// one descriptor pool, and one descriptor set per MAX_UNIFORM_BUFFER_SIZE
// chunk of the (size-clamped) buffer; a push-constant range carries per-draw
// call parameters to the fragment shader.
// NOTE(review): several initializer-field lines are elided in this capture.
6123 void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
6125 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6126 const vk::VkDevice device = context.getContext().getDevice();
6127 const vk::VkRenderPass renderPass = context.getRenderPass();
6128 const deUint32 subpass = 0;
6129 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6130 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
6131 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Clamp to MAX_SIZE so the number of uniform-buffer chunks stays bounded.
6133 m_bufferSize = de::min(context.getBufferSize(), (vk::VkDeviceSize)MAX_SIZE);
6134 m_targetWidth = context.getTargetWidth();
6135 m_targetHeight = context.getTargetHeight();
6138 const vk::VkDescriptorSetLayoutBinding binding =
6141 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6143 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6147 bindings.push_back(binding);
6149 const vk::VkPushConstantRange pushConstantRange =
6151 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6156 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6157 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6160 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
6161 const vk::VkDescriptorPoolSize poolSizes =
6163 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6166 const vk::VkDescriptorPoolCreateInfo createInfo =
6168 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6170 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6177 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6178 m_descriptorSets.resize(descriptorCount);
6181 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6183 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6184 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6186 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): raw handle stored in the vector; freed with the pool.
6194 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// Each set is bound to one chunk; the last chunk may be shorter.
6197 const vk::VkDescriptorBufferInfo bufferInfo =
6199 context.getBuffer(),
6200 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
6201 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6202 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6203 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6205 const vk::VkWriteDescriptorSet write =
6207 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6209 m_descriptorSets[descriptorSetNdx],
6213 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6219 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record one full-screen-quad draw per descriptor set. The push constants
// tell the fragment shader which chunk this draw covers (callId) and how many
// buffer values map to each pixel (valuesPerPixel).
6224 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6226 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6227 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6229 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6231 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Local push-constant struct; layout must match the fragment shader's
// push-constant block (surrounding struct declaration lines are elided here).
6235 const deUint32 callId;
6236 const deUint32 valuesPerPixel;
6239 (deUint32)descriptorSetNdx,
6240 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6243 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6244 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6245 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// CPU re-implementation of the fragment shader: for every target pixel,
// chase `valuesPerPixel` chained 32-bit reads through each covering chunk and
// unpack the final value into an RGBA color.
6249 void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
6251 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
6252 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
6253 const size_t arrayIntSize = arraySize * 4;
6255 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6256 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// NOTE(review): pixel index uses a hard-coded 256 row stride; assumes the
// render target is 256 pixels wide — confirm against m_targetWidth.
6258 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (arrayIntSize / valuesPerPixel), m_descriptorSets.size() - 1);
6260 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6262 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
6263 const deUint32 callId = (deUint32)descriptorSetNdx;
6265 const deUint32 id = callId * ((deUint32)arrayIntSize / valuesPerPixel) + (deUint32)y * 256u + (deUint32)x;
// Pixels before this call's range were not touched by this draw.
6267 if (y * 256u + x < callId * (arrayIntSize / valuesPerPixel))
6271 deUint32 value = id;
// Follow the value chain: each iteration reads the little-endian 32-bit word
// the previous value indexes (mod the chunk's dword count).
6273 for (deUint32 i = 0; i < valuesPerPixel; i++)
6275 value = ((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 0))
6276 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 1)) << 8u)
6277 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 2)) << 16u)
6278 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 3)) << 24u);
// Unpack the final 32-bit value into four 8-bit channels, normalized to [0,1].
6281 const UVec4 vec ((value >> 0u) & 0xFFu,
6282 (value >> 8u) & 0xFFu,
6283 (value >> 16u) & 0xFFu,
6284 (value >> 24u) & 0xFFu);
6286 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that reads the whole buffer from the fragment shader as
// a single storage buffer (no chunking needed — storage buffers have no small
// size limit here), via one descriptor set and push-constant call parameters.
6292 class RenderFragmentStorageBuffer : public RenderPassCommand
6295 RenderFragmentStorageBuffer (void) {}
6296 ~RenderFragmentStorageBuffer (void);
6298 const char* getName (void) const { return "RenderFragmentStorageBuffer"; }
6299 void logPrepare (TestLog&, size_t) const;
6300 void logSubmit (TestLog&, size_t) const;
6301 void prepare (PrepareRenderPassContext&);
6302 void submit (SubmitContext& context);
6303 void verify (VerifyRenderPassContext&, size_t);
// RAII vk::Move<> handles plus values cached by prepare() for submit/verify.
6306 PipelineResources m_resources;
6307 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6308 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
6310 vk::VkDeviceSize m_bufferSize;
6311 size_t m_targetWidth;
6312 size_t m_targetHeight;
6315 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
// Log the prepare step (pipeline creation) for the test-case log.
6319 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6321 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
// Log the submit step (the actual draw) for the test-case log.
6324 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6326 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Build pipeline (full-screen quad, fragment shader reads a storage buffer),
// a pool sized for exactly one descriptor, and one descriptor set bound to
// the entire buffer; a push-constant range carries per-draw parameters.
// NOTE(review): several initializer-field lines are elided in this capture.
6329 void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
6331 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6332 const vk::VkDevice device = context.getContext().getDevice();
6333 const vk::VkRenderPass renderPass = context.getRenderPass();
6334 const deUint32 subpass = 0;
6335 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6336 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
6337 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Unlike the uniform-buffer variant, the full buffer size is used unclamped.
6339 m_bufferSize = context.getBufferSize();
6340 m_targetWidth = context.getTargetWidth();
6341 m_targetHeight = context.getTargetHeight();
6344 const vk::VkDescriptorSetLayoutBinding binding =
6347 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6349 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6353 bindings.push_back(binding);
6355 const vk::VkPushConstantRange pushConstantRange =
6357 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6362 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6363 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6366 const deUint32 descriptorCount = 1;
6367 const vk::VkDescriptorPoolSize poolSizes =
6369 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6372 const vk::VkDescriptorPoolCreateInfo createInfo =
6374 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6376 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6383 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6387 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6388 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6390 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6398 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// Bind the whole buffer to the single descriptor set.
6401 const vk::VkDescriptorBufferInfo bufferInfo =
6403 context.getBuffer(),
6407 const vk::VkWriteDescriptorSet write =
6409 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6411 m_descriptorSet.get(),
6415 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6421 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record a single full-screen-quad draw. Push constants tell the fragment
// shader how many 32-bit values map to each pixel and the buffer's byte size.
6426 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6428 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6429 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6431 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
// Push-constant struct fields; must match the shader's push-constant block
// (surrounding struct declaration lines are elided in this capture).
6435 const deUint32 valuesPerPixel;
6436 const deUint32 bufferSize;
6439 (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6440 (deUint32)m_bufferSize
6443 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6444 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6445 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// CPU re-implementation of the fragment shader: per pixel, follow a chain of
// `valuesPerPixel` little-endian 32-bit reads through the buffer (indices
// wrap modulo the dword count) and unpack the final value into RGBA.
6448 void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
6450 const deUint32 valuesPerPixel = (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);
6452 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6453 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// NOTE(review): pixel id uses a hard-coded 256 row stride; assumes a
// 256-pixel-wide render target — confirm against m_targetWidth.
6455 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6457 deUint32 value = id;
6459 for (deUint32 i = 0; i < valuesPerPixel; i++)
6461 value = (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
6462 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
6463 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
6464 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);
// Unpack the final 32-bit value into four 8-bit channels, normalized to [0,1].
6467 const UVec4 vec ((value >> 0u) & 0xFFu,
6468 (value >> 8u) & 0xFFu,
6469 (value >> 16u) & 0xFFu,
6470 (value >> 24u) & 0xFFu);
6472 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that reads the buffer from the fragment shader as a
// uniform texel buffer, split across multiple views/descriptor sets because
// a single view is limited by maxTexelBufferElements.
6476 class RenderFragmentUniformTexelBuffer : public RenderPassCommand
6479 RenderFragmentUniformTexelBuffer (void) {}
6480 ~RenderFragmentUniformTexelBuffer (void);
6482 const char* getName (void) const { return "RenderFragmentUniformTexelBuffer"; }
6483 void logPrepare (TestLog&, size_t) const;
6484 void logSubmit (TestLog&, size_t) const;
6485 void prepare (PrepareRenderPassContext&);
6486 void submit (SubmitContext& context);
6487 void verify (VerifyRenderPassContext&, size_t);
6490 PipelineResources m_resources;
6491 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// Raw handles: sets are freed with the pool; buffer views are destroyed
// manually in the destructor.
6492 vector<vk::VkDescriptorSet> m_descriptorSets;
6493 vector<vk::VkBufferView> m_bufferViews;
// Cached in prepare() so the destructor can destroy the buffer views.
6495 const vk::DeviceInterface* m_vkd;
6496 vk::VkDevice m_device;
6497 vk::VkDeviceSize m_bufferSize;
6498 deUint32 m_maxUniformTexelCount;
6499 size_t m_targetWidth;
6500 size_t m_targetHeight;
// Destroy the manually-created buffer views (raw handles, no RAII wrapper).
6503 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6505 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6507 if (!!m_bufferViews[bufferViewNdx])
6509 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
// Null the handle so a double-destroy is impossible.
6510 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
// Log the prepare step for this command. Message corrected to say "uniform
// texel buffer" — the descriptor created in prepare() is
// VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, not a plain uniform buffer.
6515 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6517 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
// Log the submit step for this command. Message corrected to say "uniform
// texel buffer" to match the descriptor type actually bound (copy-paste fix
// from the plain uniform-buffer command).
6520 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6522 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
// Build everything needed to draw using the test buffer as a series of
// R32_UINT uniform texel buffers: pipeline, descriptor pool, and one
// descriptor set + buffer view per maxTexelBufferElements-sized chunk.
// (Several struct-initializer lines are elided in this listing.)
6525 void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
6527 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6528 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6529 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6530 const vk::VkDevice device = context.getContext().getDevice();
6531 const vk::VkRenderPass renderPass = context.getRenderPass();
6532 const deUint32 subpass = 0;
6533 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6534 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
6535 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Cache sizes/limits used later by submit() and verify(); the destructor
// also needs vkd/device (stored elsewhere) to free the raw buffer views.
6539 m_bufferSize = context.getBufferSize();
6540 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6541 m_targetWidth = context.getTargetWidth();
6542 m_targetHeight = context.getTargetHeight();
// Single uniform texel buffer binding, visible to the fragment shader only.
6545 const vk::VkDescriptorSetLayoutBinding binding =
6548 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6550 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6554 bindings.push_back(binding);
// Push constants deliver per-draw parameters to the fragment shader.
6556 const vk::VkPushConstantRange pushConstantRange =
6558 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6563 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6564 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
// One descriptor set per m_maxUniformTexelCount 4-byte texels of the buffer.
6567 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
6568 const vk::VkDescriptorPoolSize poolSizes =
6570 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6573 const vk::VkDescriptorPoolCreateInfo createInfo =
6575 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6577 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6584 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6585 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6586 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6589 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Number of whole 4-byte texels in this chunk; the final chunk may be short.
6591 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6592 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6593 : m_maxUniformTexelCount * 4) / 4;
6594 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6595 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6597 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6605 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// View over this chunk of the buffer, reinterpreted as R32_UINT texels.
// The view's range (elided here) presumably uses 'count' — confirm in full source.
6608 const vk::VkBufferViewCreateInfo createInfo =
6610 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6614 context.getBuffer(),
6615 vk::VK_FORMAT_R32_UINT,
6616 descriptorSetNdx * m_maxUniformTexelCount * 4,
6620 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
// Point this chunk's descriptor set at its buffer view.
6624 const vk::VkWriteDescriptorSet write =
6626 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6628 m_descriptorSets[descriptorSetNdx],
6632 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6635 &m_bufferViews[descriptorSetNdx]
6638 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record one full-screen-quad draw per descriptor set (i.e. per buffer
// chunk); each draw receives its chunk index and derived parameters
// through push constants.
6643 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6645 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6646 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6648 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6650 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Fields of the local push-constant struct 'callParams' (its declaration
// wrapper is elided in this listing); initialized below in field order.
6654 const deUint32 callId;
6655 const deUint32 valuesPerPixel;
6656 const deUint32 maxUniformTexelCount;
6659 (deUint32)descriptorSetNdx,
// valuesPerPixel = ceil(total addressable texels / pixel count).
6660 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6661 m_maxUniformTexelCount
6664 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6665 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
// Six vertices = one quad as two triangles.
6666 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// Build the reference image by replaying, on the CPU, the fragment shader's
// chain of dependent texel fetches against the reference buffer contents.
6670 void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
// Must match the valuesPerPixel computation in submit().
6672 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);
6674 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6675 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6677 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6679 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Byte offset of this chunk within the reference buffer.
6681 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 4;
6682 const deUint32 callId = (deUint32)descriptorSetNdx;
6684 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
// Texel count of this chunk (last chunk may be short) — mirrors prepare().
6685 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6686 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6687 : m_maxUniformTexelCount * 4) / 4;
// Pixels below this call's range were not written by draw 'callId';
// the branch body (elided in this listing) presumably skips — confirm.
6689 if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
6693 deUint32 value = id;
// Pointer-chase: each iteration reads the little-endian 32-bit texel
// selected by the previous value, modulo the chunk's texel count.
6695 for (deUint32 i = 0; i < valuesPerPixel; i++)
6697 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6698 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6699 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6700 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
// Unpack the final 32-bit value into RGBA8 and write the reference pixel.
6703 const UVec4 vec ((value >> 0u) & 0xFFu,
6704 (value >> 8u) & 0xFFu,
6705 (value >> 16u) & 0xFFu,
6706 (value >> 24u) & 0xFFu);
6708 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that draws a full-screen quad reading the test buffer
// through fragment-shader storage texel buffers (one view per
// maxTexelBufferElements-sized chunk).
6714 class RenderFragmentStorageTexelBuffer : public RenderPassCommand
6717 RenderFragmentStorageTexelBuffer (void) {}
6718 ~RenderFragmentStorageTexelBuffer (void);
6720 const char* getName (void) const { return "RenderFragmentStorageTexelBuffer"; }
6721 void logPrepare (TestLog&, size_t) const;
6722 void logSubmit (TestLog&, size_t) const;
6723 void prepare (PrepareRenderPassContext&);
6724 void submit (SubmitContext& context);
6725 void verify (VerifyRenderPassContext&, size_t);
// Created in prepare(); buffer views are raw handles destroyed in the dtor.
6728 PipelineResources m_resources;
6729 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6730 vector<vk::VkDescriptorSet> m_descriptorSets;
6731 vector<vk::VkBufferView> m_bufferViews;
// Cached so the destructor can destroy m_bufferViews, and so submit()/
// verify() can recompute the same per-draw parameters.
6733 const vk::DeviceInterface* m_vkd;
6734 vk::VkDevice m_device;
6735 vk::VkDeviceSize m_bufferSize;
6736 deUint32 m_maxStorageTexelCount;
6737 size_t m_targetWidth;
6738 size_t m_targetHeight;
6741 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6743 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6745 if (!!m_bufferViews[bufferViewNdx])
6747 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6748 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6753 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6755 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
6758 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6760 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Build pipeline, descriptor pool, and one descriptor set + R32_UINT buffer
// view per maxTexelBufferElements-sized chunk of the test buffer, bound as
// storage texel buffers. (Several struct-initializer lines are elided here.)
6763 void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
6765 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6766 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6767 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6768 const vk::VkDevice device = context.getContext().getDevice();
6769 const vk::VkRenderPass renderPass = context.getRenderPass();
6770 const deUint32 subpass = 0;
6771 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6772 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
6773 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Cache sizes/limits reused by submit() and verify().
6777 m_bufferSize = context.getBufferSize();
6778 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6779 m_targetWidth = context.getTargetWidth();
6780 m_targetHeight = context.getTargetHeight();
// Single storage texel buffer binding, fragment stage only.
6783 const vk::VkDescriptorSetLayoutBinding binding =
6786 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6788 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6792 bindings.push_back(binding);
// Push constants deliver per-draw parameters to the fragment shader.
6794 const vk::VkPushConstantRange pushConstantRange =
6796 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6801 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6802 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
// One descriptor set per m_maxStorageTexelCount 4-byte texels of the buffer.
6805 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
6806 const vk::VkDescriptorPoolSize poolSizes =
6808 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6811 const vk::VkDescriptorPoolCreateInfo createInfo =
6813 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6815 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6822 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6823 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6824 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6827 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Whole 4-byte texels in this chunk; the final chunk may be short.
6829 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6830 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6831 : m_maxStorageTexelCount * 4) / 4;
6832 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6833 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6835 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6843 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
// View over this chunk of the buffer as R32_UINT texels; its range
// (elided here) presumably uses 'count' — confirm in full source.
6846 const vk::VkBufferViewCreateInfo createInfo =
6848 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6852 context.getBuffer(),
6853 vk::VK_FORMAT_R32_UINT,
6854 descriptorSetNdx * m_maxStorageTexelCount * 4,
6858 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
// Point this chunk's descriptor set at its buffer view.
6862 const vk::VkWriteDescriptorSet write =
6864 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6866 m_descriptorSets[descriptorSetNdx],
6870 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6873 &m_bufferViews[descriptorSetNdx]
6876 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// Record one full-screen-quad draw per descriptor set (buffer chunk), with
// per-draw parameters passed via push constants.
6881 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6883 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6884 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6886 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6888 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Fields of the local push-constant struct 'callParams' (declaration
// wrapper elided in this listing); initialized below in field order.
6892 const deUint32 callId;
6893 const deUint32 valuesPerPixel;
6894 const deUint32 maxStorageTexelCount;
6895 const deUint32 width;
6898 (deUint32)descriptorSetNdx,
// valuesPerPixel = ceil(total addressable texels / pixel count).
6899 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6900 m_maxStorageTexelCount,
// 'width' = texel count of this chunk (last chunk may be short).
6901 (deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6902 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6903 : m_maxStorageTexelCount * 4u) / 4u
6906 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6907 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
// Six vertices = one quad as two triangles.
6908 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// Build the reference image by replaying the fragment shader's chain of
// dependent texel fetches on the CPU, per buffer chunk.
6912 void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
// Must match the valuesPerPixel computation in submit().
6914 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);
6916 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6917 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6919 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6921 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Byte offset of this chunk within the reference buffer.
6923 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
6924 const deUint32 callId = (deUint32)descriptorSetNdx;
6926 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
// Texel count of this chunk (last chunk may be short) — mirrors prepare().
6927 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6928 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6929 : m_maxStorageTexelCount * 4) / 4;
// Pixels below this call's range were not written by draw 'callId';
// the branch body (elided in this listing) presumably skips — confirm.
6931 if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
6935 deUint32 value = id;
// Pointer-chase: each iteration reads the little-endian 32-bit texel
// selected by the previous value, modulo the chunk's texel count.
6937 for (deUint32 i = 0; i < valuesPerPixel; i++)
6939 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6940 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6941 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6942 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
// Unpack the final 32-bit value into RGBA8 and write the reference pixel.
6945 const UVec4 vec ((value >> 0u) & 0xFFu,
6946 (value >> 8u) & 0xFFu,
6947 (value >> 16u) & 0xFFu,
6948 (value >> 24u) & 0xFFu);
6950 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that draws a full-screen quad reading the test image
// as a fragment-shader storage image.
6956 class RenderFragmentStorageImage : public RenderPassCommand
6959 RenderFragmentStorageImage (void) {}
6960 ~RenderFragmentStorageImage (void);
6962 const char* getName (void) const { return "RenderFragmentStorageImage"; }
6963 void logPrepare (TestLog&, size_t) const;
6964 void logSubmit (TestLog&, size_t) const;
6965 void prepare (PrepareRenderPassContext&);
6966 void submit (SubmitContext& context);
6967 void verify (VerifyRenderPassContext&, size_t);
// All resources are created in prepare() and owned via Move<> wrappers,
// so the destructor has nothing to free manually.
6970 PipelineResources m_resources;
6971 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6972 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
6973 vk::Move<vk::VkImageView> m_imageView;
6976 RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
// Log what the prepare step does for this command.
6980 void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
6982 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
// Log what the submit step does for this command.
6985 void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
6987 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
// Build pipeline, descriptor pool/set, and an RGBA8 image view exposing the
// test image to the fragment shader as a storage image.
// (Several struct-initializer lines are elided in this listing.)
6990 void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
6992 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6993 const vk::VkDevice device = context.getContext().getDevice();
6994 const vk::VkRenderPass renderPass = context.getRenderPass();
6995 const deUint32 subpass = 0;
6996 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6997 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
6998 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Single storage image binding, fragment stage only; no push constants.
7001 const vk::VkDescriptorSetLayoutBinding binding =
7004 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7006 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7010 bindings.push_back(binding);
7013 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7014 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
// Pool sized for exactly one storage-image descriptor set.
7017 const vk::VkDescriptorPoolSize poolSizes =
7019 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7022 const vk::VkDescriptorPoolCreateInfo createInfo =
7024 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7026 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7033 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7037 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
7038 const vk::VkDescriptorSetAllocateInfo allocateInfo =
7040 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7048 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 color view of the whole test image.
7051 const vk::VkImageViewCreateInfo createInfo =
7053 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7058 vk::VK_IMAGE_VIEW_TYPE_2D,
7059 vk::VK_FORMAT_R8G8B8A8_UNORM,
7060 vk::makeComponentMappingRGBA(),
7062 vk::VK_IMAGE_ASPECT_COLOR_BIT,
7070 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Point the descriptor set at the view, using the image's current layout.
7074 const vk::VkDescriptorImageInfo imageInfo =
7078 context.getImageLayout()
7080 const vk::VkWriteDescriptorSet write =
7082 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7088 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7094 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7099 void RenderFragmentStorageImage::submit (SubmitContext& context)
7101 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7102 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7104 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7106 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7107 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// Build the reference image by replaying the shader's chain of dependent
// image reads on the CPU against the reference image contents.
7110 void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
7112 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
// At least one read per pixel; more when the source image has more texels
// than the 256x256 render target has pixels.
7113 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7115 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7116 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7118 UVec4 value = UVec4(x, y, 0u, 0u);
// Each iteration derives the next sample position from the previous
// RGBA value (wrapped to the image size) — a dependent-read chain.
7120 for (deUint32 i = 0; i < valuesPerPixel; i++)
7122 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7123 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
// Convert the normalized RGBA8 pixel back to integer channels.
7125 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7126 (deUint32)(floatValue.y() * 255.0f),
7127 (deUint32)(floatValue.z() * 255.0f),
7128 (deUint32)(floatValue.w() * 255.0f));
7131 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that draws a full-screen quad sampling the test image
// through a fragment-shader combined image sampler.
7135 class RenderFragmentSampledImage : public RenderPassCommand
7138 RenderFragmentSampledImage (void) {}
7139 ~RenderFragmentSampledImage (void);
7141 const char* getName (void) const { return "RenderFragmentSampledImage"; }
7142 void logPrepare (TestLog&, size_t) const;
7143 void logSubmit (TestLog&, size_t) const;
7144 void prepare (PrepareRenderPassContext&);
7145 void submit (SubmitContext& context);
7146 void verify (VerifyRenderPassContext&, size_t);
// All resources are created in prepare() and owned via Move<> wrappers,
// so the destructor has nothing to free manually.
7149 PipelineResources m_resources;
7150 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
7151 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
7152 vk::Move<vk::VkImageView> m_imageView;
7153 vk::Move<vk::VkSampler> m_sampler;
7156 RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
7160 void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7162 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
7165 void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7167 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
// Build pipeline, descriptor pool/set, an RGBA8 image view, and a nearest-
// filtering clamp-to-edge sampler exposing the test image to the fragment
// shader as a combined image sampler. (Several struct-initializer lines are
// elided in this listing.)
7170 void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
7172 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7173 const vk::VkDevice device = context.getContext().getDevice();
7174 const vk::VkRenderPass renderPass = context.getRenderPass();
7175 const deUint32 subpass = 0;
7176 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
7177 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
7178 vector<vk::VkDescriptorSetLayoutBinding> bindings;
// Single combined-image-sampler binding, fragment stage only.
7181 const vk::VkDescriptorSetLayoutBinding binding =
7184 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7186 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7190 bindings.push_back(binding);
7193 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7194 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
// Pool sized for exactly one combined-image-sampler descriptor set.
7197 const vk::VkDescriptorPoolSize poolSizes =
7199 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7202 const vk::VkDescriptorPoolCreateInfo createInfo =
7204 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7206 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7213 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7217 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
7218 const vk::VkDescriptorSetAllocateInfo allocateInfo =
7220 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7228 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 color view of the whole test image.
7231 const vk::VkImageViewCreateInfo createInfo =
7233 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7238 vk::VK_IMAGE_VIEW_TYPE_2D,
7239 vk::VK_FORMAT_R8G8B8A8_UNORM,
7240 vk::makeComponentMappingRGBA(),
7242 vk::VK_IMAGE_ASPECT_COLOR_BIT,
7250 m_imageView = vk::createImageView(vkd, device, &createInfo);
// Nearest filtering and clamp-to-edge addressing so texel values are read
// back exactly (no interpolation) during verification.
7254 const vk::VkSamplerCreateInfo createInfo =
7256 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
7260 vk::VK_FILTER_NEAREST,
7261 vk::VK_FILTER_NEAREST,
7263 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
7264 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7265 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7266 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7271 vk::VK_COMPARE_OP_ALWAYS,
7274 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
7278 m_sampler = vk::createSampler(vkd, device, &createInfo);
// Point the descriptor set at the view+sampler in the image's current layout.
7282 const vk::VkDescriptorImageInfo imageInfo =
7286 context.getImageLayout()
7288 const vk::VkWriteDescriptorSet write =
7290 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7296 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7302 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7307 void RenderFragmentSampledImage::submit (SubmitContext& context)
7309 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7310 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7312 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7314 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7315 vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
// Build the reference image by replaying the shader's chain of dependent
// samples on the CPU (nearest sampling makes this exact).
7318 void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7320 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
// At least one read per pixel; more when the source image has more texels
// than the 256x256 render target has pixels.
7321 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7323 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7324 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7326 UVec4 value = UVec4(x, y, 0u, 0u);
// Each iteration derives the next sample position from the previous
// RGBA value (wrapped to the image size) — a dependent-read chain.
7328 for (deUint32 i = 0; i < valuesPerPixel; i++)
7330 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7331 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
// Convert the normalized RGBA8 pixel back to integer channels.
7333 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7334 (deUint32)(floatValue.y() * 255.0f),
7335 (deUint32)(floatValue.z() * 255.0f),
7336 (deUint32)(floatValue.w() * 255.0f));
7340 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
// Fragment of the operation opcodes the test generator sequences into
// command streams (the enum's opening declaration falls outside this view).
7358 OP_BUFFER_BINDMEMORY,
7360 OP_QUEUE_WAIT_FOR_IDLE,
7361 OP_DEVICE_WAIT_FOR_IDLE,
7363 OP_COMMAND_BUFFER_BEGIN,
7364 OP_COMMAND_BUFFER_END,
7366 // Buffer transfer operations
7370 OP_BUFFER_COPY_TO_BUFFER,
7371 OP_BUFFER_COPY_FROM_BUFFER,
7373 OP_BUFFER_COPY_TO_IMAGE,
7374 OP_BUFFER_COPY_FROM_IMAGE,
// Image setup and transfer operations
7378 OP_IMAGE_BINDMEMORY,
7380 OP_IMAGE_TRANSITION_LAYOUT,
7382 OP_IMAGE_COPY_TO_BUFFER,
7383 OP_IMAGE_COPY_FROM_BUFFER,
7385 OP_IMAGE_COPY_TO_IMAGE,
7386 OP_IMAGE_COPY_FROM_IMAGE,
7388 OP_IMAGE_BLIT_TO_IMAGE,
7389 OP_IMAGE_BLIT_FROM_IMAGE,
// Pipeline barrier variants (global / buffer / image memory barriers)
7393 OP_PIPELINE_BARRIER_GLOBAL,
7394 OP_PIPELINE_BARRIER_BUFFER,
7395 OP_PIPELINE_BARRIER_IMAGE,
7397 // Renderpass operations
7398 OP_RENDERPASS_BEGIN,
7401 // Commands inside render pass
7402 OP_RENDER_VERTEX_BUFFER,
7403 OP_RENDER_INDEX_BUFFER,
7405 OP_RENDER_VERTEX_UNIFORM_BUFFER,
7406 OP_RENDER_FRAGMENT_UNIFORM_BUFFER,
7408 OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
7409 OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,
7411 OP_RENDER_VERTEX_STORAGE_BUFFER,
7412 OP_RENDER_FRAGMENT_STORAGE_BUFFER,
7414 OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
7415 OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,
7417 OP_RENDER_VERTEX_STORAGE_IMAGE,
7418 OP_RENDER_FRAGMENT_STORAGE_IMAGE,
7420 OP_RENDER_VERTEX_SAMPLED_IMAGE,
7421 OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
// Value of a separate Stage enumeration (its declaration is elided here).
7427 STAGE_COMMAND_BUFFER,
7432 vk::VkAccessFlags getWriteAccessFlags (void)
7434 return vk::VK_ACCESS_SHADER_WRITE_BIT
7435 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7436 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7437 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
7438 | vk::VK_ACCESS_HOST_WRITE_BIT
7439 | vk::VK_ACCESS_MEMORY_WRITE_BIT;
7442 bool isWriteAccess (vk::VkAccessFlagBits access)
7444 return (getWriteAccessFlags() & access) != 0;
// Interior of class CacheState (class head elided in this listing): tracks,
// per destination pipeline stage, which writes are available/visible and
// which operations are incomplete, to decide what barriers are required.
7450 CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);
// True if performing 'access' at 'stage' would be hazard-free given the
// barriers applied so far.
7452 bool isValid (vk::VkPipelineStageFlagBits stage,
7453 vk::VkAccessFlagBits access) const;
// Record that 'access' was performed at 'stage', updating hazard state.
7455 void perform (vk::VkPipelineStageFlagBits stage,
7456 vk::VkAccessFlagBits access);
// Model implicit host synchronization at command-buffer submission.
7458 void submitCommandBuffer (void);
// Model vkQueue/DeviceWaitIdle making all device work complete.
7459 void waitForIdle (void);
// Compute src/dst scopes for a barrier that would make everything
// available, visible and complete.
7461 void getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7462 vk::VkAccessFlags& srcAccesses,
7463 vk::VkPipelineStageFlags& dstStages,
7464 vk::VkAccessFlags& dstAccesses) const;
// Apply a pipeline barrier with the given scopes to the tracked state.
7466 void barrier (vk::VkPipelineStageFlags srcStages,
7467 vk::VkAccessFlags srcAccesses,
7468 vk::VkPipelineStageFlags dstStages,
7469 vk::VkAccessFlags dstAccesses);
// Apply a barrier that additionally performs an image layout transition.
7471 void imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7472 vk::VkAccessFlags srcAccesses,
7473 vk::VkPipelineStageFlags dstStages,
7474 vk::VkAccessFlags dstAccesses);
// Validate that the given scopes are sufficient for a layout transition.
7476 void checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7477 vk::VkAccessFlags srcAccesses,
7478 vk::VkPipelineStageFlags dstStages,
7479 vk::VkAccessFlags dstAccesses);
7481 // Everything is clean and there is no need for barriers
7482 bool isClean (void) const;
7484 vk::VkPipelineStageFlags getAllowedStages (void) const { return m_allowedStages; }
7485 vk::VkAccessFlags getAllowedAcceses (void) const { return m_allowedAccesses; }
7487 // Limit which stages and accesses are used by the CacheState tracker
7488 const vk::VkPipelineStageFlags m_allowedStages;
7489 const vk::VkAccessFlags m_allowedAccesses;
7491 // [dstStage][srcStage] = srcAccesses
7492 // In stage dstStage write srcAccesses from srcStage are not yet available
7493 vk::VkAccessFlags m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7494 // Latest pipeline transition is not available in stage
7495 bool m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
7496 // [dstStage] = dstAccesses
7497 // In stage dstStage ops with dstAccesses are not yet visible
7498 vk::VkAccessFlags m_invisibleOperations[PIPELINESTAGE_LAST];
7500 // [dstStage] = srcStage
7501 // Memory operation in srcStage have not completed before dstStage
7502 vk::VkPipelineStageFlags m_incompleteOperations[PIPELINESTAGE_LAST];
// Initialize a fully clean state: everything visible, available and
// complete for every allowed stage. Stages outside m_allowedStages are
// skipped (their table entries stay untouched).
7505 CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7506 : m_allowedStages (allowedStages)
7507 , m_allowedAccesses (allowedAccesses)
// Iterate every single-bit stage flag up to the highest allowed bit.
7509 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7511 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
// Skip bits not in the allowed set (loop continuation elided in this view).
7513 if ((dstStage_ & m_allowedStages) == 0)
7516 // All operations are initially visible
7517 m_invisibleOperations[dstStage] = 0;
7519 // There are no incomplete read operations initially
7520 m_incompleteOperations[dstStage] = 0;
7522 // There are no incomplete layout transitions
7523 m_unavailableLayoutTransition[dstStage] = false;
7525 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7527 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7529 if ((srcStage_ & m_allowedStages) == 0)
7532 // There are no write operations that are not yet available
7534 m_unavailableWriteOperations[dstStage][srcStage] = 0;
// Returns whether performing 'access' at 'stage' is hazard-free: false when
// prior writes are not yet visible (or a layout transition is pending), or
// when a write would race with operations not yet complete in this stage.
// (The return statements are elided in this listing.)
7539 bool CacheState::isValid (vk::VkPipelineStageFlagBits stage,
7540 vk::VkAccessFlagBits access) const
7542 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7543 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7545 const PipelineStage dstStage = pipelineStageFlagToPipelineStage(stage);
7547 // Previous operations are not visible to access on stage
7548 if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
// Writes additionally require all prior memory operations to have completed
// (write-after-read / write-after-write hazards).
7551 if (isWriteAccess(access))
7553 // Memory operations from other stages have not completed before
7555 if (m_incompleteOperations[dstStage] != 0)
// Record that 'access' was performed at 'stage': the operation becomes
// incomplete w.r.t. every allowed stage, and — if it is a write — makes all
// accesses invisible and the write unavailable until barriered.
7562 void CacheState::perform (vk::VkPipelineStageFlagBits stage,
7563 vk::VkAccessFlagBits access)
7565 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7566 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7568 const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7570 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7572 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
// Skip bits not in the allowed set (loop continuation elided in this view).
7574 if ((dstStage_ & m_allowedStages) == 0)
7577 // Mark stage as incomplete for all stages
7578 m_incompleteOperations[dstStage] |= stage;
7580 if (isWriteAccess(access))
7582 // Mark all accesses from all stages invisible
7583 m_invisibleOperations[dstStage] |= m_allowedAccesses;
7585 // Mark write access from srcStage unavailable to all stages
7586 m_unavailableWriteOperations[dstStage][srcStage] |= access;
// Model the implicit synchronization of queue submission: host reads and
// writes are flushed via a barrier from the host stage. (The barrier's
// destination-scope arguments are elided in this listing.)
7591 void CacheState::submitCommandBuffer (void)
7593 // Flush all host writes and reads
7594 barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7595 m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
// Model vkQueueWaitIdle/vkDeviceWaitIdle with two barriers: first make all
// writes available, then make them visible on the device side (all stages
// except host). (Some barrier arguments are elided in this listing.)
7600 void CacheState::waitForIdle (void)
7602 // Make all writes available
7603 barrier(m_allowedStages,
7604 m_allowedAccesses & getWriteAccessFlags(),
7608 // Make all writes visible on device side
7609 barrier(m_allowedStages,
7611 m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
// Compute stage/access masks for a pipeline barrier that would make every
// pending operation complete, every write available, and every access
// visible in all allowed stages (i.e. after which isClean() would hold).
// Results are accumulated into the output parameters with |=.
7615 void CacheState::getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7616 vk::VkAccessFlags& srcAccesses,
7617 vk::VkPipelineStageFlags& dstStages,
7618 vk::VkAccessFlags& dstAccesses) const
7625 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7627 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7629 if ((dstStage_ & m_allowedStages) == 0)
7632 // Make sure all previous operations are complete in all stages
7633 if (m_incompleteOperations[dstStage])
7635 dstStages |= dstStage_;
7636 srcStages |= m_incompleteOperations[dstStage];
7639 // Make sure all read operations are visible in dstStage
7640 if (m_invisibleOperations[dstStage])
7642 dstStages |= dstStage_;
7643 dstAccesses |= m_invisibleOperations[dstStage];
7646 // Make sure all write operations from all stages are available
7647 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7649 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7651 if ((srcStage_ & m_allowedStages) == 0)
7654 if (m_unavailableWriteOperations[dstStage][srcStage])
7656 dstStages |= dstStage_;
// NOTE(review): this ORs dstStage_ (not srcStage_) into srcStages even though
// the unavailable write originated in srcStage — confirm this is intentional
// and matches how barrier() clears m_unavailableWriteOperations.
7657 srcStages |= dstStage_;
7658 srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage];
7661 if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
7663 // Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
7664 // but has completed in srcStage.
7665 dstStages |= dstStage_;
7666 srcStages |= dstStage_;
// The computed masks must never exceed what this resource allows.
7671 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7672 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7673 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7674 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
// Debug-only sanity check for a barrier that performs an image layout
// transition: the transition would destroy the image contents, so the given
// masks must prove that all prior work has completed and at least one flush
// of each pending write has happened (or is requested in srcAccesses).
// In release builds this is a no-op.
7677 void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7678 vk::VkAccessFlags srcAccesses,
7679 vk::VkPipelineStageFlags dstStages,
7680 vk::VkAccessFlags dstAccesses)
7682 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7683 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7684 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7685 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
// Silence unused-parameter warnings in release builds, where the checks
// below are compiled out.
7687 DE_UNREF(srcStages);
7688 DE_UNREF(srcAccesses);
7690 DE_UNREF(dstStages);
7691 DE_UNREF(dstAccesses);
7693 #if defined(DE_DEBUG)
7694 // Check that all stages have completed before srcStages or are in srcStages.
7696 vk::VkPipelineStageFlags completedStages = srcStages;
7698 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7700 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7702 if ((srcStage_ & srcStages) == 0)
7705 completedStages |= (~m_incompleteOperations[srcStage]);
7708 DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
7711 // Check that any write is available at least in one stage. Since all stages are complete even single flush is enough.
7712 if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
7714 bool anyWriteAvailable = false;
7716 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7718 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7720 if ((dstStage_ & m_allowedStages) == 0)
7723 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7725 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7727 if ((srcStage_ & m_allowedStages) == 0)
// An entry differing from "every write access unavailable" means some
// flush has already made at least one write available for this pair.
7730 if (m_unavailableWriteOperations[dstStage][srcStage] != (getWriteAccessFlags() & m_allowedAccesses))
7732 anyWriteAvailable = true;
7738 DE_ASSERT(anyWriteAvailable);
// Apply a pipeline barrier that also performs an image layout transition.
// Unlike barrier(), the transition resets the bookkeeping: afterwards only
// the stages/accesses named in dstStages/dstAccesses are considered
// complete/visible, and all previous writes become available.
7743 void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7744 vk::VkAccessFlags srcAccesses,
7745 vk::VkPipelineStageFlags dstStages,
7746 vk::VkAccessFlags dstAccesses)
7748 checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7750 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7752 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7754 if ((dstStage_ & m_allowedStages) == 0)
7757 // All stages are incomplete after the barrier except each dstStage in itself.
7758 m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7760 // All memory operations are invisible unless they are listed in dstAccesses
7761 m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7763 // Layout transition is unavailable in stage unless it was listed in dstStages
7764 m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7766 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7768 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7770 if ((srcStage_ & m_allowedStages) == 0)
7773 // All write operations are available after layout transition
7774 m_unavailableWriteOperations[dstStage][srcStage] = 0;
// Apply an ordinary pipeline barrier (no layout transition) to the modeled
// cache state. Works in two phases: first snapshot the pre-barrier state,
// then (1) propagate completion/availability from each srcStage to each
// dstStage based on the snapshot, and (2) apply the explicit srcAccesses
// flush / dstAccesses invalidate semantics. The snapshot is required so the
// propagation in phase (1) does not observe its own updates.
7779 void CacheState::barrier (vk::VkPipelineStageFlags srcStages,
7780 vk::VkAccessFlags srcAccesses,
7781 vk::VkPipelineStageFlags dstStages,
7782 vk::VkAccessFlags dstAccesses)
7784 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7785 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7786 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7787 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
// Snapshot of the state before the barrier is applied.
7791 vk::VkPipelineStageFlags oldIncompleteOperations[PIPELINESTAGE_LAST];
7792 vk::VkAccessFlags oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7793 bool oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];
7795 deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
7796 deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
7797 deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));
// Phase 1: the execution dependency makes everything that was already
// complete/available in a srcStage also complete/available in every dstStage.
7799 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7801 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7803 if ((srcStage_ & srcStages) == 0)
7806 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7808 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7810 if ((dstStage_ & dstStages) == 0)
7813 // Stages that have completed before srcStage have also completed before dstStage
7814 m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
7816 // Image layout transition in srcStage are now available in dstStage
7817 m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];
7819 for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
7821 const PipelineStage sharedStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
7823 if ((sharedStage_ & m_allowedStages) == 0)
7826 // Writes that are available in srcStage are also available in dstStage
7827 m_unavailableWriteOperations[dstStage][sharedStage] &= oldUnavailableWriteOperations[srcStage][sharedStage];
// Phase 2: apply the memory dependency — flush srcAccesses, and if that
// leaves every write available to a dstStage, invalidate dstAccesses there.
7834 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7836 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7837 bool allWritesAvailable = true;
7839 if ((dstStage_ & dstStages) == 0)
7842 // Operations in srcStages have completed before any stage in dstStages
7843 m_incompleteOperations[dstStage] &= ~srcStages;
7845 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7847 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7849 if ((srcStage_ & m_allowedStages) == 0)
7852 // Make srcAccesses from srcStage available in dstStage
7853 if ((srcStage_ & srcStages) != 0)
7854 m_unavailableWriteOperations[dstStage][srcStage] &= ~srcAccesses;
7856 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
7857 allWritesAvailable = false;
7860 // If all writes are available in dstStage make dstAccesses also visible
7861 if (allWritesAvailable)
7862 m_invisibleOperations[dstStage] &= ~dstAccesses;
7866 bool CacheState::isClean (void) const
7868 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7870 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7872 if ((dstStage_ & m_allowedStages) == 0)
7875 // Some operations are not visible to some stages
7876 if (m_invisibleOperations[dstStage] != 0)
7879 // There are operation that have not completed yet
7880 if (m_incompleteOperations[dstStage] != 0)
7883 // Layout transition has not completed yet
7884 if (m_unavailableLayoutTransition[dstStage])
7887 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7889 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7891 if ((srcStage_ & m_allowedStages) == 0)
7894 // Some write operations are not available yet
7895 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
// Return whether the given image layout is usable with the given usage
// flags, i.e. whether an image created with 'usage' may be transitioned to
// and accessed in 'layout'.
7903 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
// GENERAL is compatible with any usage (return elided from this view).
7907 case vk::VK_IMAGE_LAYOUT_GENERAL:
7910 case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7911 return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7913 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7914 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
// Read-only depth/stencil is gated on the same attachment usage bit as the
// writable layout above.
7916 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7917 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7919 case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7920 // \todo [2016-03-09 mika] Should include input attachment
7921 return (usage & USAGE_SAMPLED_IMAGE) != 0;
7923 case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7924 return (usage & USAGE_TRANSFER_SRC) != 0;
7926 case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7927 return (usage & USAGE_TRANSFER_DST) != 0;
// PREINITIALIZED result elided from this view — presumably usage-independent
// since it is only valid as an initial layout; confirm against full source.
7929 case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7933 DE_FATAL("Unknown layout");
7938 size_t getNumberOfSupportedLayouts (Usage usage)
7940 const vk::VkImageLayout layouts[] =
7942 vk::VK_IMAGE_LAYOUT_GENERAL,
7943 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7944 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7945 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7946 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7947 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7948 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7950 size_t supportedLayoutCount = 0;
7952 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7954 const vk::VkImageLayout layout = layouts[layoutNdx];
7956 if (layoutSupportedByUsage(usage, layout))
7957 supportedLayoutCount++;
7960 return supportedLayoutCount;
7963 vk::VkImageLayout getRandomNextLayout (de::Random& rng,
7965 vk::VkImageLayout previousLayout)
7967 const vk::VkImageLayout layouts[] =
7969 vk::VK_IMAGE_LAYOUT_GENERAL,
7970 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7971 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7972 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7973 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7974 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7975 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7977 const size_t supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7979 DE_ASSERT(supportedLayoutCount > 0);
7981 size_t nextLayoutNdx = ((size_t)rng.getUint64()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7982 ? supportedLayoutCount
7983 : supportedLayoutCount - 1);
7985 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7987 const vk::VkImageLayout layout = layouts[layoutNdx];
7989 if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7991 if (nextLayoutNdx == 0)
7998 DE_FATAL("Unreachable");
7999 return vk::VK_IMAGE_LAYOUT_UNDEFINED;
// Constructor: start in host stage with a cache model restricted to the
// stages/accesses implied by 'usage'. Several initializers and member
// declarations are elided from this view (e.g. the RNG seeded with 'seed',
// hasBuffer/hasImage, queueIdle/deviceIdle); see the full source.
8004 State (Usage usage, deUint32 seed)
8005 : stage (STAGE_HOST)
8006 , cache (usageToStageFlags(usage), usageToAccessFlags(usage))
8009 , hostInvalidated (true)
8010 , hostFlushed (true)
8011 , memoryDefined (false)
8013 , hasBoundBufferMemory (false)
8015 , hasBoundImageMemory (false)
8016 , imageLayout (vk::VK_IMAGE_LAYOUT_UNDEFINED)
8017 , imageDefined (false)
8020 , commandBufferIsEmpty (true)
8021 , renderPassIsEmpty (true)
// True when host caches have been invalidated since the last device write.
8030 bool hostInvalidated;
// True when a buffer object currently has memory bound to it.
8035 bool hasBoundBufferMemory;
// True when an image object currently has memory bound to it.
8038 bool hasBoundImageMemory;
// Current layout of the image; UNDEFINED when no valid layout is set.
8039 vk::VkImageLayout imageLayout;
// True until the first command is recorded into the current command buffer.
8045 bool commandBufferIsEmpty;
// True until the first draw is recorded into the current render pass.
8046 bool renderPassIsEmpty;
// Collect into 'ops' every operation that is legal in the current state.
// The legality rules mirror Vulkan's requirements: host access needs mapped,
// flushed/invalidated memory; device access needs bound memory, a compatible
// image layout, and cache validity (state.cache.isValid) for the
// stage/access pair the operation would perform.
8049 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
// --- Host stage: mapping, host reads/writes, object lifetime, and
// transitions into a command buffer or to idle. ---
8051 if (state.stage == STAGE_HOST)
8053 if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
8055 // Host memory operations
8058 ops.push_back(OP_UNMAP);
8060 // Avoid flush and finish if they are not needed
8061 if (!state.hostFlushed)
8062 ops.push_back(OP_MAP_FLUSH);
8064 if (!state.hostInvalidated
8066 && ((usage & USAGE_HOST_READ) == 0
8067 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8068 && ((usage & USAGE_HOST_WRITE) == 0
8069 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
8071 ops.push_back(OP_MAP_INVALIDATE);
8074 if (usage & USAGE_HOST_READ
8075 && usage & USAGE_HOST_WRITE
8076 && state.memoryDefined
8077 && state.hostInvalidated
8079 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
8080 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8082 ops.push_back(OP_MAP_MODIFY);
8085 if (usage & USAGE_HOST_READ
8086 && state.memoryDefined
8087 && state.hostInvalidated
8089 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8091 ops.push_back(OP_MAP_READ);
8094 if (usage & USAGE_HOST_WRITE
8095 && state.hostInvalidated
8097 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
8099 ops.push_back(OP_MAP_WRITE);
8103 ops.push_back(OP_MAP);
8106 if (state.hasBoundBufferMemory && state.queueIdle)
8108 // \note Destroy only buffers after they have been bound
8109 ops.push_back(OP_BUFFER_DESTROY);
8113 if (state.hasBuffer)
8115 if (!state.hasBoundBufferMemory)
8116 ops.push_back(OP_BUFFER_BINDMEMORY);
8118 else if (!state.hasImage && supportsBuffers) // Avoid creating buffer if there is already image
8119 ops.push_back(OP_BUFFER_CREATE);
8122 if (state.hasBoundImageMemory && state.queueIdle)
8124 // \note Destroy only image after they have been bound
8125 ops.push_back(OP_IMAGE_DESTROY);
8131 if (!state.hasBoundImageMemory)
8132 ops.push_back(OP_IMAGE_BINDMEMORY);
8134 else if (!state.hasBuffer && supportsImages) // Avoid creating image if there is already buffer
8135 ops.push_back(OP_IMAGE_CREATE);
8138 // Host writes must be flushed before GPU commands and there must be
8139 // buffer or image for GPU commands
8140 if (state.hostFlushed
8141 && (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
8142 && (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
8143 && (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
8145 ops.push_back(OP_COMMAND_BUFFER_BEGIN);
8148 if (!state.deviceIdle)
8149 ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
8151 if (!state.queueIdle)
8152 ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
// --- Command buffer stage: barriers, transfer commands, layout transitions
// and starting a render pass. ---
8154 else if (state.stage == STAGE_COMMAND_BUFFER)
8156 if (!state.cache.isClean())
8158 ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8161 ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8163 if (state.hasBuffer)
8164 ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8167 if (state.hasBoundBufferMemory)
8169 if (usage & USAGE_TRANSFER_DST
8170 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8172 ops.push_back(OP_BUFFER_FILL);
8173 ops.push_back(OP_BUFFER_UPDATE);
8174 ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8175 ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8178 if (usage & USAGE_TRANSFER_SRC
8179 && state.memoryDefined
8180 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8182 ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8183 ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
// A layout transition is only useful when the layout is still undefined or
// there is more than one supported layout to transition between.
8187 if (state.hasBoundImageMemory
8188 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8189 || getNumberOfSupportedLayouts(usage) > 1))
8191 ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8194 if (usage & USAGE_TRANSFER_DST
8195 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8196 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8197 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8199 ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8200 ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8201 ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8204 if (usage & USAGE_TRANSFER_SRC
8205 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8206 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8207 && state.imageDefined
8208 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8210 ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8211 ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8212 ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
// A render pass can begin only if at least one render operation would be
// legal inside it (buffer- or image-based reads from vertex/fragment stages).
8217 // \todo [2016-03-09 mika] Add other usages?
8218 if ((state.memoryDefined
8219 && state.hasBoundBufferMemory
8220 && (((usage & USAGE_VERTEX_BUFFER)
8221 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8222 || ((usage & USAGE_INDEX_BUFFER)
8223 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8224 || ((usage & USAGE_UNIFORM_BUFFER)
8225 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8226 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8227 || ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
8228 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8229 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8230 || ((usage & USAGE_STORAGE_BUFFER)
8231 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8232 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8233 || ((usage & USAGE_STORAGE_TEXEL_BUFFER)
8234 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
8235 || (state.imageDefined
8236 && state.hasBoundImageMemory
8237 && (((usage & USAGE_STORAGE_IMAGE)
8238 && state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8239 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8240 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8241 || ((usage & USAGE_SAMPLED_IMAGE)
8242 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8243 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
8244 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8245 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
8247 ops.push_back(OP_RENDERPASS_BEGIN);
8250 // \note This depends on previous operations and has to be always the
8251 // last command buffer operation check
8252 if (ops.empty() || !state.commandBufferIsEmpty)
8253 ops.push_back(OP_COMMAND_BUFFER_END);
// --- Render pass stage: draw operations reading from the buffer/image
// through the various descriptor/attribute paths. ---
8255 else if (state.stage == STAGE_RENDER_PASS)
8257 if ((usage & USAGE_VERTEX_BUFFER) != 0
8258 && state.memoryDefined
8259 && state.hasBoundBufferMemory
8260 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8262 ops.push_back(OP_RENDER_VERTEX_BUFFER);
8265 if ((usage & USAGE_INDEX_BUFFER) != 0
8266 && state.memoryDefined
8267 && state.hasBoundBufferMemory
8268 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8270 ops.push_back(OP_RENDER_INDEX_BUFFER);
8273 if ((usage & USAGE_UNIFORM_BUFFER) != 0
8274 && state.memoryDefined
8275 && state.hasBoundBufferMemory)
8277 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8278 ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
8280 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8281 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
8284 if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
8285 && state.memoryDefined
8286 && state.hasBoundBufferMemory)
8288 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8289 ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
8291 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8292 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
8295 if ((usage & USAGE_STORAGE_BUFFER) != 0
8296 && state.memoryDefined
8297 && state.hasBoundBufferMemory)
8299 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8300 ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
8302 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8303 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
8306 if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
8307 && state.memoryDefined
8308 && state.hasBoundBufferMemory)
8310 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8311 ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
8313 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8314 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
8317 if ((usage & USAGE_STORAGE_IMAGE) != 0
8318 && state.imageDefined
8319 && state.hasBoundImageMemory
8320 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
8322 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8323 ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
8325 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8326 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
8329 if ((usage & USAGE_SAMPLED_IMAGE) != 0
8330 && state.imageDefined
8331 && state.hasBoundImageMemory
8332 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8333 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
8335 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8336 ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
8338 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8339 ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
8342 if (!state.renderPassIsEmpty)
8343 ops.push_back(OP_RENDERPASS_END);
8346 DE_FATAL("Unknown stage");
8349 void removeIllegalAccessFlags (vk::VkAccessFlags& accessflags, vk::VkPipelineStageFlags stageflags)
8351 if (!(stageflags & vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
8352 accessflags &= ~vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
8354 if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8355 accessflags &= ~vk::VK_ACCESS_INDEX_READ_BIT;
8357 if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8358 accessflags &= ~vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
8360 if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8361 vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8362 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8363 vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8364 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8365 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8366 accessflags &= ~vk::VK_ACCESS_UNIFORM_READ_BIT;
8368 if (!(stageflags & vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT))
8369 accessflags &= ~vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
8371 if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8372 vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8373 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8374 vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8375 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8376 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8377 accessflags &= ~vk::VK_ACCESS_SHADER_READ_BIT;
8379 if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8380 vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8381 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8382 vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8383 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8384 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8385 accessflags &= ~vk::VK_ACCESS_SHADER_WRITE_BIT;
8387 if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8388 accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
8390 if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8391 accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
8393 if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8394 vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8395 accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
8397 if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8398 vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8399 accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
8401 if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8402 accessflags &= ~vk::VK_ACCESS_TRANSFER_READ_BIT;
8404 if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8405 accessflags &= ~vk::VK_ACCESS_TRANSFER_WRITE_BIT;
8407 if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8408 accessflags &= ~vk::VK_ACCESS_HOST_READ_BIT;
8410 if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8411 accessflags &= ~vk::VK_ACCESS_HOST_WRITE_BIT;
8414 void applyOp (State& state, const Memory& memory, Op op, Usage usage)
8419 DE_ASSERT(state.stage == STAGE_HOST);
8420 DE_ASSERT(!state.mapped);
8421 state.mapped = true;
8425 DE_ASSERT(state.stage == STAGE_HOST);
8426 DE_ASSERT(state.mapped);
8427 state.mapped = false;
8431 DE_ASSERT(state.stage == STAGE_HOST);
8432 DE_ASSERT(!state.hostFlushed);
8433 state.hostFlushed = true;
8436 case OP_MAP_INVALIDATE:
8437 DE_ASSERT(state.stage == STAGE_HOST);
8438 DE_ASSERT(!state.hostInvalidated);
8439 state.hostInvalidated = true;
8443 DE_ASSERT(state.stage == STAGE_HOST);
8444 DE_ASSERT(state.hostInvalidated);
8445 state.rng.getUint32();
8449 DE_ASSERT(state.stage == STAGE_HOST);
8450 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8451 state.hostFlushed = false;
8453 state.memoryDefined = true;
8454 state.imageDefined = false;
8455 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8456 state.rng.getUint32();
8460 DE_ASSERT(state.stage == STAGE_HOST);
8461 DE_ASSERT(state.hostInvalidated);
8463 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8464 state.hostFlushed = false;
8466 state.rng.getUint32();
8469 case OP_BUFFER_CREATE:
8470 DE_ASSERT(state.stage == STAGE_HOST);
8471 DE_ASSERT(!state.hasBuffer);
8473 state.hasBuffer = true;
8476 case OP_BUFFER_DESTROY:
8477 DE_ASSERT(state.stage == STAGE_HOST);
8478 DE_ASSERT(state.hasBuffer);
8479 DE_ASSERT(state.hasBoundBufferMemory);
8481 state.hasBuffer = false;
8482 state.hasBoundBufferMemory = false;
8485 case OP_BUFFER_BINDMEMORY:
8486 DE_ASSERT(state.stage == STAGE_HOST);
8487 DE_ASSERT(state.hasBuffer);
8488 DE_ASSERT(!state.hasBoundBufferMemory);
8490 state.hasBoundBufferMemory = true;
8493 case OP_IMAGE_CREATE:
8494 DE_ASSERT(state.stage == STAGE_HOST);
8495 DE_ASSERT(!state.hasImage);
8496 DE_ASSERT(!state.hasBuffer);
8498 state.hasImage = true;
8501 case OP_IMAGE_DESTROY:
8502 DE_ASSERT(state.stage == STAGE_HOST);
8503 DE_ASSERT(state.hasImage);
8504 DE_ASSERT(state.hasBoundImageMemory);
8506 state.hasImage = false;
8507 state.hasBoundImageMemory = false;
8508 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8509 state.imageDefined = false;
8512 case OP_IMAGE_BINDMEMORY:
8513 DE_ASSERT(state.stage == STAGE_HOST);
8514 DE_ASSERT(state.hasImage);
8515 DE_ASSERT(!state.hasBoundImageMemory);
8517 state.hasBoundImageMemory = true;
8520 case OP_IMAGE_TRANSITION_LAYOUT:
8522 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8523 DE_ASSERT(state.hasImage);
8524 DE_ASSERT(state.hasBoundImageMemory);
8526 // \todo [2016-03-09 mika] Support linear tiling and predefined data
8527 const vk::VkImageLayout srcLayout = state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8528 const vk::VkImageLayout dstLayout = getRandomNextLayout(state.rng, usage, srcLayout);
8530 vk::VkPipelineStageFlags dirtySrcStages;
8531 vk::VkAccessFlags dirtySrcAccesses;
8532 vk::VkPipelineStageFlags dirtyDstStages;
8533 vk::VkAccessFlags dirtyDstAccesses;
8535 vk::VkPipelineStageFlags srcStages;
8536 vk::VkAccessFlags srcAccesses;
8537 vk::VkPipelineStageFlags dstStages;
8538 vk::VkAccessFlags dstAccesses;
8540 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8542 // Try masking some random bits
8543 srcStages = dirtySrcStages;
8544 srcAccesses = dirtySrcAccesses;
8546 dstStages = state.cache.getAllowedStages() & state.rng.getUint32();
8547 dstAccesses = state.cache.getAllowedAcceses() & state.rng.getUint32();
8549 // If there are no bits in dst stage mask use all stages
8550 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8553 srcStages = dstStages;
8555 removeIllegalAccessFlags(dstAccesses, dstStages);
8556 removeIllegalAccessFlags(srcAccesses, srcStages);
8558 if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8559 state.imageDefined = false;
8561 state.commandBufferIsEmpty = false;
8562 state.imageLayout = dstLayout;
8563 state.memoryDefined = false;
8564 state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8568 case OP_QUEUE_WAIT_FOR_IDLE:
8569 DE_ASSERT(state.stage == STAGE_HOST);
8570 DE_ASSERT(!state.queueIdle);
8572 state.queueIdle = true;
8574 state.cache.waitForIdle();
8577 case OP_DEVICE_WAIT_FOR_IDLE:
8578 DE_ASSERT(state.stage == STAGE_HOST);
8579 DE_ASSERT(!state.deviceIdle);
8581 state.queueIdle = true;
8582 state.deviceIdle = true;
8584 state.cache.waitForIdle();
8587 case OP_COMMAND_BUFFER_BEGIN:
8588 DE_ASSERT(state.stage == STAGE_HOST);
8589 state.stage = STAGE_COMMAND_BUFFER;
8590 state.commandBufferIsEmpty = true;
8591 // Makes host writes visible to command buffer
8592 state.cache.submitCommandBuffer();
8595 case OP_COMMAND_BUFFER_END:
8596 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8597 state.stage = STAGE_HOST;
8598 state.queueIdle = false;
8599 state.deviceIdle = false;
8602 case OP_BUFFER_COPY_FROM_BUFFER:
8603 case OP_BUFFER_COPY_FROM_IMAGE:
8604 case OP_BUFFER_UPDATE:
8605 case OP_BUFFER_FILL:
8606 state.rng.getUint32();
8607 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8609 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8610 state.hostInvalidated = false;
8612 state.commandBufferIsEmpty = false;
8613 state.memoryDefined = true;
8614 state.imageDefined = false;
8615 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8616 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8619 case OP_BUFFER_COPY_TO_BUFFER:
8620 case OP_BUFFER_COPY_TO_IMAGE:
8621 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8623 state.commandBufferIsEmpty = false;
8624 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8627 case OP_IMAGE_BLIT_FROM_IMAGE:
8628 state.rng.getBool();
8630 case OP_IMAGE_COPY_FROM_BUFFER:
8631 case OP_IMAGE_COPY_FROM_IMAGE:
8632 state.rng.getUint32();
8633 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8635 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8636 state.hostInvalidated = false;
8638 state.commandBufferIsEmpty = false;
8639 state.memoryDefined = false;
8640 state.imageDefined = true;
8641 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8644 case OP_IMAGE_BLIT_TO_IMAGE:
8645 state.rng.getBool();
8647 case OP_IMAGE_COPY_TO_BUFFER:
8648 case OP_IMAGE_COPY_TO_IMAGE:
8649 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8651 state.commandBufferIsEmpty = false;
8652 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8655 case OP_PIPELINE_BARRIER_GLOBAL:
8656 case OP_PIPELINE_BARRIER_BUFFER:
8657 case OP_PIPELINE_BARRIER_IMAGE:
8659 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8661 vk::VkPipelineStageFlags dirtySrcStages;
8662 vk::VkAccessFlags dirtySrcAccesses;
8663 vk::VkPipelineStageFlags dirtyDstStages;
8664 vk::VkAccessFlags dirtyDstAccesses;
8666 vk::VkPipelineStageFlags srcStages;
8667 vk::VkAccessFlags srcAccesses;
8668 vk::VkPipelineStageFlags dstStages;
8669 vk::VkAccessFlags dstAccesses;
8671 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8673 // Try masking some random bits
8674 srcStages = dirtySrcStages & state.rng.getUint32();
8675 srcAccesses = dirtySrcAccesses & state.rng.getUint32();
8677 dstStages = dirtyDstStages & state.rng.getUint32();
8678 dstAccesses = dirtyDstAccesses & state.rng.getUint32();
8680 // If there are no bits in stage mask use the original dirty stages
8681 srcStages = srcStages ? srcStages : dirtySrcStages;
8682 dstStages = dstStages ? dstStages : dirtyDstStages;
8685 srcStages = dstStages;
8687 removeIllegalAccessFlags(dstAccesses, dstStages);
8688 removeIllegalAccessFlags(srcAccesses, srcStages);
8690 state.commandBufferIsEmpty = false;
8691 state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8695 case OP_RENDERPASS_BEGIN:
8697 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8699 state.renderPassIsEmpty = true;
8700 state.stage = STAGE_RENDER_PASS;
8704 case OP_RENDERPASS_END:
8706 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8708 state.renderPassIsEmpty = true;
8709 state.stage = STAGE_COMMAND_BUFFER;
8713 case OP_RENDER_VERTEX_BUFFER:
8715 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8717 state.renderPassIsEmpty = false;
8718 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8722 case OP_RENDER_INDEX_BUFFER:
8724 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8726 state.renderPassIsEmpty = false;
8727 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8731 case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8732 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8734 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8736 state.renderPassIsEmpty = false;
8737 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8741 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8742 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8744 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8746 state.renderPassIsEmpty = false;
8747 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8751 case OP_RENDER_VERTEX_STORAGE_BUFFER:
8752 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8754 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8756 state.renderPassIsEmpty = false;
8757 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8761 case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8762 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8764 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8766 state.renderPassIsEmpty = false;
8767 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8771 case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
8772 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
8774 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8776 state.renderPassIsEmpty = false;
8777 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8781 case OP_RENDER_VERTEX_STORAGE_IMAGE:
8782 case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8784 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8786 state.renderPassIsEmpty = false;
8787 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8792 DE_FATAL("Unknown op");
8796 de::MovePtr<Command> createHostCommand (Op op,
8799 vk::VkSharingMode sharing)
8803 case OP_MAP: return de::MovePtr<Command>(new Map());
8804 case OP_UNMAP: return de::MovePtr<Command>(new UnMap());
8806 case OP_MAP_FLUSH: return de::MovePtr<Command>(new Flush());
8807 case OP_MAP_INVALIDATE: return de::MovePtr<Command>(new Invalidate());
8809 case OP_MAP_READ: return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8810 case OP_MAP_WRITE: return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8811 case OP_MAP_MODIFY: return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8813 case OP_BUFFER_CREATE: return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8814 case OP_BUFFER_DESTROY: return de::MovePtr<Command>(new DestroyBuffer());
8815 case OP_BUFFER_BINDMEMORY: return de::MovePtr<Command>(new BindBufferMemory());
8817 case OP_IMAGE_CREATE: return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8818 case OP_IMAGE_DESTROY: return de::MovePtr<Command>(new DestroyImage());
8819 case OP_IMAGE_BINDMEMORY: return de::MovePtr<Command>(new BindImageMemory());
8821 case OP_QUEUE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new QueueWaitIdle());
8822 case OP_DEVICE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new DeviceWaitIdle());
8825 DE_FATAL("Unknown op");
8826 return de::MovePtr<Command>(DE_NULL);
8830 de::MovePtr<CmdCommand> createCmdCommand (de::Random& rng,
8837 case OP_BUFFER_FILL: return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8838 case OP_BUFFER_UPDATE: return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8839 case OP_BUFFER_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8840 case OP_BUFFER_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8842 case OP_BUFFER_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8843 case OP_BUFFER_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8845 case OP_IMAGE_TRANSITION_LAYOUT:
8847 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8848 DE_ASSERT(state.hasImage);
8849 DE_ASSERT(state.hasBoundImageMemory);
8851 const vk::VkImageLayout srcLayout = rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8852 const vk::VkImageLayout dstLayout = getRandomNextLayout(rng, usage, srcLayout);
8854 vk::VkPipelineStageFlags dirtySrcStages;
8855 vk::VkAccessFlags dirtySrcAccesses;
8856 vk::VkPipelineStageFlags dirtyDstStages;
8857 vk::VkAccessFlags dirtyDstAccesses;
8859 vk::VkPipelineStageFlags srcStages;
8860 vk::VkAccessFlags srcAccesses;
8861 vk::VkPipelineStageFlags dstStages;
8862 vk::VkAccessFlags dstAccesses;
8864 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8866 // Try masking some random bits
8867 srcStages = dirtySrcStages;
8868 srcAccesses = dirtySrcAccesses;
8870 dstStages = state.cache.getAllowedStages() & rng.getUint32();
8871 dstAccesses = state.cache.getAllowedAcceses() & rng.getUint32();
8873 // If there are no bits in dst stage mask use all stages
8874 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8877 srcStages = dstStages;
8879 removeIllegalAccessFlags(dstAccesses, dstStages);
8880 removeIllegalAccessFlags(srcAccesses, srcStages);
8882 return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
8885 case OP_IMAGE_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8886 case OP_IMAGE_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8887 case OP_IMAGE_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8888 case OP_IMAGE_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8889 case OP_IMAGE_BLIT_TO_IMAGE:
8891 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8892 return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8895 case OP_IMAGE_BLIT_FROM_IMAGE:
8897 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8898 return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8901 case OP_PIPELINE_BARRIER_GLOBAL:
8902 case OP_PIPELINE_BARRIER_BUFFER:
8903 case OP_PIPELINE_BARRIER_IMAGE:
8905 vk::VkPipelineStageFlags dirtySrcStages;
8906 vk::VkAccessFlags dirtySrcAccesses;
8907 vk::VkPipelineStageFlags dirtyDstStages;
8908 vk::VkAccessFlags dirtyDstAccesses;
8910 vk::VkPipelineStageFlags srcStages;
8911 vk::VkAccessFlags srcAccesses;
8912 vk::VkPipelineStageFlags dstStages;
8913 vk::VkAccessFlags dstAccesses;
8915 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8917 // Try masking some random bits
8918 srcStages = dirtySrcStages & rng.getUint32();
8919 srcAccesses = dirtySrcAccesses & rng.getUint32();
8921 dstStages = dirtyDstStages & rng.getUint32();
8922 dstAccesses = dirtyDstAccesses & rng.getUint32();
8924 // If there are no bits in stage mask use the original dirty stages
8925 srcStages = srcStages ? srcStages : dirtySrcStages;
8926 dstStages = dstStages ? dstStages : dirtyDstStages;
8929 srcStages = dstStages;
8931 removeIllegalAccessFlags(dstAccesses, dstStages);
8932 removeIllegalAccessFlags(srcAccesses, srcStages);
8934 PipelineBarrier::Type type;
8936 if (op == OP_PIPELINE_BARRIER_IMAGE)
8937 type = PipelineBarrier::TYPE_IMAGE;
8938 else if (op == OP_PIPELINE_BARRIER_BUFFER)
8939 type = PipelineBarrier::TYPE_BUFFER;
8940 else if (op == OP_PIPELINE_BARRIER_GLOBAL)
8941 type = PipelineBarrier::TYPE_GLOBAL;
8944 type = PipelineBarrier::TYPE_LAST;
8945 DE_FATAL("Unknown op");
8948 if (type == PipelineBarrier::TYPE_IMAGE)
8949 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
8951 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
8955 DE_FATAL("Unknown op");
8956 return de::MovePtr<CmdCommand>(DE_NULL);
8960 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
8966 case OP_RENDER_VERTEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer());
8967 case OP_RENDER_INDEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
8969 case OP_RENDER_VERTEX_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
8970 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
8972 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
8973 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
8975 case OP_RENDER_VERTEX_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
8976 case OP_RENDER_FRAGMENT_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
8978 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
8979 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
8981 case OP_RENDER_VERTEX_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
8982 case OP_RENDER_FRAGMENT_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
8984 case OP_RENDER_VERTEX_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
8985 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
8988 DE_FATAL("Unknown op");
8989 return de::MovePtr<RenderPassCommand>(DE_NULL);
8993 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory& memory,
8994 de::Random& nextOpRng,
9000 vector<RenderPassCommand*> commands;
9004 for (; opNdx < opCount; opNdx++)
9008 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9010 DE_ASSERT(!ops.empty());
9013 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9015 if (op == OP_RENDERPASS_END)
9021 de::Random rng (state.rng);
9023 commands.push_back(createRenderPassCommand(rng, state, op).release());
9024 applyOp(state, memory, op, usage);
9026 DE_ASSERT(state.rng == rng);
9031 applyOp(state, memory, OP_RENDERPASS_END, usage);
9032 return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
9036 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9037 delete commands[commandNdx];
9043 de::MovePtr<Command> createCmdCommands (const Memory& memory,
9044 de::Random& nextOpRng,
9050 vector<CmdCommand*> commands;
9054 for (; opNdx < opCount; opNdx++)
9058 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9060 DE_ASSERT(!ops.empty());
9063 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9065 if (op == OP_COMMAND_BUFFER_END)
9071 // \note Command needs to known the state before the operation
9072 if (op == OP_RENDERPASS_BEGIN)
9074 applyOp(state, memory, op, usage);
9075 commands.push_back(createRenderPassCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9079 de::Random rng (state.rng);
9081 commands.push_back(createCmdCommand(rng, state, op, usage).release());
9082 applyOp(state, memory, op, usage);
9084 DE_ASSERT(state.rng == rng);
9091 applyOp(state, memory, OP_COMMAND_BUFFER_END, usage);
9092 return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9096 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9097 delete commands[commandNdx];
9103 void createCommands (vector<Command*>& commands,
9105 const Memory& memory,
9107 vk::VkSharingMode sharingMode,
9110 State state (usage, seed);
9111 // Used to select next operation only
9112 de::Random nextOpRng (seed ^ 12930809);
9114 commands.reserve(opCount);
9116 for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9120 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9122 DE_ASSERT(!ops.empty());
9125 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9127 if (op == OP_COMMAND_BUFFER_BEGIN)
9129 applyOp(state, memory, op, usage);
9130 commands.push_back(createCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9134 de::Random rng (state.rng);
9136 commands.push_back(createHostCommand(op, rng, usage, sharingMode).release());
9137 applyOp(state, memory, op, usage);
9139 // Make sure that random generator is in sync
9140 DE_ASSERT(state.rng == rng);
9145 // Clean up resources
9146 if (state.hasBuffer && state.hasImage)
9148 if (!state.queueIdle)
9149 commands.push_back(new QueueWaitIdle());
9151 if (state.hasBuffer)
9152 commands.push_back(new DestroyBuffer());
9155 commands.push_back(new DestroyImage());
9159 class MemoryTestInstance : public TestInstance
9163 typedef bool(MemoryTestInstance::*StageFunc)(void);
9165 MemoryTestInstance (::vkt::Context& context, const TestConfig& config);
9166 ~MemoryTestInstance (void);
9168 tcu::TestStatus iterate (void);
9171 const TestConfig m_config;
9172 const size_t m_iterationCount;
9173 const size_t m_opCount;
9174 const vk::VkPhysicalDeviceMemoryProperties m_memoryProperties;
9175 deUint32 m_memoryTypeNdx;
9178 tcu::ResultCollector m_resultCollector;
9180 vector<Command*> m_commands;
9181 MovePtr<Memory> m_memory;
9182 MovePtr<Context> m_renderContext;
9183 MovePtr<PrepareContext> m_prepareContext;
9185 bool nextIteration (void);
9186 bool nextMemoryType (void);
9188 bool createCommandsAndAllocateMemory (void);
9189 bool prepare (void);
9190 bool execute (void);
9192 void resetResources (void);
9195 void MemoryTestInstance::resetResources (void)
9197 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9198 const vk::VkDevice device = m_context.getDevice();
9200 VK_CHECK(vkd.deviceWaitIdle(device));
9202 for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9204 delete m_commands[commandNdx];
9205 m_commands[commandNdx] = DE_NULL;
9209 m_prepareContext.clear();
9213 bool MemoryTestInstance::nextIteration (void)
9217 if (m_iteration < m_iterationCount)
9220 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9224 return nextMemoryType();
9227 bool MemoryTestInstance::nextMemoryType (void)
9231 DE_ASSERT(m_commands.empty());
9235 if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9238 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9249 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
9250 : TestInstance (context)
9252 , m_iterationCount (5)
9254 , m_memoryProperties (vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
9255 , m_memoryTypeNdx (0)
9257 , m_stage (&MemoryTestInstance::createCommandsAndAllocateMemory)
9258 , m_resultCollector (context.getTestContext().getLog())
9260 , m_memory (DE_NULL)
9262 TestLog& log = context.getTestContext().getLog();
9264 const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
9266 log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
9267 log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
9268 log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
9272 const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
9274 for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
9276 const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
9278 log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
9279 log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
9282 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
9284 const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
9286 log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
9287 log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
9292 const vk::InstanceInterface& vki = context.getInstanceInterface();
9293 const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
9294 const vk::DeviceInterface& vkd = context.getDeviceInterface();
9295 const vk::VkDevice device = context.getDevice();
9296 const vk::VkQueue queue = context.getUniversalQueue();
9297 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
9298 vector<pair<deUint32, vk::VkQueue> > queues;
9300 queues.push_back(std::make_pair(queueFamilyIndex, queue));
9302 m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
9306 MemoryTestInstance::~MemoryTestInstance (void)
9311 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
9313 const vk::VkDevice device = m_context.getDevice();
9314 TestLog& log = m_context.getTestContext().getLog();
9315 const vk::InstanceInterface& vki = m_context.getInstanceInterface();
9316 const vk::VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
9317 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9318 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
9319 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
9320 "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
9321 const vector<deUint32>& queues = m_renderContext->getQueueFamilies();
9323 DE_ASSERT(m_commands.empty());
9325 if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
9326 && !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
9328 log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
9330 return nextMemoryType();
9336 const vk::VkBufferUsageFlags bufferUsage = usageToBufferUsageFlags(m_config.usage);
9337 const vk::VkImageUsageFlags imageUsage = usageToImageUsageFlags(m_config.usage);
9338 const vk::VkDeviceSize maxBufferSize = bufferUsage != 0
9339 ? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
9341 const IVec2 maxImageSize = imageUsage != 0
9342 ? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
9345 log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
9346 log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
9348 // Skip tests if there are no supported operations
9349 if (maxBufferSize == 0
9350 && maxImageSize[0] == 0
9351 && (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
9353 log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
9355 return nextMemoryType();
9359 const deUint32 seed = 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount + m_memoryTypeNdx);
9361 m_memory = MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
9363 log << TestLog::Message << "Create commands" << TestLog::EndMessage;
9364 createCommands(m_commands, seed, *m_memory, m_config.usage, m_config.sharing, m_opCount);
9366 m_stage = &MemoryTestInstance::prepare;
9370 catch (const tcu::TestError& e)
9372 m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
9373 return nextMemoryType();
9378 bool MemoryTestInstance::prepare (void)
9380 TestLog& log = m_context.getTestContext().getLog();
9381 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9382 "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration" + de::toString(m_iteration));
9384 m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9386 DE_ASSERT(!m_commands.empty());
9388 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9390 Command& command = *m_commands[cmdNdx];
9394 command.prepare(*m_prepareContext);
9396 catch (const tcu::TestError& e)
9398 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9399 return nextMemoryType();
9403 m_stage = &MemoryTestInstance::execute;
9407 bool MemoryTestInstance::execute (void)
9409 TestLog& log = m_context.getTestContext().getLog();
9410 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9411 "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9412 ExecuteContext executeContext (*m_renderContext);
9413 const vk::VkDevice device = m_context.getDevice();
9414 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9416 DE_ASSERT(!m_commands.empty());
9418 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9420 Command& command = *m_commands[cmdNdx];
9424 command.execute(executeContext);
9426 catch (const tcu::TestError& e)
9428 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9429 return nextIteration();
9433 VK_CHECK(vkd.deviceWaitIdle(device));
9435 m_stage = &MemoryTestInstance::verify;
9439 bool MemoryTestInstance::verify (void)
9441 DE_ASSERT(!m_commands.empty());
9443 TestLog& log = m_context.getTestContext().getLog();
9444 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9445 "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9446 VerifyContext verifyContext (log, m_resultCollector, *m_renderContext, m_config.size);
9448 log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9450 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9452 Command& command = *m_commands[cmdNdx];
9456 command.verify(verifyContext, cmdNdx);
9458 catch (const tcu::TestError& e)
9460 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9461 return nextIteration();
9465 return nextIteration();
9468 tcu::TestStatus MemoryTestInstance::iterate (void)
9470 if ((this->*m_stage)())
9471 return tcu::TestStatus::incomplete();
9473 return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9478 void init (vk::SourceCollections& sources, TestConfig config) const
9480 // Vertex buffer rendering
9481 if (config.usage & USAGE_VERTEX_BUFFER)
9483 const char* const vertexShader =
9485 "layout(location = 0) in highp vec2 a_position;\n"
9486 "void main (void) {\n"
9487 "\tgl_PointSize = 1.0;\n"
9488 "\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9491 sources.glslSources.add("vertex-buffer.vert")
9492 << glu::VertexSource(vertexShader);
9495 // Index buffer rendering
9496 if (config.usage & USAGE_INDEX_BUFFER)
9498 const char* const vertexShader =
9500 "precision highp float;\n"
9501 "void main (void) {\n"
9502 "\tgl_PointSize = 1.0;\n"
9503 "\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9504 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9507 sources.glslSources.add("index-buffer.vert")
9508 << glu::VertexSource(vertexShader);
9511 if (config.usage & USAGE_UNIFORM_BUFFER)
9514 std::ostringstream vertexShader;
9518 "precision highp float;\n"
9519 "layout(set=0, binding=0) uniform Block\n"
9521 "\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9523 "void main (void) {\n"
9524 "\tgl_PointSize = 1.0;\n"
9525 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9526 "\thighp uint val;\n"
9527 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9528 "\t\tval = vecVal.x;\n"
9529 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9530 "\t\tval = vecVal.y;\n"
9531 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9532 "\t\tval = vecVal.z;\n"
9533 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9534 "\t\tval = vecVal.w;\n"
9535 "\tif ((gl_VertexIndex % 2) == 0)\n"
9536 "\t\tval = val & 0xFFFFu;\n"
9538 "\t\tval = val >> 16u;\n"
9539 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9540 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9543 sources.glslSources.add("uniform-buffer.vert")
9544 << glu::VertexSource(vertexShader.str());
9548 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9549 const size_t arrayIntSize = arraySize * 4;
9550 std::ostringstream fragmentShader;
9554 "precision highp float;\n"
9555 "precision highp int;\n"
9556 "layout(location = 0) out highp vec4 o_color;\n"
9557 "layout(set=0, binding=0) uniform Block\n"
9559 "\thighp uvec4 values[" << arraySize << "];\n"
9561 "layout(push_constant) uniform PushC\n"
9564 "\tuint valuesPerPixel;\n"
9566 "void main (void) {\n"
9567 "\thighp uint id = pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel) + uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9568 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel))\n"
9570 "\thighp uint value = id;\n"
9571 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9573 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % " << arraySize << "u];\n"
9574 "\t\tif ((value % 4u) == 0u)\n"
9575 "\t\t\tvalue = vecVal.x;\n"
9576 "\t\telse if ((value % 4u) == 1u)\n"
9577 "\t\t\tvalue = vecVal.y;\n"
9578 "\t\telse if ((value % 4u) == 2u)\n"
9579 "\t\t\tvalue = vecVal.z;\n"
9580 "\t\telse if ((value % 4u) == 3u)\n"
9581 "\t\t\tvalue = vecVal.w;\n"
9583 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9584 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9587 sources.glslSources.add("uniform-buffer.frag")
9588 << glu::FragmentSource(fragmentShader.str());
// Storage-buffer rendering: add vertex and fragment shaders that read the
// tested buffer through a "buffer Block" binding at set=0, binding=0.
9592 if (config.usage & USAGE_STORAGE_BUFFER)
9595 // Vertex storage buffer rendering
// One point per 16-bit value: values[] holds uvec4s, so each vertex selects
// uvec4 index gl_VertexIndex/8, then component (gl_VertexIndex/2)%4, and
// finally the low (even vertex) or high (odd vertex) 16 bits of it.
9596 const char* const vertexShader =
9598 "precision highp float;\n"
9599 "layout(set=0, binding=0) buffer Block\n"
9601 "\thighp uvec4 values[];\n"
// NOTE(review): the block instance name ("} block;") is on a line not shown
// in this excerpt; the shader body below refers to it as "block".
9603 "void main (void) {\n"
9604 "\tgl_PointSize = 1.0;\n"
9605 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9606 "\thighp uint val;\n"
9607 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9608 "\t\tval = vecVal.x;\n"
9609 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9610 "\t\tval = vecVal.y;\n"
9611 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9612 "\t\tval = vecVal.z;\n"
9613 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9614 "\t\tval = vecVal.w;\n"
9615 "\tif ((gl_VertexIndex % 2) == 0)\n"
9616 "\t\tval = val & 0xFFFFu;\n"
// The matching "else" line is elided here; odd-indexed vertices take the
// high 16 bits of the selected component.
9618 "\t\tval = val >> 16u;\n"
// Low byte -> x, high byte -> y, normalized to [0,1], then mapped just
// inside clip space ([-0.999, 0.999]) so points hit pixel centers.
9619 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9620 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9623 sources.glslSources.add("storage-buffer.vert")
9624 << glu::VertexSource(vertexShader);
// Fragment shader: starting from the pixel's linear id, chase values through
// the buffer pushC.valuesPerPixel times and emit the final 32-bit value as
// four color bytes (assumes a 256-pixel-wide target — see the 256u factor).
9628 std::ostringstream fragmentShader;
9632 "precision highp float;\n"
9633 "precision highp int;\n"
9634 "layout(location = 0) out highp vec4 o_color;\n"
9635 "layout(set=0, binding=0) buffer Block\n"
9637 "\thighp uvec4 values[];\n"
9639 "layout(push_constant) uniform PushC\n"
9641 "\tuint valuesPerPixel;\n"
9642 "\tuint bufferSize;\n"
9644 "void main (void) {\n"
// bufferSize is in bytes; arrayIntSize is the element count in 32-bit units,
// and the uvec4 array length is arrayIntSize/4, used to wrap the index below.
9645 "\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9646 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9647 "\thighp uint value = id;\n"
9648 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9650 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9651 "\t\tif ((value % 4u) == 0u)\n"
9652 "\t\t\tvalue = vecVal.x;\n"
9653 "\t\telse if ((value % 4u) == 1u)\n"
9654 "\t\t\tvalue = vecVal.y;\n"
9655 "\t\telse if ((value % 4u) == 2u)\n"
9656 "\t\t\tvalue = vecVal.z;\n"
9657 "\t\telse if ((value % 4u) == 3u)\n"
9658 "\t\t\tvalue = vecVal.w;\n"
// Unpack the final value into per-byte channels for an RGBA8 render target.
9660 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9661 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9664 sources.glslSources.add("storage-buffer.frag")
9665 << glu::FragmentSource(fragmentShader.str());
// Uniform-texel-buffer rendering: same point/value-chasing scheme as the
// storage-buffer case, but accessed via usamplerBuffer + texelFetch.
9669 if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9672 // Vertex uniform texel buffer rendering
9673 const char* const vertexShader =
9675 "#extension GL_EXT_texture_buffer : require\n"
9676 "precision highp float;\n"
9677 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9678 "void main (void) {\n"
9679 "\tgl_PointSize = 1.0;\n"
// One 16-bit texel per vertex: low byte -> x, high byte -> y.
9680 "\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9681 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9682 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9685 sources.glslSources.add("uniform-texel-buffer.vert")
9686 << glu::VertexSource(vertexShader);
9690 // Fragment uniform texel buffer rendering
9691 const char* const fragmentShader =
9693 "#extension GL_EXT_texture_buffer : require\n"
9694 "precision highp float;\n"
9695 "precision highp int;\n"
9696 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9697 "layout(location = 0) out highp vec4 o_color;\n"
9698 "layout(push_constant) uniform PushC\n"
// NOTE(review): a "callId" member is used below but its declaration falls on
// an elided line of this excerpt.
9701 "\tuint valuesPerPixel;\n"
9702 "\tuint maxTexelCount;\n"
9704 "void main (void) {\n"
9705 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9706 "\thighp uint value = id;\n"
// Skip pixels already handled by earlier draw calls; the statement taken
// when this condition holds is on an elided line.
9707 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
// Chase values through the texel buffer, wrapping by the buffer's size.
9709 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9711 "\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9713 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9714 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9717 sources.glslSources.add("uniform-texel-buffer.frag")
9718 << glu::FragmentSource(fragmentShader);
// Storage-texel-buffer rendering: like the uniform-texel-buffer case, but
// the data is read through an r32ui readonly uimageBuffer with imageLoad.
9722 if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9725 // Vertex storage texel buffer rendering
9726 const char* const vertexShader =
9728 "#extension GL_EXT_texture_buffer : require\n"
9729 "precision highp float;\n"
9730 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
// Explicit gl_PerVertex redeclaration for the members this shader writes.
9731 "out gl_PerVertex {\n"
9732 "\tvec4 gl_Position;\n"
9733 "\tfloat gl_PointSize;\n"
9735 "void main (void) {\n"
9736 "\tgl_PointSize = 1.0;\n"
// Two vertices per 32-bit texel: even vertex takes the low 16 bits, odd
// vertex the high 16 bits (the "else" line is elided here).
9737 "\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9738 "\tif (gl_VertexIndex % 2 == 0)\n"
9739 "\t\tval = val & 0xFFFFu;\n"
9741 "\t\tval = val >> 16;\n"
9742 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9743 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9746 sources.glslSources.add("storage-texel-buffer.vert")
9747 << glu::VertexSource(vertexShader);
9750 // Fragment storage texel buffer rendering
9751 const char* const fragmentShader =
9753 "#extension GL_EXT_texture_buffer : require\n"
9754 "precision highp float;\n"
9755 "precision highp int;\n"
9756 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9757 "layout(location = 0) out highp vec4 o_color;\n"
9758 "layout(push_constant) uniform PushC\n"
// NOTE(review): "callId" and "width" members are used below but their
// declarations fall on elided lines of this excerpt.
9761 "\tuint valuesPerPixel;\n"
9762 "\tuint maxTexelCount;\n"
9765 "void main (void) {\n"
9766 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9767 "\thighp uint value = id;\n"
// Skip pixels already covered by earlier draw calls (branch body elided).
9768 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
// Chase values through the image buffer, wrapping by pushC.width (imageBuffer
// has no textureSize(), so the width comes in via push constants).
9770 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9772 "\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9774 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9775 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9778 sources.glslSources.add("storage-texel-buffer.frag")
9779 << glu::FragmentSource(fragmentShader);
// Storage-image rendering: read the tested rgba8 image via imageLoad; each
// texel's float channels are reinterpreted as packed byte coordinates.
9783 if (config.usage & USAGE_STORAGE_IMAGE)
9786 // Vertex storage image
9787 const char* const vertexShader =
9789 "precision highp float;\n"
9790 "layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9791 "out gl_PerVertex {\n"
9792 "\tvec4 gl_Position;\n"
9793 "\tfloat gl_PointSize;\n"
9795 "void main (void) {\n"
9796 "\tgl_PointSize = 1.0;\n"
// Two vertices per texel: even vertex uses val.xy, odd vertex val.zw as the
// normalized point position (the "else" line is elided here).
9797 "\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9798 "\thighp vec2 pos;\n"
9799 "\tif (gl_VertexIndex % 2 == 0)\n"
9800 "\t\tpos = val.xy;\n"
9802 "\t\tpos = val.zw;\n"
9803 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9806 sources.glslSources.add("storage-image.vert")
9807 << glu::VertexSource(vertexShader);
9810 // Fragment storage image
9811 const char* const fragmentShader =
// NOTE(review): GL_EXT_texture_buffer is not needed by this shader (it uses
// image2D, not a texel buffer) — likely copied from the texel-buffer case.
9813 "#extension GL_EXT_texture_buffer : require\n"
9814 "precision highp float;\n"
9815 "layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9816 "layout(location = 0) out highp vec4 o_color;\n"
9817 "void main (void) {\n"
// Iteration count scales with image area relative to the 256x256 target so
// every texel influences some pixel.
9818 "\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
9819 "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9820 "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
// Chase texels: the next coordinate is derived from the previous texel's
// bytes (xor-mixed), wrapped to the image size; floats are re-quantized to
// 0..255 each step.
9821 "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9823 "\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
9824 "\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9826 "\to_color = vec4(value) / vec4(255.0);\n"
9829 sources.glslSources.add("storage-image.frag")
9830 << glu::FragmentSource(fragmentShader);
// Sampled-image rendering: identical scheme to the storage-image case, but
// reads go through a sampler2D with texelFetch (mip level 0).
9834 if (config.usage & USAGE_SAMPLED_IMAGE)
9837 // Vertex sampled image (comment previously said "storage image" — copy-paste)
9838 const char* const vertexShader =
9840 "precision highp float;\n"
9841 "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9842 "out gl_PerVertex {\n"
9843 "\tvec4 gl_Position;\n"
9844 "\tfloat gl_PointSize;\n"
9846 "void main (void) {\n"
9847 "\tgl_PointSize = 1.0;\n"
// Two vertices per texel: even vertex uses val.xy, odd vertex val.zw as the
// normalized point position (the "else" line is elided here).
9848 "\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9849 "\thighp vec2 pos;\n"
9850 "\tif (gl_VertexIndex % 2 == 0)\n"
9851 "\t\tpos = val.xy;\n"
9853 "\t\tpos = val.zw;\n"
9854 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9857 sources.glslSources.add("sampled-image.vert")
9858 << glu::VertexSource(vertexShader);
9861 // Fragment sampled image (comment previously said "storage image" — copy-paste)
9862 const char* const fragmentShader =
// NOTE(review): GL_EXT_texture_buffer is not needed by this shader (it uses
// sampler2D, not a texel buffer) — likely copied from the texel-buffer case.
9864 "#extension GL_EXT_texture_buffer : require\n"
9865 "precision highp float;\n"
9866 "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9867 "layout(location = 0) out highp vec4 o_color;\n"
9868 "void main (void) {\n"
// Iteration count scales with texture area relative to the 256x256 target.
9869 "\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
9870 "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9871 "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
// Chase texels: next coordinate derived from the previous texel's bytes
// (xor-mixed), wrapped to the texture size, re-quantized to 0..255 each step.
9872 "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9874 "\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
9875 "\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9877 "\to_color = vec4(value) / vec4(255.0);\n"
9880 sources.glslSources.add("sampled-image.frag")
9881 << glu::FragmentSource(fragmentShader);
// Common helper shaders: a full-viewport quad (two triangles generated from
// gl_VertexIndex alone, no vertex inputs) and a constant-white fragment shader.
9886 const char* const vertexShader =
9888 "out gl_PerVertex {\n"
9889 "\tvec4 gl_Position;\n"
9891 "precision highp float;\n"
9892 "void main (void) {\n"
// Index pattern maps vertices 0..5 to the four corners of clip space,
// producing two triangles that exactly cover the viewport.
9893 "\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
9894 "\t ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
9897 sources.glslSources.add("render-quad.vert")
9898 << glu::VertexSource(vertexShader);
// Fragment shader that writes opaque white to every covered pixel.
9902 const char* const fragmentShader =
9904 "layout(location = 0) out highp vec4 o_color;\n"
9905 "void main (void) {\n"
9906 "\to_color = vec4(1.0);\n"
9909 sources.glslSources.add("render-white.frag")
9910 << glu::FragmentSource(fragmentShader);
// Builds the "pipeline_barrier" test group: one sub-group per write/read
// usage combination, plus "all" (every usage) and "all_device" (every usage
// except host read/write), each containing one test per buffer size.
// NOTE(review): this excerpt is elided — the sizes[] entries, the
// writeUsages[] entries, several TestConfig fields, and the function's
// closing brace fall on lines not shown here.
9917 tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
9919 de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
// Buffer sizes to test (entries elided from this excerpt).
9920 const vk::VkDeviceSize sizes[] =
// All usages combined into the "all" groups below.
9927 const Usage usages[] =
9933 USAGE_VERTEX_BUFFER,
9935 USAGE_UNIFORM_BUFFER,
9936 USAGE_UNIFORM_TEXEL_BUFFER,
9937 USAGE_STORAGE_BUFFER,
9938 USAGE_STORAGE_TEXEL_BUFFER,
9939 USAGE_STORAGE_IMAGE,
// Read-side usages paired against each write usage (visible entries match
// usages[]; elided entries may differ).
9942 const Usage readUsages[] =
9946 USAGE_VERTEX_BUFFER,
9948 USAGE_UNIFORM_BUFFER,
9949 USAGE_UNIFORM_TEXEL_BUFFER,
9950 USAGE_STORAGE_BUFFER,
9951 USAGE_STORAGE_TEXEL_BUFFER,
9952 USAGE_STORAGE_IMAGE,
// Write-side usages (entries elided from this excerpt).
9956 const Usage writeUsages[] =
// One sub-group per write x read combination, named from the combined usage.
9962 for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
9964 const Usage writeUsage = writeUsages[writeUsageNdx];
9966 for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
9968 const Usage readUsage = readUsages[readUsageNdx];
9969 const Usage usage = writeUsage | readUsage;
9970 const string usageGroupName (usageToName(usage));
9971 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
// One test per buffer size, named by the size in bytes.
9973 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9975 const vk::VkDeviceSize size = sizes[sizeNdx];
9976 const string testName (de::toString((deUint64)(size)));
// TestConfig aggregate initializer; some fields are on elided lines.
9977 const TestConfig config =
9981 vk::VK_SHARING_MODE_EXCLUSIVE
9984 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
// addChild transfers ownership to the parent, so release the MovePtr after.
9987 group->addChild(usageGroup.get());
9988 usageGroup.release();
// Fold every usage flag together for the combined groups.
9993 Usage all = (Usage)0;
9995 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
9996 all = all | usages[usageNdx];
// "all": every usage at once, one test per size.
9999 const string usageGroupName ("all");
10000 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10002 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10004 const vk::VkDeviceSize size = sizes[sizeNdx];
10005 const string testName (de::toString((deUint64)(size)));
10006 const TestConfig config =
10010 vk::VK_SHARING_MODE_EXCLUSIVE
10013 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10016 group->addChild(usageGroup.get());
10017 usageGroup.release();
// "all_device": every usage except host read/write, i.e. device-only access.
10021 const string usageGroupName ("all_device");
10022 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10024 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10026 const vk::VkDeviceSize size = sizes[sizeNdx];
10027 const string testName (de::toString((deUint64)(size)));
10028 const TestConfig config =
10030 (Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
10032 vk::VK_SHARING_MODE_EXCLUSIVE
10035 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10038 group->addChild(usageGroup.get());
10039 usageGroup.release();
// Caller takes ownership of the returned group.
10043 return group.release();