1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Pipeline barrier tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktMemoryPipelineBarrierTests.hpp"
26 #include "vktTestCaseUtil.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkCmdUtil.hpp"
36 #include "vkObjUtil.hpp"
38 #include "tcuMaybe.hpp"
39 #include "tcuTextureUtil.hpp"
40 #include "tcuTestLog.hpp"
41 #include "tcuResultCollector.hpp"
42 #include "tcuTexture.hpp"
43 #include "tcuImageCompare.hpp"
45 #include "deUniquePtr.hpp"
46 #include "deStringUtil.hpp"
47 #include "deRandom.hpp"
74 using tcu::ConstPixelBufferAccess;
75 using tcu::PixelBufferAccess;
76 using tcu::TextureFormat;
77 using tcu::TextureLevel;
86 #define ONE_MEGABYTE 1024*1024
89 MAX_UNIFORM_BUFFER_SIZE = 1024,
90 MAX_STORAGE_BUFFER_SIZE = (1<<28),
91 MAX_SIZE = (128 * 1024)
94 // \todo [mika] Add to utilities
96 T divRoundUp (const T& a, const T& b)
98 return (a / b) + (a % b == 0 ? 0 : 1);
103 // Mapped host read and write
104 USAGE_HOST_READ = (0x1u<<0),
105 USAGE_HOST_WRITE = (0x1u<<1),
107 // Copy and other transfer operations
108 USAGE_TRANSFER_SRC = (0x1u<<2),
109 USAGE_TRANSFER_DST = (0x1u<<3),
111 // Buffer usage flags
112 USAGE_INDEX_BUFFER = (0x1u<<4),
113 USAGE_VERTEX_BUFFER = (0x1u<<5),
115 USAGE_UNIFORM_BUFFER = (0x1u<<6),
116 USAGE_STORAGE_BUFFER = (0x1u<<7),
118 USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
119 USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),
121 // \todo [2016-03-09 mika] This is probably almost impossible to do
122 USAGE_INDIRECT_BUFFER = (0x1u<<10),
124 // Texture usage flags
125 USAGE_SAMPLED_IMAGE = (0x1u<<11),
126 USAGE_STORAGE_IMAGE = (0x1u<<12),
127 USAGE_COLOR_ATTACHMENT = (0x1u<<13),
128 USAGE_INPUT_ATTACHMENT = (0x1u<<14),
129 USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
132 bool supportsDeviceBufferWrites (Usage usage)
134 if (usage & USAGE_TRANSFER_DST)
137 if (usage & USAGE_STORAGE_BUFFER)
140 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
146 bool supportsDeviceImageWrites (Usage usage)
148 if (usage & USAGE_TRANSFER_DST)
151 if (usage & USAGE_STORAGE_IMAGE)
154 if (usage & USAGE_COLOR_ATTACHMENT)
160 // Sequential access enums
163 ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
164 ACCESS_INDEX_READ_BIT,
165 ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
166 ACCESS_UNIFORM_READ_BIT,
167 ACCESS_INPUT_ATTACHMENT_READ_BIT,
168 ACCESS_SHADER_READ_BIT,
169 ACCESS_SHADER_WRITE_BIT,
170 ACCESS_COLOR_ATTACHMENT_READ_BIT,
171 ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
172 ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
173 ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
174 ACCESS_TRANSFER_READ_BIT,
175 ACCESS_TRANSFER_WRITE_BIT,
176 ACCESS_HOST_READ_BIT,
177 ACCESS_HOST_WRITE_BIT,
178 ACCESS_MEMORY_READ_BIT,
179 ACCESS_MEMORY_WRITE_BIT,
184 Access accessFlagToAccess (vk::VkAccessFlagBits flag)
188 case vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT: return ACCESS_INDIRECT_COMMAND_READ_BIT;
189 case vk::VK_ACCESS_INDEX_READ_BIT: return ACCESS_INDEX_READ_BIT;
190 case vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: return ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
191 case vk::VK_ACCESS_UNIFORM_READ_BIT: return ACCESS_UNIFORM_READ_BIT;
192 case vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: return ACCESS_INPUT_ATTACHMENT_READ_BIT;
193 case vk::VK_ACCESS_SHADER_READ_BIT: return ACCESS_SHADER_READ_BIT;
194 case vk::VK_ACCESS_SHADER_WRITE_BIT: return ACCESS_SHADER_WRITE_BIT;
195 case vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: return ACCESS_COLOR_ATTACHMENT_READ_BIT;
196 case vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: return ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
197 case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT: return ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
198 case vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: return ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
199 case vk::VK_ACCESS_TRANSFER_READ_BIT: return ACCESS_TRANSFER_READ_BIT;
200 case vk::VK_ACCESS_TRANSFER_WRITE_BIT: return ACCESS_TRANSFER_WRITE_BIT;
201 case vk::VK_ACCESS_HOST_READ_BIT: return ACCESS_HOST_READ_BIT;
202 case vk::VK_ACCESS_HOST_WRITE_BIT: return ACCESS_HOST_WRITE_BIT;
203 case vk::VK_ACCESS_MEMORY_READ_BIT: return ACCESS_MEMORY_READ_BIT;
204 case vk::VK_ACCESS_MEMORY_WRITE_BIT: return ACCESS_MEMORY_WRITE_BIT;
207 DE_FATAL("Unknown access flags");
212 // Sequential stage enums
215 PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
216 PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
217 PIPELINESTAGE_DRAW_INDIRECT_BIT,
218 PIPELINESTAGE_VERTEX_INPUT_BIT,
219 PIPELINESTAGE_VERTEX_SHADER_BIT,
220 PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
221 PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
222 PIPELINESTAGE_GEOMETRY_SHADER_BIT,
223 PIPELINESTAGE_FRAGMENT_SHADER_BIT,
224 PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
225 PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
226 PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
227 PIPELINESTAGE_COMPUTE_SHADER_BIT,
228 PIPELINESTAGE_TRANSFER_BIT,
229 PIPELINESTAGE_HOST_BIT,
234 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flag)
238 case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT: return PIPELINESTAGE_TOP_OF_PIPE_BIT;
239 case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT: return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
240 case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT: return PIPELINESTAGE_DRAW_INDIRECT_BIT;
241 case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT: return PIPELINESTAGE_VERTEX_INPUT_BIT;
242 case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT: return PIPELINESTAGE_VERTEX_SHADER_BIT;
243 case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
244 case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
245 case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT: return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
246 case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT: return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
247 case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
248 case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
249 case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT: return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
250 case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT: return PIPELINESTAGE_COMPUTE_SHADER_BIT;
251 case vk::VK_PIPELINE_STAGE_TRANSFER_BIT: return PIPELINESTAGE_TRANSFER_BIT;
252 case vk::VK_PIPELINE_STAGE_HOST_BIT: return PIPELINESTAGE_HOST_BIT;
255 DE_FATAL("Unknown pipeline stage flags");
256 return PIPELINESTAGE_LAST;
260 Usage operator| (Usage a, Usage b)
262 return (Usage)((deUint32)a | (deUint32)b);
265 Usage operator& (Usage a, Usage b)
267 return (Usage)((deUint32)a & (deUint32)b);
270 string usageToName (Usage usage)
275 const char* const name;
278 { USAGE_HOST_READ, "host_read" },
279 { USAGE_HOST_WRITE, "host_write" },
281 { USAGE_TRANSFER_SRC, "transfer_src" },
282 { USAGE_TRANSFER_DST, "transfer_dst" },
284 { USAGE_INDEX_BUFFER, "index_buffer" },
285 { USAGE_VERTEX_BUFFER, "vertex_buffer" },
286 { USAGE_UNIFORM_BUFFER, "uniform_buffer" },
287 { USAGE_STORAGE_BUFFER, "storage_buffer" },
288 { USAGE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer" },
289 { USAGE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer" },
290 { USAGE_INDIRECT_BUFFER, "indirect_buffer" },
291 { USAGE_SAMPLED_IMAGE, "image_sampled" },
292 { USAGE_STORAGE_IMAGE, "storage_image" },
293 { USAGE_COLOR_ATTACHMENT, "color_attachment" },
294 { USAGE_INPUT_ATTACHMENT, "input_attachment" },
295 { USAGE_DEPTH_STENCIL_ATTACHMENT, "depth_stencil_attachment" },
298 std::ostringstream stream;
301 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
303 if (usage & usageNames[usageNdx].usage)
310 stream << usageNames[usageNdx].name;
317 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
319 vk::VkBufferUsageFlags flags = 0;
321 if (usage & USAGE_TRANSFER_SRC)
322 flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
324 if (usage & USAGE_TRANSFER_DST)
325 flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
327 if (usage & USAGE_INDEX_BUFFER)
328 flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
330 if (usage & USAGE_VERTEX_BUFFER)
331 flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
333 if (usage & USAGE_INDIRECT_BUFFER)
334 flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
336 if (usage & USAGE_UNIFORM_BUFFER)
337 flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
339 if (usage & USAGE_STORAGE_BUFFER)
340 flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
342 if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
343 flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
345 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
346 flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
351 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
353 vk::VkImageUsageFlags flags = 0;
355 if (usage & USAGE_TRANSFER_SRC)
356 flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
358 if (usage & USAGE_TRANSFER_DST)
359 flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
361 if (usage & USAGE_SAMPLED_IMAGE)
362 flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
364 if (usage & USAGE_STORAGE_IMAGE)
365 flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
367 if (usage & USAGE_COLOR_ATTACHMENT)
368 flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
370 if (usage & USAGE_INPUT_ATTACHMENT)
371 flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
373 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
374 flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
379 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
381 vk::VkPipelineStageFlags flags = 0;
383 if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
384 flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
386 if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
387 flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
389 if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
390 flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
392 if (usage & USAGE_INDIRECT_BUFFER)
393 flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
396 (USAGE_UNIFORM_BUFFER
397 | USAGE_STORAGE_BUFFER
398 | USAGE_UNIFORM_TEXEL_BUFFER
399 | USAGE_STORAGE_TEXEL_BUFFER
400 | USAGE_SAMPLED_IMAGE
401 | USAGE_STORAGE_IMAGE))
403 flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
404 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
405 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
408 if (usage & USAGE_INPUT_ATTACHMENT)
409 flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
411 if (usage & USAGE_COLOR_ATTACHMENT)
412 flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
414 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
416 flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
417 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
423 vk::VkAccessFlags usageToAccessFlags (Usage usage)
425 vk::VkAccessFlags flags = 0;
427 if (usage & USAGE_HOST_READ)
428 flags |= vk::VK_ACCESS_HOST_READ_BIT;
430 if (usage & USAGE_HOST_WRITE)
431 flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
433 if (usage & USAGE_TRANSFER_SRC)
434 flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
436 if (usage & USAGE_TRANSFER_DST)
437 flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
439 if (usage & USAGE_INDEX_BUFFER)
440 flags |= vk::VK_ACCESS_INDEX_READ_BIT;
442 if (usage & USAGE_VERTEX_BUFFER)
443 flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
445 if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
446 flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
448 if (usage & USAGE_SAMPLED_IMAGE)
449 flags |= vk::VK_ACCESS_SHADER_READ_BIT;
451 if (usage & (USAGE_STORAGE_BUFFER
452 | USAGE_STORAGE_TEXEL_BUFFER
453 | USAGE_STORAGE_IMAGE))
454 flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
456 if (usage & USAGE_INDIRECT_BUFFER)
457 flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
459 if (usage & USAGE_COLOR_ATTACHMENT)
460 flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
462 if (usage & USAGE_INPUT_ATTACHMENT)
463 flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
465 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
466 flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
467 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
475 vk::VkDeviceSize size;
476 vk::VkSharingMode sharing;
479 vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface& vkd,
481 vk::VkCommandPool pool,
482 vk::VkCommandBufferLevel level)
484 const vk::VkCommandBufferInheritanceInfo inheritInfo =
486 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
495 const vk::VkCommandBufferBeginInfo beginInfo =
497 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
500 (level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
503 vk::Move<vk::VkCommandBuffer> commandBuffer (allocateCommandBuffer(vkd, device, pool, level));
505 vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
507 return commandBuffer;
510 vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface& vkd,
512 vk::VkDeviceSize size,
513 vk::VkBufferUsageFlags usage,
514 vk::VkSharingMode sharingMode,
515 const vector<deUint32>& queueFamilies)
517 const vk::VkBufferCreateInfo createInfo =
519 vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
526 (deUint32)queueFamilies.size(),
530 return vk::createBuffer(vkd, device, &createInfo);
533 vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface& vkd,
535 vk::VkDeviceSize size,
536 deUint32 memoryTypeIndex)
538 const vk::VkMemoryAllocateInfo alloc =
540 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
547 return vk::allocateMemory(vkd, device, &alloc);
550 vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface& vki,
551 const vk::DeviceInterface& vkd,
552 vk::VkPhysicalDevice physicalDevice,
555 vk::VkMemoryPropertyFlags properties)
557 const vk::VkMemoryRequirements memoryRequirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
558 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
559 deUint32 memoryTypeIndex;
561 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
563 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
564 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
568 const vk::VkMemoryAllocateInfo allocationInfo =
570 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
572 memoryRequirements.size,
575 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
577 VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
581 catch (const vk::Error& error)
583 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
584 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
586 // Try next memory type/heap if out of memory
590 // Throw all other errors forward
597 TCU_FAIL("Failed to allocate memory for buffer");
600 vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface& vki,
601 const vk::DeviceInterface& vkd,
602 vk::VkPhysicalDevice physicalDevice,
605 vk::VkMemoryPropertyFlags properties)
607 const vk::VkMemoryRequirements memoryRequirements = vk::getImageMemoryRequirements(vkd, device, image);
608 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
609 deUint32 memoryTypeIndex;
611 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
613 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
614 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
618 const vk::VkMemoryAllocateInfo allocationInfo =
620 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
622 memoryRequirements.size,
625 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
627 VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
631 catch (const vk::Error& error)
633 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
634 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
636 // Try next memory type/heap if out of memory
640 // Throw all other errors forward
647 TCU_FAIL("Failed to allocate memory for image");
650 void* mapMemory (const vk::DeviceInterface& vkd,
652 vk::VkDeviceMemory memory,
653 vk::VkDeviceSize size)
657 VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
662 class ReferenceMemory
665 ReferenceMemory (size_t size);
667 void set (size_t pos, deUint8 val);
668 deUint8 get (size_t pos) const;
669 bool isDefined (size_t pos) const;
671 void setDefined (size_t offset, size_t size, const void* data);
672 void setUndefined (size_t offset, size_t size);
673 void setData (size_t offset, size_t size, const void* data);
675 size_t getSize (void) const { return m_data.size(); }
678 vector<deUint8> m_data;
679 vector<deUint64> m_defined;
682 ReferenceMemory::ReferenceMemory (size_t size)
684 , m_defined (size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
688 void ReferenceMemory::set (size_t pos, deUint8 val)
690 DE_ASSERT(pos < m_data.size());
693 m_defined[pos / 64] |= 0x1ull << (pos % 64);
696 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
698 const deUint8* data = (const deUint8*)data_;
700 DE_ASSERT(offset < m_data.size());
701 DE_ASSERT(offset + size <= m_data.size());
703 // \todo [2016-03-09 mika] Optimize
704 for (size_t pos = 0; pos < size; pos++)
706 m_data[offset + pos] = data[pos];
707 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
711 void ReferenceMemory::setUndefined (size_t offset, size_t size)
713 // \todo [2016-03-09 mika] Optimize
714 for (size_t pos = 0; pos < size; pos++)
715 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
718 deUint8 ReferenceMemory::get (size_t pos) const
720 DE_ASSERT(pos < m_data.size());
721 DE_ASSERT(isDefined(pos));
725 bool ReferenceMemory::isDefined (size_t pos) const
727 DE_ASSERT(pos < m_data.size());
729 return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
735 Memory (const vk::InstanceInterface& vki,
736 const vk::DeviceInterface& vkd,
737 vk::VkPhysicalDevice physicalDevice,
739 vk::VkDeviceSize size,
740 deUint32 memoryTypeIndex,
741 vk::VkDeviceSize maxBufferSize,
742 deInt32 maxImageWidth,
743 deInt32 maxImageHeight);
745 vk::VkDeviceSize getSize (void) const { return m_size; }
746 vk::VkDeviceSize getMaxBufferSize (void) const { return m_maxBufferSize; }
747 bool getSupportBuffers (void) const { return m_maxBufferSize > 0; }
749 deInt32 getMaxImageWidth (void) const { return m_maxImageWidth; }
750 deInt32 getMaxImageHeight (void) const { return m_maxImageHeight; }
751 bool getSupportImages (void) const { return m_maxImageWidth > 0; }
753 const vk::VkMemoryType& getMemoryType (void) const { return m_memoryType; }
754 deUint32 getMemoryTypeIndex (void) const { return m_memoryTypeIndex; }
755 vk::VkDeviceMemory getMemory (void) const { return *m_memory; }
758 const vk::VkDeviceSize m_size;
759 const deUint32 m_memoryTypeIndex;
760 const vk::VkMemoryType m_memoryType;
761 const vk::Unique<vk::VkDeviceMemory> m_memory;
762 const vk::VkDeviceSize m_maxBufferSize;
763 const deInt32 m_maxImageWidth;
764 const deInt32 m_maxImageHeight;
767 vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface& vki,
768 vk::VkPhysicalDevice device,
769 deUint32 memoryTypeIndex)
771 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);
773 DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);
775 return memoryProperties.memoryTypes[memoryTypeIndex];
778 vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface& vkd,
781 vk::VkBufferUsageFlags usage,
782 vk::VkSharingMode sharingMode,
783 const vector<deUint32>& queueFamilies,
785 vk::VkDeviceSize memorySize,
786 deUint32 memoryTypeIndex)
788 vk::VkDeviceSize lastSuccess = 0;
789 vk::VkDeviceSize currentSize = memorySize / 2;
792 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
793 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
795 if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
799 for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
801 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
802 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
804 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
806 lastSuccess = currentSize;
807 currentSize += stepSize;
810 currentSize -= stepSize;
819 // Round size down maximum W * H * 4, where W and H < 4096
820 vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
822 const vk::VkDeviceSize maxTextureSize = 4096;
823 vk::VkDeviceSize maxTexelCount = size / 4;
824 vk::VkDeviceSize bestW = de::max(maxTexelCount, maxTextureSize);
825 vk::VkDeviceSize bestH = maxTexelCount / bestW;
827 // \todo [2016-03-09 mika] Could probably be faster?
828 for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
830 const vk::VkDeviceSize h = maxTexelCount / w;
832 if (bestW * bestH < w * h)
839 return bestW * bestH * 4;
842 // Find RGBA8 image size that has exactly "size" of number of bytes.
843 // "size" must be W * H * 4 where W and H < 4096
844 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
846 const vk::VkDeviceSize maxTextureSize = 4096;
847 vk::VkDeviceSize texelCount = size / 4;
849 DE_ASSERT((size % 4) == 0);
851 // \todo [2016-03-09 mika] Could probably be faster?
852 for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
854 const vk::VkDeviceSize h = texelCount / w;
856 if ((texelCount % w) == 0 && h < maxTextureSize)
857 return IVec2((int)w, (int)h);
860 DE_FATAL("Invalid size");
861 return IVec2(-1, -1);
864 IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface& vkd,
867 vk::VkImageUsageFlags usage,
868 vk::VkSharingMode sharingMode,
869 const vector<deUint32>& queueFamilies,
871 vk::VkDeviceSize memorySize,
872 deUint32 memoryTypeIndex)
874 IVec2 lastSuccess (0);
878 const deUint32 texelCount = (deUint32)(memorySize / 4);
879 const deUint32 width = (deUint32)deFloatSqrt((float)texelCount);
880 const deUint32 height = texelCount / width;
882 currentSize[0] = deMaxu32(width, height);
883 currentSize[1] = deMinu32(width, height);
886 for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
888 const vk::VkImageCreateInfo createInfo =
890 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
894 vk::VK_IMAGE_TYPE_2D,
895 vk::VK_FORMAT_R8G8B8A8_UNORM,
897 (deUint32)currentSize[0],
898 (deUint32)currentSize[1],
902 vk::VK_SAMPLE_COUNT_1_BIT,
903 vk::VK_IMAGE_TILING_OPTIMAL,
906 (deUint32)queueFamilies.size(),
908 vk::VK_IMAGE_LAYOUT_UNDEFINED
910 const vk::Unique<vk::VkImage> image (vk::createImage(vkd, device, &createInfo));
911 const vk::VkMemoryRequirements requirements (vk::getImageMemoryRequirements(vkd, device, *image));
913 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
915 lastSuccess = currentSize;
916 currentSize[0] += stepSize;
917 currentSize[1] += stepSize;
921 currentSize[0] -= stepSize;
922 currentSize[1] -= stepSize;
932 Memory::Memory (const vk::InstanceInterface& vki,
933 const vk::DeviceInterface& vkd,
934 vk::VkPhysicalDevice physicalDevice,
936 vk::VkDeviceSize size,
937 deUint32 memoryTypeIndex,
938 vk::VkDeviceSize maxBufferSize,
939 deInt32 maxImageWidth,
940 deInt32 maxImageHeight)
942 , m_memoryTypeIndex (memoryTypeIndex)
943 , m_memoryType (getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
944 , m_memory (allocMemory(vkd, device, size, memoryTypeIndex))
945 , m_maxBufferSize (maxBufferSize)
946 , m_maxImageWidth (maxImageWidth)
947 , m_maxImageHeight (maxImageHeight)
954 Context (const vk::InstanceInterface& vki,
955 const vk::DeviceInterface& vkd,
956 vk::VkPhysicalDevice physicalDevice,
959 deUint32 queueFamilyIndex,
960 const vector<pair<deUint32, vk::VkQueue> >& queues,
961 const vk::BinaryCollection& binaryCollection)
964 , m_physicalDevice (physicalDevice)
967 , m_queueFamilyIndex (queueFamilyIndex)
969 , m_commandPool (createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
970 , m_binaryCollection (binaryCollection)
972 for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
973 m_queueFamilies.push_back(m_queues[queueNdx].first);
976 const vk::InstanceInterface& getInstanceInterface (void) const { return m_vki; }
977 vk::VkPhysicalDevice getPhysicalDevice (void) const { return m_physicalDevice; }
978 vk::VkDevice getDevice (void) const { return m_device; }
979 const vk::DeviceInterface& getDeviceInterface (void) const { return m_vkd; }
980 vk::VkQueue getQueue (void) const { return m_queue; }
981 deUint32 getQueueFamily (void) const { return m_queueFamilyIndex; }
982 const vector<pair<deUint32, vk::VkQueue> >& getQueues (void) const { return m_queues; }
983 const vector<deUint32> getQueueFamilies (void) const { return m_queueFamilies; }
984 vk::VkCommandPool getCommandPool (void) const { return *m_commandPool; }
985 const vk::BinaryCollection& getBinaryCollection (void) const { return m_binaryCollection; }
988 const vk::InstanceInterface& m_vki;
989 const vk::DeviceInterface& m_vkd;
990 const vk::VkPhysicalDevice m_physicalDevice;
991 const vk::VkDevice m_device;
992 const vk::VkQueue m_queue;
993 const deUint32 m_queueFamilyIndex;
994 const vector<pair<deUint32, vk::VkQueue> > m_queues;
995 const vk::Unique<vk::VkCommandPool> m_commandPool;
996 const vk::BinaryCollection& m_binaryCollection;
997 vector<deUint32> m_queueFamilies;
1000 class PrepareContext
1003 PrepareContext (const Context& context,
1004 const Memory& memory)
1005 : m_context (context)
1010 const Memory& getMemory (void) const { return m_memory; }
1011 const Context& getContext (void) const { return m_context; }
1012 const vk::BinaryCollection& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
1014 void setBuffer (vk::Move<vk::VkBuffer> buffer,
1015 vk::VkDeviceSize size)
1017 DE_ASSERT(!m_currentImage);
1018 DE_ASSERT(!m_currentBuffer);
1020 m_currentBuffer = buffer;
1021 m_currentBufferSize = size;
1024 vk::VkBuffer getBuffer (void) const { return *m_currentBuffer; }
1025 vk::VkDeviceSize getBufferSize (void) const
1027 DE_ASSERT(m_currentBuffer);
1028 return m_currentBufferSize;
1031 void releaseBuffer (void) { m_currentBuffer.disown(); }
1033 void setImage (vk::Move<vk::VkImage> image,
1034 vk::VkImageLayout layout,
1035 vk::VkDeviceSize memorySize,
1039 DE_ASSERT(!m_currentImage);
1040 DE_ASSERT(!m_currentBuffer);
1042 m_currentImage = image;
1043 m_currentImageMemorySize = memorySize;
1044 m_currentImageLayout = layout;
1045 m_currentImageWidth = width;
1046 m_currentImageHeight = height;
1049 void setImageLayout (vk::VkImageLayout layout)
1051 DE_ASSERT(m_currentImage);
1052 m_currentImageLayout = layout;
1055 vk::VkImage getImage (void) const { return *m_currentImage; }
1056 deInt32 getImageWidth (void) const
1058 DE_ASSERT(m_currentImage);
1059 return m_currentImageWidth;
1061 deInt32 getImageHeight (void) const
1063 DE_ASSERT(m_currentImage);
1064 return m_currentImageHeight;
1066 vk::VkDeviceSize getImageMemorySize (void) const
1068 DE_ASSERT(m_currentImage);
1069 return m_currentImageMemorySize;
1072 void releaseImage (void) { m_currentImage.disown(); }
1074 vk::VkImageLayout getImageLayout (void) const
1076 DE_ASSERT(m_currentImage);
1077 return m_currentImageLayout;
1081 const Context& m_context;
1082 const Memory& m_memory;
1084 vk::Move<vk::VkBuffer> m_currentBuffer;
1085 vk::VkDeviceSize m_currentBufferSize;
1087 vk::Move<vk::VkImage> m_currentImage;
1088 vk::VkDeviceSize m_currentImageMemorySize;
1089 vk::VkImageLayout m_currentImageLayout;
1090 deInt32 m_currentImageWidth;
1091 deInt32 m_currentImageHeight;
1094 class ExecuteContext
1097 ExecuteContext (const Context& context)
1098 : m_context (context)
1102 const Context& getContext (void) const { return m_context; }
1103 void setMapping (void* ptr) { m_mapping = ptr; }
1104 void* getMapping (void) const { return m_mapping; }
1107 const Context& m_context;
1114 VerifyContext (TestLog& log,
1115 tcu::ResultCollector& resultCollector,
1116 const Context& context,
1117 vk::VkDeviceSize size)
1119 , m_resultCollector (resultCollector)
1120 , m_context (context)
1121 , m_reference ((size_t)size)
1125 const Context& getContext (void) const { return m_context; }
1126 TestLog& getLog (void) const { return m_log; }
1127 tcu::ResultCollector& getResultCollector (void) const { return m_resultCollector; }
1129 ReferenceMemory& getReference (void) { return m_reference; }
1130 TextureLevel& getReferenceImage (void) { return m_referenceImage;}
1134 tcu::ResultCollector& m_resultCollector;
1135 const Context& m_context;
1136 ReferenceMemory m_reference;
1137 TextureLevel m_referenceImage;
1143 // Constructor should allocate all non-vulkan resources.
1144 virtual ~Command (void) {}
1146 // Get name of the command
1147 virtual const char* getName (void) const = 0;
1149 // Log prepare operations
1150 virtual void logPrepare (TestLog&, size_t) const {}
1151 // Log executed operations
1152 virtual void logExecute (TestLog&, size_t) const {}
1154 // Prepare should allocate all vulkan resources and resources that require
1155 // that buffer or memory has been already allocated. This should build all
1156 // command buffers etc.
1157 virtual void prepare (PrepareContext&) {}
1159 // Execute command. Write or read mapped memory, submit commands to queue
1161 virtual void execute (ExecuteContext&) {}
1163 // Verify that results are correct.
1164 virtual void verify (VerifyContext&, size_t) {}
1167 // Allow only inheritance
1172 Command (const Command&);
1173 Command& operator& (const Command&);
1176 class Map : public Command
1181 const char* getName (void) const { return "Map"; }
1184 void logExecute (TestLog& log, size_t commandIndex) const
1186 log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1189 void prepare (PrepareContext& context)
1191 m_memory = context.getMemory().getMemory();
1192 m_size = context.getMemory().getSize();
1195 void execute (ExecuteContext& context)
1197 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1198 const vk::VkDevice device = context.getContext().getDevice();
1200 context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1204 vk::VkDeviceMemory m_memory;
1205 vk::VkDeviceSize m_size;
1208 class UnMap : public Command
1213 const char* getName (void) const { return "UnMap"; }
1215 void logExecute (TestLog& log, size_t commandIndex) const
1217 log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1220 void prepare (PrepareContext& context)
1222 m_memory = context.getMemory().getMemory();
1225 void execute (ExecuteContext& context)
1227 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1228 const vk::VkDevice device = context.getContext().getDevice();
1230 vkd.unmapMemory(device, m_memory);
1231 context.setMapping(DE_NULL);
1235 vk::VkDeviceMemory m_memory;
1238 class Invalidate : public Command
1241 Invalidate (void) {}
1242 ~Invalidate (void) {}
1243 const char* getName (void) const { return "Invalidate"; }
1245 void logExecute (TestLog& log, size_t commandIndex) const
1247 log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1250 void prepare (PrepareContext& context)
1252 m_memory = context.getMemory().getMemory();
1253 m_size = context.getMemory().getSize();
1256 void execute (ExecuteContext& context)
1258 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1259 const vk::VkDevice device = context.getContext().getDevice();
1261 vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1265 vk::VkDeviceMemory m_memory;
1266 vk::VkDeviceSize m_size;
1269 class Flush : public Command
1274 const char* getName (void) const { return "Flush"; }
1276 void logExecute (TestLog& log, size_t commandIndex) const
1278 log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1281 void prepare (PrepareContext& context)
1283 m_memory = context.getMemory().getMemory();
1284 m_size = context.getMemory().getSize();
1287 void execute (ExecuteContext& context)
1289 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1290 const vk::VkDevice device = context.getContext().getDevice();
1292 vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1296 vk::VkDeviceMemory m_memory;
1297 vk::VkDeviceSize m_size;
1300 // Host memory reads and writes
1301 class HostMemoryAccess : public Command
1304 HostMemoryAccess (bool read, bool write, deUint32 seed);
1305 ~HostMemoryAccess (void) {}
1306 const char* getName (void) const { return "HostMemoryAccess"; }
1308 void logExecute (TestLog& log, size_t commandIndex) const;
1309 void prepare (PrepareContext& context);
1310 void execute (ExecuteContext& context);
1311 void verify (VerifyContext& context, size_t commandIndex);
1316 const deUint32 m_seed;
1319 vector<deUint8> m_readData;
1322 HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1329 void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1331 log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "") << ", seed: " << m_seed << TestLog::EndMessage;
1334 void HostMemoryAccess::prepare (PrepareContext& context)
1336 m_size = (size_t)context.getMemory().getSize();
1339 m_readData.resize(m_size, 0);
1342 void HostMemoryAccess::execute (ExecuteContext& context)
1344 if (m_read && m_write)
1346 de::Random rng (m_seed);
1347 deUint8* const ptr = (deUint8*)context.getMapping();
1348 if (m_size >= ONE_MEGABYTE)
1350 deMemcpy(&m_readData[0], ptr, m_size);
1351 for (size_t pos = 0; pos < m_size; ++pos)
1353 ptr[pos] = m_readData[pos] ^ rng.getUint8();
1358 for (size_t pos = 0; pos < m_size; ++pos)
1360 const deUint8 mask = rng.getUint8();
1361 const deUint8 value = ptr[pos];
1363 m_readData[pos] = value;
1364 ptr[pos] = value ^ mask;
1370 const deUint8* const ptr = (deUint8*)context.getMapping();
1371 if (m_size >= ONE_MEGABYTE)
1373 deMemcpy(&m_readData[0], ptr, m_size);
1377 for (size_t pos = 0; pos < m_size; ++pos)
1379 m_readData[pos] = ptr[pos];
1385 de::Random rng (m_seed);
1386 deUint8* const ptr = (deUint8*)context.getMapping();
1387 for (size_t pos = 0; pos < m_size; ++pos)
1389 ptr[pos] = rng.getUint8();
1393 DE_FATAL("Host memory access without read or write.");
1396 void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
1398 tcu::ResultCollector& resultCollector = context.getResultCollector();
1399 ReferenceMemory& reference = context.getReference();
1400 de::Random rng (m_seed);
1402 if (m_read && m_write)
1404 for (size_t pos = 0; pos < m_size; pos++)
1406 const deUint8 mask = rng.getUint8();
1407 const deUint8 value = m_readData[pos];
1409 if (reference.isDefined(pos))
1411 if (value != reference.get(pos))
1413 resultCollector.fail(
1414 de::toString(commandIndex) + ":" + getName()
1415 + " Result differs from reference, Expected: "
1416 + de::toString(tcu::toHex<8>(reference.get(pos)))
1418 + de::toString(tcu::toHex<8>(value))
1420 + de::toString(pos));
1424 reference.set(pos, reference.get(pos) ^ mask);
1430 for (size_t pos = 0; pos < m_size; pos++)
1432 const deUint8 value = m_readData[pos];
1434 if (reference.isDefined(pos))
1436 if (value != reference.get(pos))
1438 resultCollector.fail(
1439 de::toString(commandIndex) + ":" + getName()
1440 + " Result differs from reference, Expected: "
1441 + de::toString(tcu::toHex<8>(reference.get(pos)))
1443 + de::toString(tcu::toHex<8>(value))
1445 + de::toString(pos));
1453 for (size_t pos = 0; pos < m_size; pos++)
1455 const deUint8 value = rng.getUint8();
1457 reference.set(pos, value);
1461 DE_FATAL("Host memory access without read or write.");
1464 class CreateBuffer : public Command
1467 CreateBuffer (vk::VkBufferUsageFlags usage,
1468 vk::VkSharingMode sharing);
1469 ~CreateBuffer (void) {}
1470 const char* getName (void) const { return "CreateBuffer"; }
1472 void logPrepare (TestLog& log, size_t commandIndex) const;
1473 void prepare (PrepareContext& context);
1476 const vk::VkBufferUsageFlags m_usage;
1477 const vk::VkSharingMode m_sharing;
1480 CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags usage,
1481 vk::VkSharingMode sharing)
1483 , m_sharing (sharing)
1487 void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1489 log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1492 void CreateBuffer::prepare (PrepareContext& context)
1494 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1495 const vk::VkDevice device = context.getContext().getDevice();
1496 const vk::VkDeviceSize bufferSize = context.getMemory().getMaxBufferSize();
1497 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1499 context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
1502 class DestroyBuffer : public Command
1505 DestroyBuffer (void);
1506 ~DestroyBuffer (void) {}
1507 const char* getName (void) const { return "DestroyBuffer"; }
1509 void logExecute (TestLog& log, size_t commandIndex) const;
1510 void prepare (PrepareContext& context);
1511 void execute (ExecuteContext& context);
1514 vk::Move<vk::VkBuffer> m_buffer;
1517 DestroyBuffer::DestroyBuffer (void)
1521 void DestroyBuffer::prepare (PrepareContext& context)
1523 m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1524 context.releaseBuffer();
1527 void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
1529 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1532 void DestroyBuffer::execute (ExecuteContext& context)
1534 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1535 const vk::VkDevice device = context.getContext().getDevice();
1537 vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
1540 class BindBufferMemory : public Command
1543 BindBufferMemory (void) {}
1544 ~BindBufferMemory (void) {}
1545 const char* getName (void) const { return "BindBufferMemory"; }
1547 void logPrepare (TestLog& log, size_t commandIndex) const;
1548 void prepare (PrepareContext& context);
1551 void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1553 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1556 void BindBufferMemory::prepare (PrepareContext& context)
1558 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1559 const vk::VkDevice device = context.getContext().getDevice();
1561 VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
1564 class CreateImage : public Command
1567 CreateImage (vk::VkImageUsageFlags usage,
1568 vk::VkSharingMode sharing);
1569 ~CreateImage (void) {}
1570 const char* getName (void) const { return "CreateImage"; }
1572 void logPrepare (TestLog& log, size_t commandIndex) const;
1573 void prepare (PrepareContext& context);
1574 void verify (VerifyContext& context, size_t commandIndex);
1577 const vk::VkImageUsageFlags m_usage;
1578 const vk::VkSharingMode m_sharing;
1579 deInt32 m_imageWidth;
1580 deInt32 m_imageHeight;
1583 CreateImage::CreateImage (vk::VkImageUsageFlags usage,
1584 vk::VkSharingMode sharing)
1586 , m_sharing (sharing)
1590 void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
1592 log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage) << TestLog::EndMessage;
1595 void CreateImage::prepare (PrepareContext& context)
1597 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1598 const vk::VkDevice device = context.getContext().getDevice();
1599 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1601 m_imageWidth = context.getMemory().getMaxImageWidth();
1602 m_imageHeight = context.getMemory().getMaxImageHeight();
1605 const vk::VkImageCreateInfo createInfo =
1607 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1611 vk::VK_IMAGE_TYPE_2D,
1612 vk::VK_FORMAT_R8G8B8A8_UNORM,
1614 (deUint32)m_imageWidth,
1615 (deUint32)m_imageHeight,
1619 vk::VK_SAMPLE_COUNT_1_BIT,
1620 vk::VK_IMAGE_TILING_OPTIMAL,
1623 (deUint32)queueFamilies.size(),
1625 vk::VK_IMAGE_LAYOUT_UNDEFINED
1627 vk::Move<vk::VkImage> image (createImage(vkd, device, &createInfo));
1628 const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, *image);
1630 context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
1634 void CreateImage::verify (VerifyContext& context, size_t)
1636 context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
1639 class DestroyImage : public Command
1642 DestroyImage (void);
1643 ~DestroyImage (void) {}
1644 const char* getName (void) const { return "DestroyImage"; }
1646 void logExecute (TestLog& log, size_t commandIndex) const;
1647 void prepare (PrepareContext& context);
1648 void execute (ExecuteContext& context);
1651 vk::Move<vk::VkImage> m_image;
1654 DestroyImage::DestroyImage (void)
1658 void DestroyImage::prepare (PrepareContext& context)
1660 m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1661 context.releaseImage();
1665 void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
1667 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1670 void DestroyImage::execute (ExecuteContext& context)
1672 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1673 const vk::VkDevice device = context.getContext().getDevice();
1675 vkd.destroyImage(device, m_image.disown(), DE_NULL);
1678 class BindImageMemory : public Command
1681 BindImageMemory (void) {}
1682 ~BindImageMemory (void) {}
1683 const char* getName (void) const { return "BindImageMemory"; }
1685 void logPrepare (TestLog& log, size_t commandIndex) const;
1686 void prepare (PrepareContext& context);
1689 void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1691 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1694 void BindImageMemory::prepare (PrepareContext& context)
1696 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1697 const vk::VkDevice device = context.getContext().getDevice();
1699 VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1702 class QueueWaitIdle : public Command
1705 QueueWaitIdle (void) {}
1706 ~QueueWaitIdle (void) {}
1707 const char* getName (void) const { return "QueuetWaitIdle"; }
1709 void logExecute (TestLog& log, size_t commandIndex) const;
1710 void execute (ExecuteContext& context);
1713 void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1715 log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1718 void QueueWaitIdle::execute (ExecuteContext& context)
1720 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1721 const vk::VkQueue queue = context.getContext().getQueue();
1723 VK_CHECK(vkd.queueWaitIdle(queue));
1726 class DeviceWaitIdle : public Command
1729 DeviceWaitIdle (void) {}
1730 ~DeviceWaitIdle (void) {}
1731 const char* getName (void) const { return "DeviceWaitIdle"; }
1733 void logExecute (TestLog& log, size_t commandIndex) const;
1734 void execute (ExecuteContext& context);
1737 void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1739 log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1742 void DeviceWaitIdle::execute (ExecuteContext& context)
1744 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1745 const vk::VkDevice device = context.getContext().getDevice();
1747 VK_CHECK(vkd.deviceWaitIdle(device));
1753 SubmitContext (const PrepareContext& context,
1754 const vk::VkCommandBuffer commandBuffer)
1755 : m_context (context)
1756 , m_commandBuffer (commandBuffer)
1760 const Memory& getMemory (void) const { return m_context.getMemory(); }
1761 const Context& getContext (void) const { return m_context.getContext(); }
1762 vk::VkCommandBuffer getCommandBuffer (void) const { return m_commandBuffer; }
1764 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
1765 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
1767 vk::VkImage getImage (void) const { return m_context.getImage(); }
1768 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
1769 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
1772 const PrepareContext& m_context;
1773 const vk::VkCommandBuffer m_commandBuffer;
1779 virtual ~CmdCommand (void) {}
1780 virtual const char* getName (void) const = 0;
1782 // Log things that are done during prepare
1783 virtual void logPrepare (TestLog&, size_t) const {}
1784 // Log submitted calls etc.
1785 virtual void logSubmit (TestLog&, size_t) const {}
1787 // Allocate vulkan resources and prepare for submit.
1788 virtual void prepare (PrepareContext&) {}
1790 // Submit commands to command buffer.
1791 virtual void submit (SubmitContext&) {}
1794 virtual void verify (VerifyContext&, size_t) {}
1797 class SubmitCommandBuffer : public Command
1800 SubmitCommandBuffer (const vector<CmdCommand*>& commands);
1801 ~SubmitCommandBuffer (void);
1803 const char* getName (void) const { return "SubmitCommandBuffer"; }
1804 void logExecute (TestLog& log, size_t commandIndex) const;
1805 void logPrepare (TestLog& log, size_t commandIndex) const;
1807 // Allocate command buffer and submit commands to command buffer
1808 void prepare (PrepareContext& context);
1809 void execute (ExecuteContext& context);
1811 // Verify that results are correct.
1812 void verify (VerifyContext& context, size_t commandIndex);
1815 vector<CmdCommand*> m_commands;
1816 vk::Move<vk::VkCommandBuffer> m_commandBuffer;
1819 SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
1820 : m_commands (commands)
1824 SubmitCommandBuffer::~SubmitCommandBuffer (void)
1826 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1827 delete m_commands[cmdNdx];
1830 void SubmitCommandBuffer::prepare (PrepareContext& context)
1832 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1833 const vk::VkDevice device = context.getContext().getDevice();
1834 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
1836 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1838 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1840 CmdCommand& command = *m_commands[cmdNdx];
1842 command.prepare(context);
1846 SubmitContext submitContext (context, *m_commandBuffer);
1848 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1850 CmdCommand& command = *m_commands[cmdNdx];
1852 command.submit(submitContext);
1855 endCommandBuffer(vkd, *m_commandBuffer);
1859 void SubmitCommandBuffer::execute (ExecuteContext& context)
1861 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1862 const vk::VkCommandBuffer cmd = *m_commandBuffer;
1863 const vk::VkQueue queue = context.getContext().getQueue();
1864 const vk::VkSubmitInfo submit =
1866 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1871 (const vk::VkPipelineStageFlags*)DE_NULL,
1880 vkd.queueSubmit(queue, 1, &submit, 0);
1883 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1885 const string sectionName (de::toString(commandIndex) + ":" + getName());
1886 const tcu::ScopedLogSection section (context.getLog(), sectionName, sectionName);
1888 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1889 m_commands[cmdNdx]->verify(context, cmdNdx);
1892 void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1894 const string sectionName (de::toString(commandIndex) + ":" + getName());
1895 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1897 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1898 m_commands[cmdNdx]->logPrepare(log, cmdNdx);
1901 void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1903 const string sectionName (de::toString(commandIndex) + ":" + getName());
1904 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1906 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1907 m_commands[cmdNdx]->logSubmit(log, cmdNdx);
1910 class PipelineBarrier : public CmdCommand
1920 PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1921 const vk::VkAccessFlags srcAccesses,
1922 const vk::VkPipelineStageFlags dstStages,
1923 const vk::VkAccessFlags dstAccesses,
1925 const tcu::Maybe<vk::VkImageLayout> imageLayout);
1926 ~PipelineBarrier (void) {}
1927 const char* getName (void) const { return "PipelineBarrier"; }
1929 void logSubmit (TestLog& log, size_t commandIndex) const;
1930 void submit (SubmitContext& context);
1933 const vk::VkPipelineStageFlags m_srcStages;
1934 const vk::VkAccessFlags m_srcAccesses;
1935 const vk::VkPipelineStageFlags m_dstStages;
1936 const vk::VkAccessFlags m_dstAccesses;
1938 const tcu::Maybe<vk::VkImageLayout> m_imageLayout;
1941 PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1942 const vk::VkAccessFlags srcAccesses,
1943 const vk::VkPipelineStageFlags dstStages,
1944 const vk::VkAccessFlags dstAccesses,
1946 const tcu::Maybe<vk::VkImageLayout> imageLayout)
1947 : m_srcStages (srcStages)
1948 , m_srcAccesses (srcAccesses)
1949 , m_dstStages (dstStages)
1950 , m_dstAccesses (dstAccesses)
1952 , m_imageLayout (imageLayout)
1956 void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
1958 log << TestLog::Message << commandIndex << ":" << getName()
1959 << " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
1960 : m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
1961 : "Image pipeline barrier")
1962 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
1963 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
1966 void PipelineBarrier::submit (SubmitContext& context)
1968 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1969 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
1975 const vk::VkMemoryBarrier barrier =
1977 vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
1984 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
1990 const vk::VkBufferMemoryBarrier barrier =
1992 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
1998 VK_QUEUE_FAMILY_IGNORED,
1999 VK_QUEUE_FAMILY_IGNORED,
2001 context.getBuffer(),
2006 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2012 const vk::VkImageMemoryBarrier barrier =
2014 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2023 VK_QUEUE_FAMILY_IGNORED,
2024 VK_QUEUE_FAMILY_IGNORED,
2028 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2034 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2039 DE_FATAL("Unknown pipeline barrier type");
2043 class ImageTransition : public CmdCommand
2046 ImageTransition (vk::VkPipelineStageFlags srcStages,
2047 vk::VkAccessFlags srcAccesses,
2049 vk::VkPipelineStageFlags dstStages,
2050 vk::VkAccessFlags dstAccesses,
2052 vk::VkImageLayout srcLayout,
2053 vk::VkImageLayout dstLayout);
2055 ~ImageTransition (void) {}
2056 const char* getName (void) const { return "ImageTransition"; }
2058 void prepare (PrepareContext& context);
2059 void logSubmit (TestLog& log, size_t commandIndex) const;
2060 void submit (SubmitContext& context);
2061 void verify (VerifyContext& context, size_t);
2064 const vk::VkPipelineStageFlags m_srcStages;
2065 const vk::VkAccessFlags m_srcAccesses;
2066 const vk::VkPipelineStageFlags m_dstStages;
2067 const vk::VkAccessFlags m_dstAccesses;
2068 const vk::VkImageLayout m_srcLayout;
2069 const vk::VkImageLayout m_dstLayout;
2071 vk::VkDeviceSize m_imageMemorySize;
2074 ImageTransition::ImageTransition (vk::VkPipelineStageFlags srcStages,
2075 vk::VkAccessFlags srcAccesses,
2077 vk::VkPipelineStageFlags dstStages,
2078 vk::VkAccessFlags dstAccesses,
2080 vk::VkImageLayout srcLayout,
2081 vk::VkImageLayout dstLayout)
2082 : m_srcStages (srcStages)
2083 , m_srcAccesses (srcAccesses)
2084 , m_dstStages (dstStages)
2085 , m_dstAccesses (dstAccesses)
2086 , m_srcLayout (srcLayout)
2087 , m_dstLayout (dstLayout)
2091 void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2093 log << TestLog::Message << commandIndex << ":" << getName()
2094 << " Image transition pipeline barrier"
2095 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2096 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2097 << ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2100 void ImageTransition::prepare (PrepareContext& context)
2102 DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);
2104 context.setImageLayout(m_dstLayout);
2105 m_imageMemorySize = context.getImageMemorySize();
2108 void ImageTransition::submit (SubmitContext& context)
2110 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2111 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2112 const vk::VkImageMemoryBarrier barrier =
2114 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2123 VK_QUEUE_FAMILY_IGNORED,
2124 VK_QUEUE_FAMILY_IGNORED,
2128 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2134 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2137 void ImageTransition::verify (VerifyContext& context, size_t)
2139 context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
2142 class FillBuffer : public CmdCommand
2145 FillBuffer (deUint32 value) : m_value(value) {}
2146 ~FillBuffer (void) {}
2147 const char* getName (void) const { return "FillBuffer"; }
2149 void logSubmit (TestLog& log, size_t commandIndex) const;
2150 void submit (SubmitContext& context);
2151 void verify (VerifyContext& context, size_t commandIndex);
2154 const deUint32 m_value;
2155 vk::VkDeviceSize m_bufferSize;
2158 void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2160 log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2163 void FillBuffer::submit (SubmitContext& context)
2165 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2166 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2167 const vk::VkBuffer buffer = context.getBuffer();
2168 const vk::VkDeviceSize sizeMask = ~(0x3ull); // \note Round down to multiple of 4
2170 m_bufferSize = sizeMask & context.getBufferSize();
2171 vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2174 void FillBuffer::verify (VerifyContext& context, size_t)
2176 ReferenceMemory& reference = context.getReference();
2178 for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2180 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2181 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2183 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
2188 class UpdateBuffer : public CmdCommand
2191 UpdateBuffer (deUint32 seed) : m_seed(seed) {}
2192 ~UpdateBuffer (void) {}
2193 const char* getName (void) const { return "UpdateBuffer"; }
2195 void logSubmit (TestLog& log, size_t commandIndex) const;
2196 void submit (SubmitContext& context);
2197 void verify (VerifyContext& context, size_t commandIndex);
2200 const deUint32 m_seed;
2201 vk::VkDeviceSize m_bufferSize;
2204 void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2206 log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2209 void UpdateBuffer::submit (SubmitContext& context)
2211 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2212 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2213 const vk::VkBuffer buffer = context.getBuffer();
2214 const size_t blockSize = 65536;
2215 std::vector<deUint8> data (blockSize, 0);
2216 de::Random rng (m_seed);
2218 m_bufferSize = context.getBufferSize();
2220 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2222 for (size_t ndx = 0; ndx < data.size(); ndx++)
2223 data[ndx] = rng.getUint8();
2225 if (m_bufferSize - updated > blockSize)
2226 vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2228 vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
2232 void UpdateBuffer::verify (VerifyContext& context, size_t)
2234 ReferenceMemory& reference = context.getReference();
2235 const size_t blockSize = 65536;
2236 vector<deUint8> data (blockSize, 0);
2237 de::Random rng (m_seed);
2239 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2241 for (size_t ndx = 0; ndx < data.size(); ndx++)
2242 data[ndx] = rng.getUint8();
2244 if (m_bufferSize - updated > blockSize)
2245 reference.setData(updated, blockSize, &data[0]);
2247 reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
2251 class BufferCopyToBuffer : public CmdCommand
2254 BufferCopyToBuffer (void) {}
2255 ~BufferCopyToBuffer (void) {}
2256 const char* getName (void) const { return "BufferCopyToBuffer"; }
2258 void logPrepare (TestLog& log, size_t commandIndex) const;
2259 void prepare (PrepareContext& context);
2260 void logSubmit (TestLog& log, size_t commandIndex) const;
2261 void submit (SubmitContext& context);
2262 void verify (VerifyContext& context, size_t commandIndex);
2265 vk::VkDeviceSize m_bufferSize;
2266 vk::Move<vk::VkBuffer> m_dstBuffer;
2267 vk::Move<vk::VkDeviceMemory> m_memory;
2270 void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2272 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2275 void BufferCopyToBuffer::prepare (PrepareContext& context)
2277 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2278 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2279 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2280 const vk::VkDevice device = context.getContext().getDevice();
2281 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2283 m_bufferSize = context.getBufferSize();
2285 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2286 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2289 void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2291 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2294 void BufferCopyToBuffer::submit (SubmitContext& context)
2296 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2297 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2298 const vk::VkBufferCopy range =
2304 vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2307 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2309 tcu::ResultCollector& resultCollector (context.getResultCollector());
2310 ReferenceMemory& reference (context.getReference());
2311 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2312 const vk::VkDevice device = context.getContext().getDevice();
2313 const vk::VkQueue queue = context.getContext().getQueue();
2314 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2315 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2316 const vk::VkBufferMemoryBarrier barrier =
2318 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2321 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2322 vk::VK_ACCESS_HOST_READ_BIT,
2324 VK_QUEUE_FAMILY_IGNORED,
2325 VK_QUEUE_FAMILY_IGNORED,
2331 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2333 endCommandBuffer(vkd, *commandBuffer);
2334 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2337 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2340 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2343 const deUint8* const data = (const deUint8*)ptr;
2345 for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2347 if (reference.isDefined(pos))
2349 if (data[pos] != reference.get(pos))
2351 resultCollector.fail(
2352 de::toString(commandIndex) + ":" + getName()
2353 + " Result differs from reference, Expected: "
2354 + de::toString(tcu::toHex<8>(reference.get(pos)))
2356 + de::toString(tcu::toHex<8>(data[pos]))
2358 + de::toString(pos));
2365 vkd.unmapMemory(device, *m_memory);
2368 context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
2372 class BufferCopyFromBuffer : public CmdCommand
2375 BufferCopyFromBuffer (deUint32 seed) : m_seed(seed) {}
2376 ~BufferCopyFromBuffer (void) {}
2377 const char* getName (void) const { return "BufferCopyFromBuffer"; }
2379 void logPrepare (TestLog& log, size_t commandIndex) const;
2380 void prepare (PrepareContext& context);
2381 void logSubmit (TestLog& log, size_t commandIndex) const;
2382 void submit (SubmitContext& context);
2383 void verify (VerifyContext& context, size_t commandIndex);
2386 const deUint32 m_seed;
2387 vk::VkDeviceSize m_bufferSize;
2388 vk::Move<vk::VkBuffer> m_srcBuffer;
2389 vk::Move<vk::VkDeviceMemory> m_memory;
2392 void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2394 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
2397 void BufferCopyFromBuffer::prepare (PrepareContext& context)
2399 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2400 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2401 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2402 const vk::VkDevice device = context.getContext().getDevice();
2403 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2405 m_bufferSize = context.getBufferSize();
2406 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2407 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2410 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2411 de::Random rng (m_seed);
2414 deUint8* const data = (deUint8*)ptr;
2416 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2417 data[ndx] = rng.getUint8();
2420 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2421 vkd.unmapMemory(device, *m_memory);
2425 void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2427 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
2430 void BufferCopyFromBuffer::submit (SubmitContext& context)
2432 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2433 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2434 const vk::VkBufferCopy range =
2440 vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
2443 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2445 ReferenceMemory& reference (context.getReference());
2446 de::Random rng (m_seed);
2448 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2449 reference.set(ndx, rng.getUint8());
2452 class BufferCopyToImage : public CmdCommand
2455 BufferCopyToImage (void) {}
2456 ~BufferCopyToImage (void) {}
2457 const char* getName (void) const { return "BufferCopyToImage"; }
2459 void logPrepare (TestLog& log, size_t commandIndex) const;
2460 void prepare (PrepareContext& context);
2461 void logSubmit (TestLog& log, size_t commandIndex) const;
2462 void submit (SubmitContext& context);
2463 void verify (VerifyContext& context, size_t commandIndex);
2466 deInt32 m_imageWidth;
2467 deInt32 m_imageHeight;
2468 vk::Move<vk::VkImage> m_dstImage;
2469 vk::Move<vk::VkDeviceMemory> m_memory;
2472 void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2474 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
2477 void BufferCopyToImage::prepare (PrepareContext& context)
2479 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2480 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2481 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2482 const vk::VkDevice device = context.getContext().getDevice();
2483 const vk::VkQueue queue = context.getContext().getQueue();
2484 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2485 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2486 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2488 m_imageWidth = imageSize[0];
2489 m_imageHeight = imageSize[1];
2492 const vk::VkImageCreateInfo createInfo =
2494 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2498 vk::VK_IMAGE_TYPE_2D,
2499 vk::VK_FORMAT_R8G8B8A8_UNORM,
2501 (deUint32)m_imageWidth,
2502 (deUint32)m_imageHeight,
2505 1, 1, // mipLevels, arrayLayers
2506 vk::VK_SAMPLE_COUNT_1_BIT,
2508 vk::VK_IMAGE_TILING_OPTIMAL,
2509 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2510 vk::VK_SHARING_MODE_EXCLUSIVE,
2512 (deUint32)queueFamilies.size(),
2514 vk::VK_IMAGE_LAYOUT_UNDEFINED
2517 m_dstImage = vk::createImage(vkd, device, &createInfo);
2520 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
2523 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2524 const vk::VkImageMemoryBarrier barrier =
2526 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2530 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2532 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2533 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2535 VK_QUEUE_FAMILY_IGNORED,
2536 VK_QUEUE_FAMILY_IGNORED,
2540 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2542 1, // Mip level count
2548 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2550 endCommandBuffer(vkd, *commandBuffer);
2551 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2555 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2557 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
2560 void BufferCopyToImage::submit (SubmitContext& context)
2562 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2563 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2564 const vk::VkBufferImageCopy region =
2569 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2576 (deUint32)m_imageWidth,
2577 (deUint32)m_imageHeight,
2582 vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
2585 void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
2587 tcu::ResultCollector& resultCollector (context.getResultCollector());
2588 ReferenceMemory& reference (context.getReference());
2589 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2590 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2591 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2592 const vk::VkDevice device = context.getContext().getDevice();
2593 const vk::VkQueue queue = context.getContext().getQueue();
2594 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2595 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2596 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2597 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2598 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2600 const vk::VkImageMemoryBarrier imageBarrier =
2602 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2605 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2606 vk::VK_ACCESS_TRANSFER_READ_BIT,
2608 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2609 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2611 VK_QUEUE_FAMILY_IGNORED,
2612 VK_QUEUE_FAMILY_IGNORED,
2616 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2618 1, // Mip level count
2623 const vk::VkBufferMemoryBarrier bufferBarrier =
2625 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2628 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2629 vk::VK_ACCESS_HOST_READ_BIT,
2631 VK_QUEUE_FAMILY_IGNORED,
2632 VK_QUEUE_FAMILY_IGNORED,
2638 const vk::VkBufferImageCopy region =
2643 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2650 (deUint32)m_imageWidth,
2651 (deUint32)m_imageHeight,
2656 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
2657 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, ®ion);
2658 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2661 endCommandBuffer(vkd, *commandBuffer);
2662 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2665 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2667 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2670 const deUint8* const data = (const deUint8*)ptr;
2672 for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
2674 if (reference.isDefined(pos))
2676 if (data[pos] != reference.get(pos))
2678 resultCollector.fail(
2679 de::toString(commandIndex) + ":" + getName()
2680 + " Result differs from reference, Expected: "
2681 + de::toString(tcu::toHex<8>(reference.get(pos)))
2683 + de::toString(tcu::toHex<8>(data[pos]))
2685 + de::toString(pos));
2692 vkd.unmapMemory(device, *memory);
2696 class BufferCopyFromImage : public CmdCommand
2699 BufferCopyFromImage (deUint32 seed) : m_seed(seed) {}
2700 ~BufferCopyFromImage (void) {}
2701 const char* getName (void) const { return "BufferCopyFromImage"; }
2703 void logPrepare (TestLog& log, size_t commandIndex) const;
2704 void prepare (PrepareContext& context);
2705 void logSubmit (TestLog& log, size_t commandIndex) const;
2706 void submit (SubmitContext& context);
2707 void verify (VerifyContext& context, size_t commandIndex);
2710 const deUint32 m_seed;
2711 deInt32 m_imageWidth;
2712 deInt32 m_imageHeight;
2713 vk::Move<vk::VkImage> m_srcImage;
2714 vk::Move<vk::VkDeviceMemory> m_memory;
2717 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2719 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
2722 void BufferCopyFromImage::prepare (PrepareContext& context)
2724 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2725 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2726 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2727 const vk::VkDevice device = context.getContext().getDevice();
2728 const vk::VkQueue queue = context.getContext().getQueue();
2729 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2730 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2731 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2733 m_imageWidth = imageSize[0];
2734 m_imageHeight = imageSize[1];
2737 const vk::VkImageCreateInfo createInfo =
2739 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2743 vk::VK_IMAGE_TYPE_2D,
2744 vk::VK_FORMAT_R8G8B8A8_UNORM,
2746 (deUint32)m_imageWidth,
2747 (deUint32)m_imageHeight,
2750 1, 1, // mipLevels, arrayLayers
2751 vk::VK_SAMPLE_COUNT_1_BIT,
2753 vk::VK_IMAGE_TILING_OPTIMAL,
2754 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2755 vk::VK_SHARING_MODE_EXCLUSIVE,
2757 (deUint32)queueFamilies.size(),
2759 vk::VK_IMAGE_LAYOUT_UNDEFINED
2762 m_srcImage = vk::createImage(vkd, device, &createInfo);
2765 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
2768 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2769 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2770 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2771 const vk::VkImageMemoryBarrier preImageBarrier =
2773 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2777 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2779 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2780 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2782 VK_QUEUE_FAMILY_IGNORED,
2783 VK_QUEUE_FAMILY_IGNORED,
2787 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2789 1, // Mip level count
2794 const vk::VkImageMemoryBarrier postImageBarrier =
2796 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2799 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2802 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2803 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2805 VK_QUEUE_FAMILY_IGNORED,
2806 VK_QUEUE_FAMILY_IGNORED,
2810 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2812 1, // Mip level count
2817 const vk::VkBufferImageCopy region =
2822 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2829 (deUint32)m_imageWidth,
2830 (deUint32)m_imageHeight,
2836 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2837 de::Random rng (m_seed);
2840 deUint8* const data = (deUint8*)ptr;
2842 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2843 data[ndx] = rng.getUint8();
2846 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2847 vkd.unmapMemory(device, *memory);
2850 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2851 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
2852 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2854 endCommandBuffer(vkd, *commandBuffer);
2855 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
2859 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2861 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
2864 void BufferCopyFromImage::submit (SubmitContext& context)
2866 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2867 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2868 const vk::VkBufferImageCopy region =
2873 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2880 (deUint32)m_imageWidth,
2881 (deUint32)m_imageHeight,
2886 vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, ®ion);
2889 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2891 ReferenceMemory& reference (context.getReference());
2892 de::Random rng (m_seed);
2894 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2895 reference.set(ndx, rng.getUint8());
2898 class ImageCopyToBuffer : public CmdCommand
2901 ImageCopyToBuffer (vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
2902 ~ImageCopyToBuffer (void) {}
2903 const char* getName (void) const { return "BufferCopyToImage"; }
2905 void logPrepare (TestLog& log, size_t commandIndex) const;
2906 void prepare (PrepareContext& context);
2907 void logSubmit (TestLog& log, size_t commandIndex) const;
2908 void submit (SubmitContext& context);
2909 void verify (VerifyContext& context, size_t commandIndex);
2912 vk::VkImageLayout m_imageLayout;
2913 vk::VkDeviceSize m_bufferSize;
2914 vk::Move<vk::VkBuffer> m_dstBuffer;
2915 vk::Move<vk::VkDeviceMemory> m_memory;
2916 vk::VkDeviceSize m_imageMemorySize;
2917 deInt32 m_imageWidth;
2918 deInt32 m_imageHeight;
2921 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2923 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
2926 void ImageCopyToBuffer::prepare (PrepareContext& context)
2928 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2929 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2930 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2931 const vk::VkDevice device = context.getContext().getDevice();
2932 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2934 m_imageWidth = context.getImageWidth();
2935 m_imageHeight = context.getImageHeight();
2936 m_bufferSize = 4 * m_imageWidth * m_imageHeight;
2937 m_imageMemorySize = context.getImageMemorySize();
2938 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2939 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2942 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2944 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
2947 void ImageCopyToBuffer::submit (SubmitContext& context)
2949 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2950 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2951 const vk::VkBufferImageCopy region =
2956 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2963 (deUint32)m_imageWidth,
2964 (deUint32)m_imageHeight,
2969 vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, ®ion);
2972 void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2974 tcu::ResultCollector& resultCollector (context.getResultCollector());
2975 ReferenceMemory& reference (context.getReference());
2976 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2977 const vk::VkDevice device = context.getContext().getDevice();
2978 const vk::VkQueue queue = context.getContext().getQueue();
2979 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2980 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2981 const vk::VkBufferMemoryBarrier barrier =
2983 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2986 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2987 vk::VK_ACCESS_HOST_READ_BIT,
2989 VK_QUEUE_FAMILY_IGNORED,
2990 VK_QUEUE_FAMILY_IGNORED,
2996 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2998 endCommandBuffer(vkd, *commandBuffer);
2999 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3001 reference.setUndefined(0, (size_t)m_imageMemorySize);
3003 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3004 const ConstPixelBufferAccess referenceImage (context.getReferenceImage().getAccess());
3005 const ConstPixelBufferAccess resultImage (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3007 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3009 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3010 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3012 vkd.unmapMemory(device, *m_memory);
3016 class ImageCopyFromBuffer : public CmdCommand
3019 ImageCopyFromBuffer (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3020 ~ImageCopyFromBuffer (void) {}
3021 const char* getName (void) const { return "ImageCopyFromBuffer"; }
3023 void logPrepare (TestLog& log, size_t commandIndex) const;
3024 void prepare (PrepareContext& context);
3025 void logSubmit (TestLog& log, size_t commandIndex) const;
3026 void submit (SubmitContext& context);
3027 void verify (VerifyContext& context, size_t commandIndex);
3030 const deUint32 m_seed;
3031 const vk::VkImageLayout m_imageLayout;
3032 deInt32 m_imageWidth;
3033 deInt32 m_imageHeight;
3034 vk::VkDeviceSize m_imageMemorySize;
3035 vk::VkDeviceSize m_bufferSize;
3036 vk::Move<vk::VkBuffer> m_srcBuffer;
3037 vk::Move<vk::VkDeviceMemory> m_memory;
3040 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3042 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
3045 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3047 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3048 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3049 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3050 const vk::VkDevice device = context.getContext().getDevice();
3051 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3053 m_imageWidth = context.getImageHeight();
3054 m_imageHeight = context.getImageWidth();
3055 m_imageMemorySize = context.getImageMemorySize();
3056 m_bufferSize = m_imageWidth * m_imageHeight * 4;
3057 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3058 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3061 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3062 de::Random rng (m_seed);
3065 deUint8* const data = (deUint8*)ptr;
3067 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3068 data[ndx] = rng.getUint8();
3071 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3072 vkd.unmapMemory(device, *m_memory);
3076 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3078 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
3081 void ImageCopyFromBuffer::submit (SubmitContext& context)
3083 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3084 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3085 const vk::VkBufferImageCopy region =
3090 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3097 (deUint32)m_imageWidth,
3098 (deUint32)m_imageHeight,
3103 vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, ®ion);
3106 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3108 ReferenceMemory& reference (context.getReference());
3109 de::Random rng (m_seed);
3111 reference.setUndefined(0, (size_t)m_imageMemorySize);
3114 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3116 for (deInt32 y = 0; y < m_imageHeight; y++)
3117 for (deInt32 x = 0; x < m_imageWidth; x++)
3119 const deUint8 r8 = rng.getUint8();
3120 const deUint8 g8 = rng.getUint8();
3121 const deUint8 b8 = rng.getUint8();
3122 const deUint8 a8 = rng.getUint8();
3124 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3129 class ImageCopyFromImage : public CmdCommand
3132 ImageCopyFromImage (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3133 ~ImageCopyFromImage (void) {}
3134 const char* getName (void) const { return "ImageCopyFromImage"; }
3136 void logPrepare (TestLog& log, size_t commandIndex) const;
3137 void prepare (PrepareContext& context);
3138 void logSubmit (TestLog& log, size_t commandIndex) const;
3139 void submit (SubmitContext& context);
3140 void verify (VerifyContext& context, size_t commandIndex);
3143 const deUint32 m_seed;
3144 const vk::VkImageLayout m_imageLayout;
3145 deInt32 m_imageWidth;
3146 deInt32 m_imageHeight;
3147 vk::VkDeviceSize m_imageMemorySize;
3148 vk::Move<vk::VkImage> m_srcImage;
3149 vk::Move<vk::VkDeviceMemory> m_memory;
3152 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3154 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
3157 void ImageCopyFromImage::prepare (PrepareContext& context)
3159 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3160 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3161 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3162 const vk::VkDevice device = context.getContext().getDevice();
3163 const vk::VkQueue queue = context.getContext().getQueue();
3164 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3165 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3167 m_imageWidth = context.getImageWidth();
3168 m_imageHeight = context.getImageHeight();
3169 m_imageMemorySize = context.getImageMemorySize();
3172 const vk::VkImageCreateInfo createInfo =
3174 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3178 vk::VK_IMAGE_TYPE_2D,
3179 vk::VK_FORMAT_R8G8B8A8_UNORM,
3181 (deUint32)m_imageWidth,
3182 (deUint32)m_imageHeight,
3185 1, 1, // mipLevels, arrayLayers
3186 vk::VK_SAMPLE_COUNT_1_BIT,
3188 vk::VK_IMAGE_TILING_OPTIMAL,
3189 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3190 vk::VK_SHARING_MODE_EXCLUSIVE,
3192 (deUint32)queueFamilies.size(),
3194 vk::VK_IMAGE_LAYOUT_UNDEFINED
3197 m_srcImage = vk::createImage(vkd, device, &createInfo);
3200 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3203 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3204 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3205 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3206 const vk::VkImageMemoryBarrier preImageBarrier =
3208 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3212 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3214 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3215 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3217 VK_QUEUE_FAMILY_IGNORED,
3218 VK_QUEUE_FAMILY_IGNORED,
3222 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3224 1, // Mip level count
3229 const vk::VkImageMemoryBarrier postImageBarrier =
3231 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3234 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3237 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3238 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3240 VK_QUEUE_FAMILY_IGNORED,
3241 VK_QUEUE_FAMILY_IGNORED,
3245 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3247 1, // Mip level count
3252 const vk::VkBufferImageCopy region =
3257 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3264 (deUint32)m_imageWidth,
3265 (deUint32)m_imageHeight,
3271 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3272 de::Random rng (m_seed);
3275 deUint8* const data = (deUint8*)ptr;
3277 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3278 data[ndx] = rng.getUint8();
3281 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3282 vkd.unmapMemory(device, *memory);
3285 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3286 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
3287 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3289 endCommandBuffer(vkd, *commandBuffer);
3290 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3294 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3296 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
3299 void ImageCopyFromImage::submit (SubmitContext& context)
3301 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3302 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3303 const vk::VkImageCopy region =
3306 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3314 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3321 (deUint32)m_imageWidth,
3322 (deUint32)m_imageHeight,
3327 vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, ®ion);
3330 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3332 ReferenceMemory& reference (context.getReference());
3333 de::Random rng (m_seed);
3335 reference.setUndefined(0, (size_t)m_imageMemorySize);
3338 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3340 for (deInt32 y = 0; y < m_imageHeight; y++)
3341 for (deInt32 x = 0; x < m_imageWidth; x++)
3343 const deUint8 r8 = rng.getUint8();
3344 const deUint8 g8 = rng.getUint8();
3345 const deUint8 b8 = rng.getUint8();
3346 const deUint8 a8 = rng.getUint8();
3348 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3353 class ImageCopyToImage : public CmdCommand
3356 ImageCopyToImage (vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
3357 ~ImageCopyToImage (void) {}
3358 const char* getName (void) const { return "ImageCopyToImage"; }
3360 void logPrepare (TestLog& log, size_t commandIndex) const;
3361 void prepare (PrepareContext& context);
3362 void logSubmit (TestLog& log, size_t commandIndex) const;
3363 void submit (SubmitContext& context);
3364 void verify (VerifyContext& context, size_t commandIndex);
3367 const vk::VkImageLayout m_imageLayout;
3368 deInt32 m_imageWidth;
3369 deInt32 m_imageHeight;
3370 vk::VkDeviceSize m_imageMemorySize;
3371 vk::Move<vk::VkImage> m_dstImage;
3372 vk::Move<vk::VkDeviceMemory> m_memory;
3375 void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3377 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
3380 void ImageCopyToImage::prepare (PrepareContext& context)
3382 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3383 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3384 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3385 const vk::VkDevice device = context.getContext().getDevice();
3386 const vk::VkQueue queue = context.getContext().getQueue();
3387 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3388 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3390 m_imageWidth = context.getImageWidth();
3391 m_imageHeight = context.getImageHeight();
3392 m_imageMemorySize = context.getImageMemorySize();
3395 const vk::VkImageCreateInfo createInfo =
3397 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3401 vk::VK_IMAGE_TYPE_2D,
3402 vk::VK_FORMAT_R8G8B8A8_UNORM,
3404 (deUint32)m_imageWidth,
3405 (deUint32)m_imageHeight,
3408 1, 1, // mipLevels, arrayLayers
3409 vk::VK_SAMPLE_COUNT_1_BIT,
3411 vk::VK_IMAGE_TILING_OPTIMAL,
3412 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3413 vk::VK_SHARING_MODE_EXCLUSIVE,
3415 (deUint32)queueFamilies.size(),
3417 vk::VK_IMAGE_LAYOUT_UNDEFINED
3420 m_dstImage = vk::createImage(vkd, device, &createInfo);
3423 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3426 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3427 const vk::VkImageMemoryBarrier barrier =
3429 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3433 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3435 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3436 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3438 VK_QUEUE_FAMILY_IGNORED,
3439 VK_QUEUE_FAMILY_IGNORED,
3443 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3445 1, // Mip level count
3451 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
3453 endCommandBuffer(vkd, *commandBuffer);
3454 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3458 void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3460 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
3463 void ImageCopyToImage::submit (SubmitContext& context)
3465 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3466 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3467 const vk::VkImageCopy region =
3470 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3478 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3485 (deUint32)m_imageWidth,
3486 (deUint32)m_imageHeight,
3491 vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
3494 void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
3496 tcu::ResultCollector& resultCollector (context.getResultCollector());
3497 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3498 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3499 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3500 const vk::VkDevice device = context.getContext().getDevice();
3501 const vk::VkQueue queue = context.getContext().getQueue();
3502 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3503 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3504 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3505 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3506 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3508 const vk::VkImageMemoryBarrier imageBarrier =
3510 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3513 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3514 vk::VK_ACCESS_TRANSFER_READ_BIT,
3516 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3517 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3519 VK_QUEUE_FAMILY_IGNORED,
3520 VK_QUEUE_FAMILY_IGNORED,
3524 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3526 1, // Mip level count
3531 const vk::VkBufferMemoryBarrier bufferBarrier =
3533 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3536 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3537 vk::VK_ACCESS_HOST_READ_BIT,
3539 VK_QUEUE_FAMILY_IGNORED,
3540 VK_QUEUE_FAMILY_IGNORED,
3545 const vk::VkBufferImageCopy region =
3550 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3557 (deUint32)m_imageWidth,
3558 (deUint32)m_imageHeight,
3563 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
3564 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, ®ion);
3565 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3568 endCommandBuffer(vkd, *commandBuffer);
3569 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3572 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3574 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3577 const deUint8* const data = (const deUint8*)ptr;
3578 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
3579 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3581 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3582 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3585 vkd.unmapMemory(device, *memory);
3595 class ImageBlitFromImage : public CmdCommand
3598 ImageBlitFromImage (deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
3599 ~ImageBlitFromImage (void) {}
3600 const char* getName (void) const { return "ImageBlitFromImage"; }
3602 void logPrepare (TestLog& log, size_t commandIndex) const;
3603 void prepare (PrepareContext& context);
3604 void logSubmit (TestLog& log, size_t commandIndex) const;
3605 void submit (SubmitContext& context);
3606 void verify (VerifyContext& context, size_t commandIndex);
3609 const deUint32 m_seed;
3610 const BlitScale m_scale;
3611 const vk::VkImageLayout m_imageLayout;
3612 deInt32 m_imageWidth;
3613 deInt32 m_imageHeight;
3614 vk::VkDeviceSize m_imageMemorySize;
3615 deInt32 m_srcImageWidth;
3616 deInt32 m_srcImageHeight;
3617 vk::Move<vk::VkImage> m_srcImage;
3618 vk::Move<vk::VkDeviceMemory> m_memory;
3621 void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3623 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
3626 void ImageBlitFromImage::prepare (PrepareContext& context)
3628 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3629 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3630 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3631 const vk::VkDevice device = context.getContext().getDevice();
3632 const vk::VkQueue queue = context.getContext().getQueue();
3633 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3634 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3636 m_imageWidth = context.getImageWidth();
3637 m_imageHeight = context.getImageHeight();
3638 m_imageMemorySize = context.getImageMemorySize();
3640 if (m_scale == BLIT_SCALE_10)
3642 m_srcImageWidth = m_imageWidth;
3643 m_srcImageHeight = m_imageHeight;
3645 else if (m_scale == BLIT_SCALE_20)
3647 m_srcImageWidth = m_imageWidth / 2;
3648 m_srcImageHeight = m_imageHeight / 2;
3651 DE_FATAL("Unsupported scale");
3654 const vk::VkImageCreateInfo createInfo =
3656 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3660 vk::VK_IMAGE_TYPE_2D,
3661 vk::VK_FORMAT_R8G8B8A8_UNORM,
3663 (deUint32)m_srcImageWidth,
3664 (deUint32)m_srcImageHeight,
3667 1, 1, // mipLevels, arrayLayers
3668 vk::VK_SAMPLE_COUNT_1_BIT,
3670 vk::VK_IMAGE_TILING_OPTIMAL,
3671 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3672 vk::VK_SHARING_MODE_EXCLUSIVE,
3674 (deUint32)queueFamilies.size(),
3676 vk::VK_IMAGE_LAYOUT_UNDEFINED
3679 m_srcImage = vk::createImage(vkd, device, &createInfo);
3682 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3685 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3686 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3687 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3688 const vk::VkImageMemoryBarrier preImageBarrier =
3690 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3694 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3696 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3697 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3699 VK_QUEUE_FAMILY_IGNORED,
3700 VK_QUEUE_FAMILY_IGNORED,
3704 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3706 1, // Mip level count
3711 const vk::VkImageMemoryBarrier postImageBarrier =
3713 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3716 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3719 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3720 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3722 VK_QUEUE_FAMILY_IGNORED,
3723 VK_QUEUE_FAMILY_IGNORED,
3727 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3729 1, // Mip level count
3734 const vk::VkBufferImageCopy region =
3739 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3746 (deUint32)m_srcImageWidth,
3747 (deUint32)m_srcImageHeight,
3753 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
3754 de::Random rng (m_seed);
3757 deUint8* const data = (deUint8*)ptr;
3759 for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
3760 data[ndx] = rng.getUint8();
3763 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_srcImageWidth * m_srcImageHeight);
3764 vkd.unmapMemory(device, *memory);
3767 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3768 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
3769 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3771 endCommandBuffer(vkd, *commandBuffer);
3772 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3776 void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3778 log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
3781 void ImageBlitFromImage::submit (SubmitContext& context)
3783 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3784 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3785 const vk::VkImageBlit region =
3789 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3805 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3819 vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, ®ion, vk::VK_FILTER_NEAREST);
3822 void ImageBlitFromImage::verify (VerifyContext& context, size_t)
3824 ReferenceMemory& reference (context.getReference());
3825 de::Random rng (m_seed);
3827 reference.setUndefined(0, (size_t)m_imageMemorySize);
3830 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3832 if (m_scale == BLIT_SCALE_10)
3834 for (deInt32 y = 0; y < m_imageHeight; y++)
3835 for (deInt32 x = 0; x < m_imageWidth; x++)
3837 const deUint8 r8 = rng.getUint8();
3838 const deUint8 g8 = rng.getUint8();
3839 const deUint8 b8 = rng.getUint8();
3840 const deUint8 a8 = rng.getUint8();
3842 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3845 else if (m_scale == BLIT_SCALE_20)
3847 tcu::TextureLevel source (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
3848 const float xscale = ((float)m_srcImageWidth) / (float)m_imageWidth;
3849 const float yscale = ((float)m_srcImageHeight) / (float)m_imageHeight;
3851 for (deInt32 y = 0; y < m_srcImageHeight; y++)
3852 for (deInt32 x = 0; x < m_srcImageWidth; x++)
3854 const deUint8 r8 = rng.getUint8();
3855 const deUint8 g8 = rng.getUint8();
3856 const deUint8 b8 = rng.getUint8();
3857 const deUint8 a8 = rng.getUint8();
3859 source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
3862 for (deInt32 y = 0; y < m_imageHeight; y++)
3863 for (deInt32 x = 0; x < m_imageWidth; x++)
3864 refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
3867 DE_FATAL("Unsupported scale");
3871 class ImageBlitToImage : public CmdCommand
3874 ImageBlitToImage (BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
3875 ~ImageBlitToImage (void) {}
3876 const char* getName (void) const { return "ImageBlitToImage"; }
3878 void logPrepare (TestLog& log, size_t commandIndex) const;
3879 void prepare (PrepareContext& context);
3880 void logSubmit (TestLog& log, size_t commandIndex) const;
3881 void submit (SubmitContext& context);
3882 void verify (VerifyContext& context, size_t commandIndex);
3885 const BlitScale m_scale;
3886 const vk::VkImageLayout m_imageLayout;
3887 deInt32 m_imageWidth;
3888 deInt32 m_imageHeight;
3889 vk::VkDeviceSize m_imageMemorySize;
3890 deInt32 m_dstImageWidth;
3891 deInt32 m_dstImageHeight;
3892 vk::Move<vk::VkImage> m_dstImage;
3893 vk::Move<vk::VkDeviceMemory> m_memory;
3896 void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3898 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
3901 void ImageBlitToImage::prepare (PrepareContext& context)
3903 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3904 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3905 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3906 const vk::VkDevice device = context.getContext().getDevice();
3907 const vk::VkQueue queue = context.getContext().getQueue();
3908 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3909 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3911 m_imageWidth = context.getImageWidth();
3912 m_imageHeight = context.getImageHeight();
3913 m_imageMemorySize = context.getImageMemorySize();
3915 if (m_scale == BLIT_SCALE_10)
3917 m_dstImageWidth = context.getImageWidth();
3918 m_dstImageHeight = context.getImageHeight();
3920 else if (m_scale == BLIT_SCALE_20)
3922 m_dstImageWidth = context.getImageWidth() * 2;
3923 m_dstImageHeight = context.getImageHeight() * 2;
3926 DE_FATAL("Unsupportd blit scale");
3929 const vk::VkImageCreateInfo createInfo =
3931 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3935 vk::VK_IMAGE_TYPE_2D,
3936 vk::VK_FORMAT_R8G8B8A8_UNORM,
3938 (deUint32)m_dstImageWidth,
3939 (deUint32)m_dstImageHeight,
3942 1, 1, // mipLevels, arrayLayers
3943 vk::VK_SAMPLE_COUNT_1_BIT,
3945 vk::VK_IMAGE_TILING_OPTIMAL,
3946 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3947 vk::VK_SHARING_MODE_EXCLUSIVE,
3949 (deUint32)queueFamilies.size(),
3951 vk::VK_IMAGE_LAYOUT_UNDEFINED
3954 m_dstImage = vk::createImage(vkd, device, &createInfo);
3957 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3960 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3961 const vk::VkImageMemoryBarrier barrier =
3963 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3967 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3969 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3970 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3972 VK_QUEUE_FAMILY_IGNORED,
3973 VK_QUEUE_FAMILY_IGNORED,
3977 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3979 1, // Mip level count
3985 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
3987 endCommandBuffer(vkd, *commandBuffer);
3988 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
3992 void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
3994 log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
3997 void ImageBlitToImage::submit (SubmitContext& context)
3999 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4000 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4001 const vk::VkImageBlit region =
4005 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4021 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4035 vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion, vk::VK_FILTER_NEAREST);
4038 void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
4040 tcu::ResultCollector& resultCollector (context.getResultCollector());
4041 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4042 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4043 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4044 const vk::VkDevice device = context.getContext().getDevice();
4045 const vk::VkQueue queue = context.getContext().getQueue();
4046 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4047 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4048 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4049 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4050 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4052 const vk::VkImageMemoryBarrier imageBarrier =
4054 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4057 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4058 vk::VK_ACCESS_TRANSFER_READ_BIT,
4060 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4061 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4063 VK_QUEUE_FAMILY_IGNORED,
4064 VK_QUEUE_FAMILY_IGNORED,
4068 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4070 1, // Mip level count
4075 const vk::VkBufferMemoryBarrier bufferBarrier =
4077 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4080 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4081 vk::VK_ACCESS_HOST_READ_BIT,
4083 VK_QUEUE_FAMILY_IGNORED,
4084 VK_QUEUE_FAMILY_IGNORED,
4089 const vk::VkBufferImageCopy region =
4094 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4101 (deUint32)m_dstImageWidth,
4102 (deUint32)m_dstImageHeight,
4107 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4108 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, ®ion);
4109 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4112 endCommandBuffer(vkd, *commandBuffer);
4113 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
4116 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);
4118 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_dstImageWidth * m_dstImageHeight);
4120 if (m_scale == BLIT_SCALE_10)
4122 const deUint8* const data = (const deUint8*)ptr;
4123 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4124 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4126 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4127 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4129 else if (m_scale == BLIT_SCALE_20)
4131 const deUint8* const data = (const deUint8*)ptr;
4132 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4133 tcu::TextureLevel reference (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);
4136 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4138 for (deInt32 y = 0; y < m_dstImageHeight; y++)
4139 for (deInt32 x = 0; x < m_dstImageWidth; x++)
4141 reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
4145 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4146 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4149 DE_FATAL("Unknown scale");
4151 vkd.unmapMemory(device, *memory);
4155 class PrepareRenderPassContext
4158 PrepareRenderPassContext (PrepareContext& context,
4159 vk::VkRenderPass renderPass,
4160 vk::VkFramebuffer framebuffer,
4161 deInt32 targetWidth,
4162 deInt32 targetHeight)
4163 : m_context (context)
4164 , m_renderPass (renderPass)
4165 , m_framebuffer (framebuffer)
4166 , m_targetWidth (targetWidth)
4167 , m_targetHeight (targetHeight)
4171 const Memory& getMemory (void) const { return m_context.getMemory(); }
4172 const Context& getContext (void) const { return m_context.getContext(); }
4173 const vk::BinaryCollection& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
4175 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
4176 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
4178 vk::VkImage getImage (void) const { return m_context.getImage(); }
4179 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
4180 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
4181 vk::VkImageLayout getImageLayout (void) const { return m_context.getImageLayout(); }
4183 deInt32 getTargetWidth (void) const { return m_targetWidth; }
4184 deInt32 getTargetHeight (void) const { return m_targetHeight; }
4186 vk::VkRenderPass getRenderPass (void) const { return m_renderPass; }
4189 PrepareContext& m_context;
4190 const vk::VkRenderPass m_renderPass;
4191 const vk::VkFramebuffer m_framebuffer;
4192 const deInt32 m_targetWidth;
4193 const deInt32 m_targetHeight;
4196 class VerifyRenderPassContext
4199 VerifyRenderPassContext (VerifyContext& context,
4200 deInt32 targetWidth,
4201 deInt32 targetHeight)
4202 : m_context (context)
4203 , m_referenceTarget (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
4207 const Context& getContext (void) const { return m_context.getContext(); }
4208 TestLog& getLog (void) const { return m_context.getLog(); }
4209 tcu::ResultCollector& getResultCollector (void) const { return m_context.getResultCollector(); }
4211 TextureLevel& getReferenceTarget (void) { return m_referenceTarget; }
4213 ReferenceMemory& getReference (void) { return m_context.getReference(); }
4214 TextureLevel& getReferenceImage (void) { return m_context.getReferenceImage();}
4217 VerifyContext& m_context;
4218 TextureLevel m_referenceTarget;
4221 class RenderPassCommand
4224 virtual ~RenderPassCommand (void) {}
4225 virtual const char* getName (void) const = 0;
4227 // Log things that are done during prepare
4228 virtual void logPrepare (TestLog&, size_t) const {}
4229 // Log submitted calls etc.
4230 virtual void logSubmit (TestLog&, size_t) const {}
4232 // Allocate vulkan resources and prepare for submit.
4233 virtual void prepare (PrepareRenderPassContext&) {}
4235 // Submit commands to command buffer.
4236 virtual void submit (SubmitContext&) {}
4239 virtual void verify (VerifyRenderPassContext&, size_t) {}
4242 class SubmitRenderPass : public CmdCommand
4245 SubmitRenderPass (const vector<RenderPassCommand*>& commands);
4246 ~SubmitRenderPass (void);
4247 const char* getName (void) const { return "SubmitRenderPass"; }
4249 void logPrepare (TestLog&, size_t) const;
4250 void logSubmit (TestLog&, size_t) const;
4252 void prepare (PrepareContext&);
4253 void submit (SubmitContext&);
4255 void verify (VerifyContext&, size_t);
4258 const deInt32 m_targetWidth;
4259 const deInt32 m_targetHeight;
4260 vk::Move<vk::VkRenderPass> m_renderPass;
4261 vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
4262 de::MovePtr<vk::Allocation> m_colorTargetMemory2;
4263 vk::Move<vk::VkImage> m_colorTarget;
4264 vk::Move<vk::VkImageView> m_colorTargetView;
4265 vk::Move<vk::VkFramebuffer> m_framebuffer;
4266 vector<RenderPassCommand*> m_commands;
4269 SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
4270 : m_targetWidth (256)
4271 , m_targetHeight (256)
4272 , m_commands (commands)
4276 SubmitRenderPass::~SubmitRenderPass()
4278 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4279 delete m_commands[cmdNdx];
4282 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4284 const string sectionName (de::toString(commandIndex) + ":" + getName());
4285 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4287 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4289 RenderPassCommand& command = *m_commands[cmdNdx];
4290 command.logPrepare(log, cmdNdx);
4294 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4296 const string sectionName (de::toString(commandIndex) + ":" + getName());
4297 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4299 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4301 RenderPassCommand& command = *m_commands[cmdNdx];
4302 command.logSubmit(log, cmdNdx);
4306 void SubmitRenderPass::prepare (PrepareContext& context)
4308 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4309 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4310 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4311 const vk::VkDevice device = context.getContext().getDevice();
4312 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4315 const vk::VkImageCreateInfo createInfo =
4317 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4321 vk::VK_IMAGE_TYPE_2D,
4322 vk::VK_FORMAT_R8G8B8A8_UNORM,
4323 { (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
4326 vk::VK_SAMPLE_COUNT_1_BIT,
4327 vk::VK_IMAGE_TILING_OPTIMAL,
4328 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4329 vk::VK_SHARING_MODE_EXCLUSIVE,
4330 (deUint32)queueFamilies.size(),
4332 vk::VK_IMAGE_LAYOUT_UNDEFINED
4335 m_colorTarget = vk::createImage(vkd, device, &createInfo);
4338 m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);
4341 const vk::VkImageViewCreateInfo createInfo =
4343 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
4348 vk::VK_IMAGE_VIEW_TYPE_2D,
4349 vk::VK_FORMAT_R8G8B8A8_UNORM,
4351 vk::VK_COMPONENT_SWIZZLE_R,
4352 vk::VK_COMPONENT_SWIZZLE_G,
4353 vk::VK_COMPONENT_SWIZZLE_B,
4354 vk::VK_COMPONENT_SWIZZLE_A
4357 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4365 m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
4368 m_renderPass = vk::makeRenderPass(vkd, device, vk::VK_FORMAT_R8G8B8A8_UNORM, vk::VK_FORMAT_UNDEFINED, vk::VK_ATTACHMENT_LOAD_OP_CLEAR, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
4371 const vk::VkImageView imageViews[] =
4375 const vk::VkFramebufferCreateInfo createInfo =
4377 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
4382 DE_LENGTH_OF_ARRAY(imageViews),
4384 (deUint32)m_targetWidth,
4385 (deUint32)m_targetHeight,
4389 m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
4393 PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);
4395 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4397 RenderPassCommand& command = *m_commands[cmdNdx];
4398 command.prepare(renderpassContext);
4403 void SubmitRenderPass::submit (SubmitContext& context)
4405 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4406 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4408 beginRenderPass(vkd, commandBuffer, *m_renderPass, *m_framebuffer, vk::makeRect2D(0, 0, m_targetWidth, m_targetHeight), tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4410 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4412 RenderPassCommand& command = *m_commands[cmdNdx];
4414 command.submit(context);
4417 endRenderPass(vkd, commandBuffer);
4420 void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
4422 TestLog& log (context.getLog());
4423 tcu::ResultCollector& resultCollector (context.getResultCollector());
4424 const string sectionName (de::toString(commandIndex) + ":" + getName());
4425 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4426 VerifyRenderPassContext verifyContext (context, m_targetWidth, m_targetHeight);
4428 tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4430 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4432 RenderPassCommand& command = *m_commands[cmdNdx];
4433 command.verify(verifyContext, cmdNdx);
4437 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4438 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4439 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4440 const vk::VkDevice device = context.getContext().getDevice();
4441 const vk::VkQueue queue = context.getContext().getQueue();
4442 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4443 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4444 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4445 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4446 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4448 const vk::VkImageMemoryBarrier imageBarrier =
4450 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4453 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4454 vk::VK_ACCESS_TRANSFER_READ_BIT,
4456 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4457 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4459 VK_QUEUE_FAMILY_IGNORED,
4460 VK_QUEUE_FAMILY_IGNORED,
4464 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4466 1, // Mip level count
4471 const vk::VkBufferMemoryBarrier bufferBarrier =
4473 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4476 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4477 vk::VK_ACCESS_HOST_READ_BIT,
4479 VK_QUEUE_FAMILY_IGNORED,
4480 VK_QUEUE_FAMILY_IGNORED,
4485 const vk::VkBufferImageCopy region =
4490 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4497 (deUint32)m_targetWidth,
4498 (deUint32)m_targetHeight,
4503 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4504 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, ®ion);
4505 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4508 endCommandBuffer(vkd, *commandBuffer);
4509 submitCommandsAndWait(vkd, device, queue, *commandBuffer);
4512 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
4514 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_targetWidth * m_targetHeight);
4517 const deUint8* const data = (const deUint8*)ptr;
4518 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
4519 const ConstPixelBufferAccess& refAccess (verifyContext.getReferenceTarget().getAccess());
4521 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4522 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4525 vkd.unmapMemory(device, *memory);
4530 class ExecuteSecondaryCommandBuffer : public CmdCommand
4533 ExecuteSecondaryCommandBuffer (const vector<CmdCommand*>& commands);
4534 ~ExecuteSecondaryCommandBuffer (void);
4535 const char* getName (void) const { return "ExecuteSecondaryCommandBuffer"; }
4537 void logPrepare (TestLog&, size_t) const;
4538 void logSubmit (TestLog&, size_t) const;
4540 void prepare (PrepareContext&);
4541 void submit (SubmitContext&);
4543 void verify (VerifyContext&, size_t);
4546 vk::Move<vk::VkCommandBuffer> m_commandBuffer;
4547 vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
4548 de::MovePtr<vk::Allocation> m_colorTargetMemory2;
4549 vk::Move<vk::VkImage> m_colorTarget;
4550 vk::Move<vk::VkImageView> m_colorTargetView;
4551 vk::Move<vk::VkFramebuffer> m_framebuffer;
4552 vector<CmdCommand*> m_commands;
4555 ExecuteSecondaryCommandBuffer::ExecuteSecondaryCommandBuffer(const vector<CmdCommand*>& commands)
4556 : m_commands (commands)
4560 ExecuteSecondaryCommandBuffer::~ExecuteSecondaryCommandBuffer (void)
4562 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4563 delete m_commands[cmdNdx];
4566 void ExecuteSecondaryCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4568 const string sectionName (de::toString(commandIndex) + ":" + getName());
4569 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4571 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4573 CmdCommand& command = *m_commands[cmdNdx];
4574 command.logPrepare(log, cmdNdx);
4578 void ExecuteSecondaryCommandBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4580 const string sectionName (de::toString(commandIndex) + ":" + getName());
4581 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4583 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4585 CmdCommand& command = *m_commands[cmdNdx];
4586 command.logSubmit(log, cmdNdx);
4590 void ExecuteSecondaryCommandBuffer::prepare (PrepareContext& context)
4592 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4593 const vk::VkDevice device = context.getContext().getDevice();
4594 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4596 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4598 CmdCommand& command = *m_commands[cmdNdx];
4600 command.prepare(context);
4603 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY);
4605 SubmitContext submitContext (context, *m_commandBuffer);
4607 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4609 CmdCommand& command = *m_commands[cmdNdx];
4611 command.submit(submitContext);
4614 endCommandBuffer(vkd, *m_commandBuffer);
4618 void ExecuteSecondaryCommandBuffer::submit (SubmitContext& context)
4620 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4621 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4625 vkd.cmdExecuteCommands(commandBuffer, 1, &m_commandBuffer.get());
4629 void ExecuteSecondaryCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
4631 const string sectionName (de::toString(commandIndex) + ":" + getName());
4632 const tcu::ScopedLogSection section (context.getLog(), sectionName, sectionName);
4634 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4635 m_commands[cmdNdx]->verify(context, cmdNdx);
4638 struct PipelineResources
4640 vk::Move<vk::VkPipeline> pipeline;
4641 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
4642 vk::Move<vk::VkPipelineLayout> pipelineLayout;
4645 void createPipelineWithResources (const vk::DeviceInterface& vkd,
4646 const vk::VkDevice device,
4647 const vk::VkRenderPass renderPass,
4648 const deUint32 subpass,
4649 const vk::VkShaderModule& vertexShaderModule,
4650 const vk::VkShaderModule& fragmentShaderModule,
4651 const deUint32 viewPortWidth,
4652 const deUint32 viewPortHeight,
4653 const vector<vk::VkVertexInputBindingDescription>& vertexBindingDescriptions,
4654 const vector<vk::VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
4655 const vector<vk::VkDescriptorSetLayoutBinding>& bindings,
4656 const vk::VkPrimitiveTopology topology,
4657 deUint32 pushConstantRangeCount,
4658 const vk::VkPushConstantRange* pushConstantRanges,
4659 PipelineResources& resources)
4661 if (!bindings.empty())
4663 const vk::VkDescriptorSetLayoutCreateInfo createInfo =
4665 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
4669 (deUint32)bindings.size(),
4670 bindings.empty() ? DE_NULL : &bindings[0]
4673 resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
4677 const vk::VkDescriptorSetLayout descriptorSetLayout_ = *resources.descriptorSetLayout;
4678 const vk::VkPipelineLayoutCreateInfo createInfo =
4680 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
4684 resources.descriptorSetLayout ? 1u : 0u,
4685 resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,
4687 pushConstantRangeCount,
4691 resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
4695 const std::vector<vk::VkViewport> viewports (1, vk::makeViewport(0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f));
4696 const std::vector<vk::VkRect2D> scissors (1, vk::makeRect2D(0, 0, viewPortWidth, viewPortHeight));
4698 const vk::VkPipelineVertexInputStateCreateInfo vertexInputState =
4700 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
4704 (deUint32)vertexBindingDescriptions.size(),
4705 vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],
4707 (deUint32)vertexAttributeDescriptions.size(),
4708 vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
4711 resources.pipeline = vk::makeGraphicsPipeline(vkd, // const DeviceInterface& vk
4712 device, // const VkDevice device
4713 *resources.pipelineLayout, // const VkPipelineLayout pipelineLayout
4714 vertexShaderModule, // const VkShaderModule vertexShaderModule
4715 DE_NULL, // const VkShaderModule tessellationControlModule
4716 DE_NULL, // const VkShaderModule tessellationEvalModule
4717 DE_NULL, // const VkShaderModule geometryShaderModule
4718 fragmentShaderModule, // const VkShaderModule fragmentShaderModule
4719 renderPass, // const VkRenderPass renderPass
4720 viewports, // const std::vector<VkViewport>& viewports
4721 scissors, // const std::vector<VkRect2D>& scissors
4722 topology, // const VkPrimitiveTopology topology
4723 subpass, // const deUint32 subpass
4724 0u, // const deUint32 patchControlPoints
4725 &vertexInputState); // const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo
4729 class RenderIndexBuffer : public RenderPassCommand
4732 RenderIndexBuffer (void) {}
4733 ~RenderIndexBuffer (void) {}
4735 const char* getName (void) const { return "RenderIndexBuffer"; }
4736 void logPrepare (TestLog&, size_t) const;
4737 void logSubmit (TestLog&, size_t) const;
4738 void prepare (PrepareRenderPassContext&);
4739 void submit (SubmitContext& context);
4740 void verify (VerifyRenderPassContext&, size_t);
4743 PipelineResources m_resources;
4744 vk::VkDeviceSize m_bufferSize;
4747 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4749 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
4752 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4754 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
4757 void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4759 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4760 const vk::VkDevice device = context.getContext().getDevice();
4761 const vk::VkRenderPass renderPass = context.getRenderPass();
4762 const deUint32 subpass = 0;
4763 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4764 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4766 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4767 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4768 m_bufferSize = context.getBufferSize();
4771 void RenderIndexBuffer::submit (SubmitContext& context)
4773 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4774 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4776 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4777 vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4778 vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
4781 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4783 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4785 const deUint8 x = context.getReference().get(pos * 2);
4786 const deUint8 y = context.getReference().get((pos * 2) + 1);
4788 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4792 class RenderVertexBuffer : public RenderPassCommand
4795 RenderVertexBuffer (void) {}
4796 ~RenderVertexBuffer (void) {}
4798 const char* getName (void) const { return "RenderVertexBuffer"; }
4799 void logPrepare (TestLog&, size_t) const;
4800 void logSubmit (TestLog&, size_t) const;
4801 void prepare (PrepareRenderPassContext&);
4802 void submit (SubmitContext& context);
4803 void verify (VerifyRenderPassContext&, size_t);
4806 PipelineResources m_resources;
4807 vk::VkDeviceSize m_bufferSize;
4810 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4812 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4815 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4817 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
4820 void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4822 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4823 const vk::VkDevice device = context.getContext().getDevice();
4824 const vk::VkRenderPass renderPass = context.getRenderPass();
4825 const deUint32 subpass = 0;
4826 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4827 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4829 vector<vk::VkVertexInputAttributeDescription> vertexAttributeDescriptions;
4830 vector<vk::VkVertexInputBindingDescription> vertexBindingDescriptions;
4833 const vk::VkVertexInputBindingDescription vertexBindingDescription =
4837 vk::VK_VERTEX_INPUT_RATE_VERTEX
4840 vertexBindingDescriptions.push_back(vertexBindingDescription);
4843 const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
4847 vk::VK_FORMAT_R8G8_UNORM,
4851 vertexAttributeDescriptions.push_back(vertexAttributeDescription);
4853 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4854 vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4856 m_bufferSize = context.getBufferSize();
4859 void RenderVertexBuffer::submit (SubmitContext& context)
4861 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4862 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4863 const vk::VkDeviceSize offset = 0;
4864 const vk::VkBuffer buffer = context.getBuffer();
4866 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4867 vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
4868 vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0);
4871 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
4873 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4875 const deUint8 x = context.getReference().get(pos * 2);
4876 const deUint8 y = context.getReference().get((pos * 2) + 1);
4878 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
4882 class RenderVertexUniformBuffer : public RenderPassCommand
4885 RenderVertexUniformBuffer (void) {}
4886 ~RenderVertexUniformBuffer (void);
4888 const char* getName (void) const { return "RenderVertexUniformBuffer"; }
4889 void logPrepare (TestLog&, size_t) const;
4890 void logSubmit (TestLog&, size_t) const;
4891 void prepare (PrepareRenderPassContext&);
4892 void submit (SubmitContext& context);
4893 void verify (VerifyRenderPassContext&, size_t);
4896 PipelineResources m_resources;
4897 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
4898 vector<vk::VkDescriptorSet> m_descriptorSets;
4900 vk::VkDeviceSize m_bufferSize;
4903 RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
4907 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4909 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
4912 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4914 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
4917 void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
4919 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4920 const vk::VkDevice device = context.getContext().getDevice();
4921 const vk::VkRenderPass renderPass = context.getRenderPass();
4922 const deUint32 subpass = 0;
4923 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
4924 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4925 vector<vk::VkDescriptorSetLayoutBinding> bindings;
4927 m_bufferSize = context.getBufferSize();
4930 const vk::VkDescriptorSetLayoutBinding binding =
4933 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
4935 vk::VK_SHADER_STAGE_VERTEX_BIT,
4939 bindings.push_back(binding);
4942 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4943 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4946 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
4947 const vk::VkDescriptorPoolSize poolSizes =
4949 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
4952 const vk::VkDescriptorPoolCreateInfo createInfo =
4954 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
4956 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
4963 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
4964 m_descriptorSets.resize(descriptorCount);
4967 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
4969 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
4970 const vk::VkDescriptorSetAllocateInfo allocateInfo =
4972 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
4980 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
4983 const vk::VkDescriptorBufferInfo bufferInfo =
4985 context.getBuffer(),
4986 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
4987 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
4988 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
4989 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
4991 const vk::VkWriteDescriptorSet write =
4993 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
4995 m_descriptorSets[descriptorSetNdx],
4999 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5005 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5010 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5012 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5013 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5015 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5017 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5019 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5020 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5021 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5022 const deUint32 count = (deUint32)(size / 2);
5024 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5025 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5029 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5031 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5033 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5034 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5035 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5036 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5037 const size_t count = size / 2;
5039 for (size_t pos = 0; pos < count; pos++)
5041 const deUint8 x = context.getReference().get(offset + pos * 2);
5042 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5044 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5049 class RenderVertexUniformTexelBuffer : public RenderPassCommand
5052 RenderVertexUniformTexelBuffer (void) {}
5053 ~RenderVertexUniformTexelBuffer (void);
5055 const char* getName (void) const { return "RenderVertexUniformTexelBuffer"; }
5056 void logPrepare (TestLog&, size_t) const;
5057 void logSubmit (TestLog&, size_t) const;
5058 void prepare (PrepareRenderPassContext&);
5059 void submit (SubmitContext& context);
5060 void verify (VerifyRenderPassContext&, size_t);
5063 PipelineResources m_resources;
5064 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5065 vector<vk::VkDescriptorSet> m_descriptorSets;
5066 vector<vk::VkBufferView> m_bufferViews;
5068 const vk::DeviceInterface* m_vkd;
5069 vk::VkDevice m_device;
5070 vk::VkDeviceSize m_bufferSize;
5071 deUint32 m_maxUniformTexelCount;
5074 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5076 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5078 if (!!m_bufferViews[bufferViewNdx])
5080 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5081 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5086 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5088 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5091 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5093 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
5096 void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
5098 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5099 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5100 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5101 const vk::VkDevice device = context.getContext().getDevice();
5102 const vk::VkRenderPass renderPass = context.getRenderPass();
5103 const deUint32 subpass = 0;
5104 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
5105 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5106 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5110 m_bufferSize = context.getBufferSize();
5111 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5114 const vk::VkDescriptorSetLayoutBinding binding =
5117 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5119 vk::VK_SHADER_STAGE_VERTEX_BIT,
5123 bindings.push_back(binding);
5126 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5127 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5130 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
5131 const vk::VkDescriptorPoolSize poolSizes =
5133 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5136 const vk::VkDescriptorPoolCreateInfo createInfo =
5138 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5140 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5147 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5148 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5149 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5152 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5154 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5155 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5156 : m_maxUniformTexelCount * 2) / 2;
5157 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5158 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5160 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5168 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5171 const vk::VkBufferViewCreateInfo createInfo =
5173 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5177 context.getBuffer(),
5178 vk::VK_FORMAT_R16_UINT,
5179 descriptorSetNdx * m_maxUniformTexelCount * 2,
5183 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5187 const vk::VkWriteDescriptorSet write =
5189 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5191 m_descriptorSets[descriptorSetNdx],
5195 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5198 &m_bufferViews[descriptorSetNdx]
5201 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5206 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5208 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5209 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5211 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5213 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5215 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5216 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5217 : m_maxUniformTexelCount * 2) / 2;
5219 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5220 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5224 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5226 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5228 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 2;
5229 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5230 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5231 : m_maxUniformTexelCount * 2) / 2;
5233 for (size_t pos = 0; pos < (size_t)count; pos++)
5235 const deUint8 x = context.getReference().get(offset + pos * 2);
5236 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5238 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5243 class RenderVertexStorageBuffer : public RenderPassCommand
5246 RenderVertexStorageBuffer (void) {}
5247 ~RenderVertexStorageBuffer (void);
5249 const char* getName (void) const { return "RenderVertexStorageBuffer"; }
5250 void logPrepare (TestLog&, size_t) const;
5251 void logSubmit (TestLog&, size_t) const;
5252 void prepare (PrepareRenderPassContext&);
5253 void submit (SubmitContext& context);
5254 void verify (VerifyRenderPassContext&, size_t);
5257 PipelineResources m_resources;
5258 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5259 vector<vk::VkDescriptorSet> m_descriptorSets;
5261 vk::VkDeviceSize m_bufferSize;
5264 RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
5268 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5270 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5273 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5275 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5278 void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
5280 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5281 const vk::VkDevice device = context.getContext().getDevice();
5282 const vk::VkRenderPass renderPass = context.getRenderPass();
5283 const deUint32 subpass = 0;
5284 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
5285 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5286 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5288 m_bufferSize = context.getBufferSize();
5291 const vk::VkDescriptorSetLayoutBinding binding =
5294 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5296 vk::VK_SHADER_STAGE_VERTEX_BIT,
5300 bindings.push_back(binding);
5303 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5304 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5307 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
5308 const vk::VkDescriptorPoolSize poolSizes =
5310 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5313 const vk::VkDescriptorPoolCreateInfo createInfo =
5315 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5317 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5324 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5325 m_descriptorSets.resize(descriptorCount);
5328 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5330 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5331 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5333 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5341 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5344 const vk::VkDescriptorBufferInfo bufferInfo =
5346 context.getBuffer(),
5347 descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
5348 de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
5350 const vk::VkWriteDescriptorSet write =
5352 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5354 m_descriptorSets[descriptorSetNdx],
5358 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5364 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5369 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5371 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5372 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5374 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5376 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5378 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5379 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5380 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5382 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5383 vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
5387 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5389 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5391 const size_t offset = descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5392 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5393 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5394 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5396 for (size_t pos = 0; pos < size / 2; pos++)
5398 const deUint8 x = context.getReference().get(offset + pos * 2);
5399 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5401 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5406 class RenderVertexStorageTexelBuffer : public RenderPassCommand
5409 RenderVertexStorageTexelBuffer (void) {}
5410 ~RenderVertexStorageTexelBuffer (void);
5412 const char* getName (void) const { return "RenderVertexStorageTexelBuffer"; }
5413 void logPrepare (TestLog&, size_t) const;
5414 void logSubmit (TestLog&, size_t) const;
5415 void prepare (PrepareRenderPassContext&);
5416 void submit (SubmitContext& context);
5417 void verify (VerifyRenderPassContext&, size_t);
5420 PipelineResources m_resources;
5421 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5422 vector<vk::VkDescriptorSet> m_descriptorSets;
5423 vector<vk::VkBufferView> m_bufferViews;
5425 const vk::DeviceInterface* m_vkd;
5426 vk::VkDevice m_device;
5427 vk::VkDeviceSize m_bufferSize;
5428 deUint32 m_maxStorageTexelCount;
5431 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5433 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5435 if (!!m_bufferViews[bufferViewNdx])
5437 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5438 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5443 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5445 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5448 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5450 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5453 void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
5455 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5456 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5457 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5458 const vk::VkDevice device = context.getContext().getDevice();
5459 const vk::VkRenderPass renderPass = context.getRenderPass();
5460 const deUint32 subpass = 0;
5461 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
5462 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5463 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5467 m_bufferSize = context.getBufferSize();
5468 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5471 const vk::VkDescriptorSetLayoutBinding binding =
5474 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5476 vk::VK_SHADER_STAGE_VERTEX_BIT,
5480 bindings.push_back(binding);
5483 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5484 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5487 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
5488 const vk::VkDescriptorPoolSize poolSizes =
5490 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5493 const vk::VkDescriptorPoolCreateInfo createInfo =
5495 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5497 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5504 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5505 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5506 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5509 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5511 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5512 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5514 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5522 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5525 const vk::VkBufferViewCreateInfo createInfo =
5527 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5531 context.getBuffer(),
5532 vk::VK_FORMAT_R32_UINT,
5533 descriptorSetNdx * m_maxStorageTexelCount * 4,
5534 (deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
5537 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5541 const vk::VkWriteDescriptorSet write =
5543 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5545 m_descriptorSets[descriptorSetNdx],
5549 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5552 &m_bufferViews[descriptorSetNdx]
5555 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5560 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5562 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5563 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5565 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5567 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5569 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5570 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5571 : m_maxStorageTexelCount * 4) / 2;
5573 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5574 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5578 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5580 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5582 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
5583 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5584 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5585 : m_maxStorageTexelCount * 4) / 2;
5587 DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5588 DE_ASSERT(context.getReference().getSize() > offset);
5589 DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5591 for (size_t pos = 0; pos < (size_t)count; pos++)
5593 const deUint8 x = context.getReference().get(offset + pos * 2);
5594 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5596 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
5601 class RenderVertexStorageImage : public RenderPassCommand
5604 RenderVertexStorageImage (void) {}
5605 ~RenderVertexStorageImage (void);
5607 const char* getName (void) const { return "RenderVertexStorageImage"; }
5608 void logPrepare (TestLog&, size_t) const;
5609 void logSubmit (TestLog&, size_t) const;
5610 void prepare (PrepareRenderPassContext&);
5611 void submit (SubmitContext& context);
5612 void verify (VerifyRenderPassContext&, size_t);
5615 PipelineResources m_resources;
5616 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5617 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5618 vk::Move<vk::VkImageView> m_imageView;
5621 RenderVertexStorageImage::~RenderVertexStorageImage (void)
5625 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5627 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
5630 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5632 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
5635 void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
5637 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5638 const vk::VkDevice device = context.getContext().getDevice();
5639 const vk::VkRenderPass renderPass = context.getRenderPass();
5640 const deUint32 subpass = 0;
5641 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
5642 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5643 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5646 const vk::VkDescriptorSetLayoutBinding binding =
5649 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5651 vk::VK_SHADER_STAGE_VERTEX_BIT,
5655 bindings.push_back(binding);
5658 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5659 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5662 const vk::VkDescriptorPoolSize poolSizes =
5664 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5667 const vk::VkDescriptorPoolCreateInfo createInfo =
5669 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5671 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5678 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5682 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5683 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5685 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5693 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5696 const vk::VkImageViewCreateInfo createInfo =
5698 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5703 vk::VK_IMAGE_VIEW_TYPE_2D,
5704 vk::VK_FORMAT_R8G8B8A8_UNORM,
5705 vk::makeComponentMappingRGBA(),
5707 vk::VK_IMAGE_ASPECT_COLOR_BIT,
5715 m_imageView = vk::createImageView(vkd, device, &createInfo);
5719 const vk::VkDescriptorImageInfo imageInfo =
5723 context.getImageLayout()
5725 const vk::VkWriteDescriptorSet write =
5727 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5733 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5739 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5744 void RenderVertexStorageImage::submit (SubmitContext& context)
5746 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5747 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5749 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5751 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5752 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5755 void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5757 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5759 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
5760 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5763 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5765 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5769 class RenderVertexSampledImage : public RenderPassCommand
5772 RenderVertexSampledImage (void) {}
5773 ~RenderVertexSampledImage (void);
5775 const char* getName (void) const { return "RenderVertexSampledImage"; }
5776 void logPrepare (TestLog&, size_t) const;
5777 void logSubmit (TestLog&, size_t) const;
5778 void prepare (PrepareRenderPassContext&);
5779 void submit (SubmitContext& context);
5780 void verify (VerifyRenderPassContext&, size_t);
5783 PipelineResources m_resources;
5784 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5785 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5786 vk::Move<vk::VkImageView> m_imageView;
5787 vk::Move<vk::VkSampler> m_sampler;
5790 RenderVertexSampledImage::~RenderVertexSampledImage (void)
5794 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5796 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
5799 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5801 log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
5804 void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
5806 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5807 const vk::VkDevice device = context.getContext().getDevice();
5808 const vk::VkRenderPass renderPass = context.getRenderPass();
5809 const deUint32 subpass = 0;
5810 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
5811 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5812 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5815 const vk::VkDescriptorSetLayoutBinding binding =
5818 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5820 vk::VK_SHADER_STAGE_VERTEX_BIT,
5824 bindings.push_back(binding);
5827 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5828 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5831 const vk::VkDescriptorPoolSize poolSizes =
5833 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5836 const vk::VkDescriptorPoolCreateInfo createInfo =
5838 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5840 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5847 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5851 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5852 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5854 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5862 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5865 const vk::VkImageViewCreateInfo createInfo =
5867 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5872 vk::VK_IMAGE_VIEW_TYPE_2D,
5873 vk::VK_FORMAT_R8G8B8A8_UNORM,
5874 vk::makeComponentMappingRGBA(),
5876 vk::VK_IMAGE_ASPECT_COLOR_BIT,
5884 m_imageView = vk::createImageView(vkd, device, &createInfo);
5888 const vk::VkSamplerCreateInfo createInfo =
5890 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
5894 vk::VK_FILTER_NEAREST,
5895 vk::VK_FILTER_NEAREST,
5897 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
5898 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
5899 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
5900 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
5905 vk::VK_COMPARE_OP_ALWAYS,
5908 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
5912 m_sampler = vk::createSampler(vkd, device, &createInfo);
5916 const vk::VkDescriptorImageInfo imageInfo =
5920 context.getImageLayout()
5922 const vk::VkWriteDescriptorSet write =
5924 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5930 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5936 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5941 void RenderVertexSampledImage::submit (SubmitContext& context)
5943 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5944 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5946 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5948 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5949 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
5952 void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
5954 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5956 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
5957 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5960 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5962 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5966 class RenderFragmentUniformBuffer : public RenderPassCommand
5969 RenderFragmentUniformBuffer (void) {}
5970 ~RenderFragmentUniformBuffer (void);
5972 const char* getName (void) const { return "RenderFragmentUniformBuffer"; }
5973 void logPrepare (TestLog&, size_t) const;
5974 void logSubmit (TestLog&, size_t) const;
5975 void prepare (PrepareRenderPassContext&);
5976 void submit (SubmitContext& context);
5977 void verify (VerifyRenderPassContext&, size_t);
5980 PipelineResources m_resources;
5981 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5982 vector<vk::VkDescriptorSet> m_descriptorSets;
5984 vk::VkDeviceSize m_bufferSize;
5985 size_t m_targetWidth;
5986 size_t m_targetHeight;
5989 RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
5993 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5995 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5998 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6000 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6003 void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
6005 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6006 const vk::VkDevice device = context.getContext().getDevice();
6007 const vk::VkRenderPass renderPass = context.getRenderPass();
6008 const deUint32 subpass = 0;
6009 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6010 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
6011 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6013 m_bufferSize = de::min(context.getBufferSize(), (vk::VkDeviceSize)MAX_SIZE);
6014 m_targetWidth = context.getTargetWidth();
6015 m_targetHeight = context.getTargetHeight();
6018 const vk::VkDescriptorSetLayoutBinding binding =
6021 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6023 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6027 bindings.push_back(binding);
6029 const vk::VkPushConstantRange pushConstantRange =
6031 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6036 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6037 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6040 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
6041 const vk::VkDescriptorPoolSize poolSizes =
6043 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6046 const vk::VkDescriptorPoolCreateInfo createInfo =
6048 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6050 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6057 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6058 m_descriptorSets.resize(descriptorCount);
6061 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6063 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6064 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6066 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6074 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6077 const vk::VkDescriptorBufferInfo bufferInfo =
6079 context.getBuffer(),
6080 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
6081 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6082 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6083 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6085 const vk::VkWriteDescriptorSet write =
6087 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6089 m_descriptorSets[descriptorSetNdx],
6093 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6099 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6104 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6106 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6107 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6109 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6111 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6115 const deUint32 callId;
6116 const deUint32 valuesPerPixel;
6119 (deUint32)descriptorSetNdx,
6120 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6123 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6124 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6125 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6129 void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
6131 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
6132 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
6133 const size_t arrayIntSize = arraySize * 4;
6135 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6136 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6138 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (arrayIntSize / valuesPerPixel), m_descriptorSets.size() - 1);
6140 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6142 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
6143 const deUint32 callId = (deUint32)descriptorSetNdx;
6145 const deUint32 id = callId * ((deUint32)arrayIntSize / valuesPerPixel) + (deUint32)y * 256u + (deUint32)x;
6147 if (y * 256u + x < callId * (arrayIntSize / valuesPerPixel))
6151 deUint32 value = id;
6153 for (deUint32 i = 0; i < valuesPerPixel; i++)
6155 value = ((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 0))
6156 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 1)) << 8u)
6157 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 2)) << 16u)
6158 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 3)) << 24u);
6161 const UVec4 vec ((value >> 0u) & 0xFFu,
6162 (value >> 8u) & 0xFFu,
6163 (value >> 16u) & 0xFFu,
6164 (value >> 24u) & 0xFFu);
6166 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6172 class RenderFragmentStorageBuffer : public RenderPassCommand
6175 RenderFragmentStorageBuffer (void) {}
6176 ~RenderFragmentStorageBuffer (void);
6178 const char* getName (void) const { return "RenderFragmentStorageBuffer"; }
6179 void logPrepare (TestLog&, size_t) const;
6180 void logSubmit (TestLog&, size_t) const;
6181 void prepare (PrepareRenderPassContext&);
6182 void submit (SubmitContext& context);
6183 void verify (VerifyRenderPassContext&, size_t);
6186 PipelineResources m_resources;
6187 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6188 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
6190 vk::VkDeviceSize m_bufferSize;
6191 size_t m_targetWidth;
6192 size_t m_targetHeight;
6195 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
6199 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6201 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6204 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6206 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6209 void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
6211 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6212 const vk::VkDevice device = context.getContext().getDevice();
6213 const vk::VkRenderPass renderPass = context.getRenderPass();
6214 const deUint32 subpass = 0;
6215 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6216 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
6217 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6219 m_bufferSize = context.getBufferSize();
6220 m_targetWidth = context.getTargetWidth();
6221 m_targetHeight = context.getTargetHeight();
6224 const vk::VkDescriptorSetLayoutBinding binding =
6227 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6229 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6233 bindings.push_back(binding);
6235 const vk::VkPushConstantRange pushConstantRange =
6237 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6242 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6243 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6246 const deUint32 descriptorCount = 1;
6247 const vk::VkDescriptorPoolSize poolSizes =
6249 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6252 const vk::VkDescriptorPoolCreateInfo createInfo =
6254 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6256 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6263 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6267 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6268 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6270 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6278 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6281 const vk::VkDescriptorBufferInfo bufferInfo =
6283 context.getBuffer(),
6287 const vk::VkWriteDescriptorSet write =
6289 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6291 m_descriptorSet.get(),
6295 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6301 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6306 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6308 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6309 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6311 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6315 const deUint32 valuesPerPixel;
6316 const deUint32 bufferSize;
6319 (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6320 (deUint32)m_bufferSize
6323 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6324 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6325 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6328 void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
6330 const deUint32 valuesPerPixel = (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);
6332 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6333 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6335 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6337 deUint32 value = id;
6339 for (deUint32 i = 0; i < valuesPerPixel; i++)
6341 value = (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
6342 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
6343 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
6344 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);
6347 const UVec4 vec ((value >> 0u) & 0xFFu,
6348 (value >> 8u) & 0xFFu,
6349 (value >> 16u) & 0xFFu,
6350 (value >> 24u) & 0xFFu);
6352 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6356 class RenderFragmentUniformTexelBuffer : public RenderPassCommand
6359 RenderFragmentUniformTexelBuffer (void) {}
6360 ~RenderFragmentUniformTexelBuffer (void);
6362 const char* getName (void) const { return "RenderFragmentUniformTexelBuffer"; }
6363 void logPrepare (TestLog&, size_t) const;
6364 void logSubmit (TestLog&, size_t) const;
6365 void prepare (PrepareRenderPassContext&);
6366 void submit (SubmitContext& context);
6367 void verify (VerifyRenderPassContext&, size_t);
6370 PipelineResources m_resources;
6371 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6372 vector<vk::VkDescriptorSet> m_descriptorSets;
6373 vector<vk::VkBufferView> m_bufferViews;
6375 const vk::DeviceInterface* m_vkd;
6376 vk::VkDevice m_device;
6377 vk::VkDeviceSize m_bufferSize;
6378 deUint32 m_maxUniformTexelCount;
6379 size_t m_targetWidth;
6380 size_t m_targetHeight;
6383 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6385 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6387 if (!!m_bufferViews[bufferViewNdx])
6389 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6390 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6395 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6397 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6400 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6402 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6405 void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
6407 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6408 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6409 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6410 const vk::VkDevice device = context.getContext().getDevice();
6411 const vk::VkRenderPass renderPass = context.getRenderPass();
6412 const deUint32 subpass = 0;
6413 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6414 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
6415 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6419 m_bufferSize = context.getBufferSize();
6420 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6421 m_targetWidth = context.getTargetWidth();
6422 m_targetHeight = context.getTargetHeight();
6425 const vk::VkDescriptorSetLayoutBinding binding =
6428 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6430 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6434 bindings.push_back(binding);
6436 const vk::VkPushConstantRange pushConstantRange =
6438 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6443 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6444 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6447 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
6448 const vk::VkDescriptorPoolSize poolSizes =
6450 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6453 const vk::VkDescriptorPoolCreateInfo createInfo =
6455 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6457 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6464 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6465 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6466 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6469 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6471 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6472 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6473 : m_maxUniformTexelCount * 4) / 4;
6474 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6475 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6477 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6485 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6488 const vk::VkBufferViewCreateInfo createInfo =
6490 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6494 context.getBuffer(),
6495 vk::VK_FORMAT_R32_UINT,
6496 descriptorSetNdx * m_maxUniformTexelCount * 4,
6500 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6504 const vk::VkWriteDescriptorSet write =
6506 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6508 m_descriptorSets[descriptorSetNdx],
6512 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6515 &m_bufferViews[descriptorSetNdx]
6518 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6523 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6525 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6526 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6528 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6530 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6534 const deUint32 callId;
6535 const deUint32 valuesPerPixel;
6536 const deUint32 maxUniformTexelCount;
6539 (deUint32)descriptorSetNdx,
6540 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6541 m_maxUniformTexelCount
6544 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6545 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6546 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6550 void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6552 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);
6554 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6555 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6557 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6559 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6561 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 4;
6562 const deUint32 callId = (deUint32)descriptorSetNdx;
6564 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6565 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6566 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6567 : m_maxUniformTexelCount * 4) / 4;
6569 if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
6573 deUint32 value = id;
6575 for (deUint32 i = 0; i < valuesPerPixel; i++)
6577 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6578 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6579 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6580 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6583 const UVec4 vec ((value >> 0u) & 0xFFu,
6584 (value >> 8u) & 0xFFu,
6585 (value >> 16u) & 0xFFu,
6586 (value >> 24u) & 0xFFu);
6588 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6594 class RenderFragmentStorageTexelBuffer : public RenderPassCommand
6597 RenderFragmentStorageTexelBuffer (void) {}
6598 ~RenderFragmentStorageTexelBuffer (void);
6600 const char* getName (void) const { return "RenderFragmentStorageTexelBuffer"; }
6601 void logPrepare (TestLog&, size_t) const;
6602 void logSubmit (TestLog&, size_t) const;
6603 void prepare (PrepareRenderPassContext&);
6604 void submit (SubmitContext& context);
6605 void verify (VerifyRenderPassContext&, size_t);
6608 PipelineResources m_resources;
6609 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6610 vector<vk::VkDescriptorSet> m_descriptorSets;
6611 vector<vk::VkBufferView> m_bufferViews;
6613 const vk::DeviceInterface* m_vkd;
6614 vk::VkDevice m_device;
6615 vk::VkDeviceSize m_bufferSize;
6616 deUint32 m_maxStorageTexelCount;
6617 size_t m_targetWidth;
6618 size_t m_targetHeight;
6621 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6623 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6625 if (!!m_bufferViews[bufferViewNdx])
6627 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6628 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6633 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6635 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
6638 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6640 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6643 void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
6645 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6646 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6647 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6648 const vk::VkDevice device = context.getContext().getDevice();
6649 const vk::VkRenderPass renderPass = context.getRenderPass();
6650 const deUint32 subpass = 0;
6651 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6652 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
6653 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6657 m_bufferSize = context.getBufferSize();
6658 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6659 m_targetWidth = context.getTargetWidth();
6660 m_targetHeight = context.getTargetHeight();
6663 const vk::VkDescriptorSetLayoutBinding binding =
6666 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6668 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6672 bindings.push_back(binding);
6674 const vk::VkPushConstantRange pushConstantRange =
6676 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6681 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6682 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6685 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
6686 const vk::VkDescriptorPoolSize poolSizes =
6688 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6691 const vk::VkDescriptorPoolCreateInfo createInfo =
6693 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6695 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6702 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6703 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6704 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6707 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6709 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6710 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6711 : m_maxStorageTexelCount * 4) / 4;
6712 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6713 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6715 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6723 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6726 const vk::VkBufferViewCreateInfo createInfo =
6728 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6732 context.getBuffer(),
6733 vk::VK_FORMAT_R32_UINT,
6734 descriptorSetNdx * m_maxStorageTexelCount * 4,
6738 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6742 const vk::VkWriteDescriptorSet write =
6744 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6746 m_descriptorSets[descriptorSetNdx],
6750 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6753 &m_bufferViews[descriptorSetNdx]
6756 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6761 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6763 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6764 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6766 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6768 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6772 const deUint32 callId;
6773 const deUint32 valuesPerPixel;
6774 const deUint32 maxStorageTexelCount;
6775 const deUint32 width;
6778 (deUint32)descriptorSetNdx,
6779 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6780 m_maxStorageTexelCount,
6781 (deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6782 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6783 : m_maxStorageTexelCount * 4u) / 4u
6786 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6787 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6788 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6792 void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6794 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);
6796 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6797 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6799 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6801 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6803 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
6804 const deUint32 callId = (deUint32)descriptorSetNdx;
6806 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6807 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6808 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6809 : m_maxStorageTexelCount * 4) / 4;
6811 if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
6815 deUint32 value = id;
6817 for (deUint32 i = 0; i < valuesPerPixel; i++)
6819 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6820 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6821 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6822 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6825 const UVec4 vec ((value >> 0u) & 0xFFu,
6826 (value >> 8u) & 0xFFu,
6827 (value >> 16u) & 0xFFu,
6828 (value >> 24u) & 0xFFu);
6830 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6836 class RenderFragmentStorageImage : public RenderPassCommand
6839 RenderFragmentStorageImage (void) {}
6840 ~RenderFragmentStorageImage (void);
6842 const char* getName (void) const { return "RenderFragmentStorageImage"; }
6843 void logPrepare (TestLog&, size_t) const;
6844 void logSubmit (TestLog&, size_t) const;
6845 void prepare (PrepareRenderPassContext&);
6846 void submit (SubmitContext& context);
6847 void verify (VerifyRenderPassContext&, size_t);
6850 PipelineResources m_resources;
6851 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6852 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
6853 vk::Move<vk::VkImageView> m_imageView;
6856 RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
6860 void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
6862 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
6865 void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
6867 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
6870 void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
6872 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6873 const vk::VkDevice device = context.getContext().getDevice();
6874 const vk::VkRenderPass renderPass = context.getRenderPass();
6875 const deUint32 subpass = 0;
6876 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6877 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
6878 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6881 const vk::VkDescriptorSetLayoutBinding binding =
6884 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
6886 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6890 bindings.push_back(binding);
6893 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6894 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
6897 const vk::VkDescriptorPoolSize poolSizes =
6899 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
6902 const vk::VkDescriptorPoolCreateInfo createInfo =
6904 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6906 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6913 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6917 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6918 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6920 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6928 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6931 const vk::VkImageViewCreateInfo createInfo =
6933 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
6938 vk::VK_IMAGE_VIEW_TYPE_2D,
6939 vk::VK_FORMAT_R8G8B8A8_UNORM,
6940 vk::makeComponentMappingRGBA(),
6942 vk::VK_IMAGE_ASPECT_COLOR_BIT,
6950 m_imageView = vk::createImageView(vkd, device, &createInfo);
6954 const vk::VkDescriptorImageInfo imageInfo =
6958 context.getImageLayout()
6960 const vk::VkWriteDescriptorSet write =
6962 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6968 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
6974 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6979 void RenderFragmentStorageImage::submit (SubmitContext& context)
6981 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6982 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6984 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6986 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
6987 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6990 void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
6992 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
6993 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
6995 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6996 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6998 UVec4 value = UVec4(x, y, 0u, 0u);
7000 for (deUint32 i = 0; i < valuesPerPixel; i++)
7002 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7003 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7005 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7006 (deUint32)(floatValue.y() * 255.0f),
7007 (deUint32)(floatValue.z() * 255.0f),
7008 (deUint32)(floatValue.w() * 255.0f));
7011 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7015 class RenderFragmentSampledImage : public RenderPassCommand
7018 RenderFragmentSampledImage (void) {}
7019 ~RenderFragmentSampledImage (void);
7021 const char* getName (void) const { return "RenderFragmentSampledImage"; }
7022 void logPrepare (TestLog&, size_t) const;
7023 void logSubmit (TestLog&, size_t) const;
7024 void prepare (PrepareRenderPassContext&);
7025 void submit (SubmitContext& context);
7026 void verify (VerifyRenderPassContext&, size_t);
7029 PipelineResources m_resources;
7030 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
7031 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
7032 vk::Move<vk::VkImageView> m_imageView;
7033 vk::Move<vk::VkSampler> m_sampler;
7036 RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
7040 void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7042 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
7045 void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7047 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
7050 void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
7052 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7053 const vk::VkDevice device = context.getContext().getDevice();
7054 const vk::VkRenderPass renderPass = context.getRenderPass();
7055 const deUint32 subpass = 0;
7056 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
7057 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
7058 vector<vk::VkDescriptorSetLayoutBinding> bindings;
7061 const vk::VkDescriptorSetLayoutBinding binding =
7064 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7066 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7070 bindings.push_back(binding);
7073 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7074 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7077 const vk::VkDescriptorPoolSize poolSizes =
7079 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7082 const vk::VkDescriptorPoolCreateInfo createInfo =
7084 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7086 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7093 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7097 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
7098 const vk::VkDescriptorSetAllocateInfo allocateInfo =
7100 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7108 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
7111 const vk::VkImageViewCreateInfo createInfo =
7113 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7118 vk::VK_IMAGE_VIEW_TYPE_2D,
7119 vk::VK_FORMAT_R8G8B8A8_UNORM,
7120 vk::makeComponentMappingRGBA(),
7122 vk::VK_IMAGE_ASPECT_COLOR_BIT,
7130 m_imageView = vk::createImageView(vkd, device, &createInfo);
7134 const vk::VkSamplerCreateInfo createInfo =
7136 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
7140 vk::VK_FILTER_NEAREST,
7141 vk::VK_FILTER_NEAREST,
7143 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
7144 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7145 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7146 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7151 vk::VK_COMPARE_OP_ALWAYS,
7154 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
7158 m_sampler = vk::createSampler(vkd, device, &createInfo);
7162 const vk::VkDescriptorImageInfo imageInfo =
7166 context.getImageLayout()
7168 const vk::VkWriteDescriptorSet write =
7170 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7176 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7182 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7187 void RenderFragmentSampledImage::submit (SubmitContext& context)
7189 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7190 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7192 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7194 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7195 vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
7198 void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7200 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7201 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7203 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7204 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7206 UVec4 value = UVec4(x, y, 0u, 0u);
7208 for (deUint32 i = 0; i < valuesPerPixel; i++)
7210 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7211 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7213 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7214 (deUint32)(floatValue.y() * 255.0f),
7215 (deUint32)(floatValue.z() * 255.0f),
7216 (deUint32)(floatValue.w() * 255.0f));
7220 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7238 OP_BUFFER_BINDMEMORY,
7240 OP_QUEUE_WAIT_FOR_IDLE,
7241 OP_DEVICE_WAIT_FOR_IDLE,
7243 OP_COMMAND_BUFFER_BEGIN,
7244 OP_COMMAND_BUFFER_END,
7246 // Secondary, non render pass command buffers
7247 // Render pass secondary command buffers are not currently covered
7248 OP_SECONDARY_COMMAND_BUFFER_BEGIN,
7249 OP_SECONDARY_COMMAND_BUFFER_END,
7251 // Buffer transfer operations
7255 OP_BUFFER_COPY_TO_BUFFER,
7256 OP_BUFFER_COPY_FROM_BUFFER,
7258 OP_BUFFER_COPY_TO_IMAGE,
7259 OP_BUFFER_COPY_FROM_IMAGE,
7263 OP_IMAGE_BINDMEMORY,
7265 OP_IMAGE_TRANSITION_LAYOUT,
7267 OP_IMAGE_COPY_TO_BUFFER,
7268 OP_IMAGE_COPY_FROM_BUFFER,
7270 OP_IMAGE_COPY_TO_IMAGE,
7271 OP_IMAGE_COPY_FROM_IMAGE,
7273 OP_IMAGE_BLIT_TO_IMAGE,
7274 OP_IMAGE_BLIT_FROM_IMAGE,
7278 OP_PIPELINE_BARRIER_GLOBAL,
7279 OP_PIPELINE_BARRIER_BUFFER,
7280 OP_PIPELINE_BARRIER_IMAGE,
7282 // Renderpass operations
7283 OP_RENDERPASS_BEGIN,
7286 // Commands inside render pass
7287 OP_RENDER_VERTEX_BUFFER,
7288 OP_RENDER_INDEX_BUFFER,
7290 OP_RENDER_VERTEX_UNIFORM_BUFFER,
7291 OP_RENDER_FRAGMENT_UNIFORM_BUFFER,
7293 OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
7294 OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,
7296 OP_RENDER_VERTEX_STORAGE_BUFFER,
7297 OP_RENDER_FRAGMENT_STORAGE_BUFFER,
7299 OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
7300 OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,
7302 OP_RENDER_VERTEX_STORAGE_IMAGE,
7303 OP_RENDER_FRAGMENT_STORAGE_IMAGE,
7305 OP_RENDER_VERTEX_SAMPLED_IMAGE,
7306 OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
7312 STAGE_COMMAND_BUFFER,
7313 STAGE_SECONDARY_COMMAND_BUFFER,
7318 vk::VkAccessFlags getWriteAccessFlags (void)
7320 return vk::VK_ACCESS_SHADER_WRITE_BIT
7321 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7322 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7323 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
7324 | vk::VK_ACCESS_HOST_WRITE_BIT
7325 | vk::VK_ACCESS_MEMORY_WRITE_BIT;
7328 bool isWriteAccess (vk::VkAccessFlagBits access)
7330 return (getWriteAccessFlags() & access) != 0;
7336 CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);
7338 bool isValid (vk::VkPipelineStageFlagBits stage,
7339 vk::VkAccessFlagBits access) const;
7341 void perform (vk::VkPipelineStageFlagBits stage,
7342 vk::VkAccessFlagBits access);
7344 void submitCommandBuffer (void);
7345 void waitForIdle (void);
7347 void getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7348 vk::VkAccessFlags& srcAccesses,
7349 vk::VkPipelineStageFlags& dstStages,
7350 vk::VkAccessFlags& dstAccesses) const;
7352 void barrier (vk::VkPipelineStageFlags srcStages,
7353 vk::VkAccessFlags srcAccesses,
7354 vk::VkPipelineStageFlags dstStages,
7355 vk::VkAccessFlags dstAccesses);
7357 void imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7358 vk::VkAccessFlags srcAccesses,
7359 vk::VkPipelineStageFlags dstStages,
7360 vk::VkAccessFlags dstAccesses);
7362 void checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7363 vk::VkAccessFlags srcAccesses,
7364 vk::VkPipelineStageFlags dstStages,
7365 vk::VkAccessFlags dstAccesses);
7367 // Everything is clean and there is no need for barriers
7368 bool isClean (void) const;
7370 vk::VkPipelineStageFlags getAllowedStages (void) const { return m_allowedStages; }
7371 vk::VkAccessFlags getAllowedAcceses (void) const { return m_allowedAccesses; }
7373 // Limit which stages and accesses are used by the CacheState tracker
7374 const vk::VkPipelineStageFlags m_allowedStages;
7375 const vk::VkAccessFlags m_allowedAccesses;
7377 // [dstStage][srcStage][dstAccess] = srcAccesses
7378 // In stage dstStage write srcAccesses from srcStage are not yet available for dstAccess
7379 vk::VkAccessFlags m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
7380 // Latest pipeline transition is not available in stage
7381 bool m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
7382 // [dstStage] = dstAccesses
7383 // In stage dstStage ops with dstAccesses are not yet visible
7384 vk::VkAccessFlags m_invisibleOperations[PIPELINESTAGE_LAST];
7386 // [dstStage] = srcStage
7387 // Memory operation in srcStage have not completed before dstStage
7388 vk::VkPipelineStageFlags m_incompleteOperations[PIPELINESTAGE_LAST];
7391 CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7392 : m_allowedStages (allowedStages)
7393 , m_allowedAccesses (allowedAccesses)
7395 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7397 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7399 if ((dstStage_ & m_allowedStages) == 0)
7402 // All operations are initially visible
7403 m_invisibleOperations[dstStage] = 0;
7405 // There are no incomplete read operations initially
7406 m_incompleteOperations[dstStage] = 0;
7408 // There are no incomplete layout transitions
7409 m_unavailableLayoutTransition[dstStage] = false;
7411 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7413 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7415 if ((srcStage_ & m_allowedStages) == 0)
7418 // There are no write operations that are not yet available
7420 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7422 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7424 if ((dstAccess_ & m_allowedAccesses) == 0)
7427 m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
7433 bool CacheState::isValid (vk::VkPipelineStageFlagBits stage,
7434 vk::VkAccessFlagBits access) const
7436 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7437 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7439 const PipelineStage dstStage = pipelineStageFlagToPipelineStage(stage);
7441 // Previous operations are not visible to access on stage
7442 if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7445 if (isWriteAccess(access))
7447 // Memory operations from other stages have not completed before
7449 if (m_incompleteOperations[dstStage] != 0)
7456 void CacheState::perform (vk::VkPipelineStageFlagBits stage,
7457 vk::VkAccessFlagBits access)
7459 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7460 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7462 const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7464 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7466 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7468 if ((dstStage_ & m_allowedStages) == 0)
7471 // Mark stage as incomplete for all stages
7472 m_incompleteOperations[dstStage] |= stage;
7474 if (isWriteAccess(access))
7476 // Mark all accesses from all stages invisible
7477 m_invisibleOperations[dstStage] |= m_allowedAccesses;
7479 // Mark write access from srcStage unavailable to all stages for all accesses
7480 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7482 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7484 if ((dstAccess_ & m_allowedAccesses) == 0)
7487 m_unavailableWriteOperations[dstStage][srcStage][dstAccess] |= access;
7493 void CacheState::submitCommandBuffer (void)
7495 // Flush all host writes and reads
7496 barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7497 m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
7502 void CacheState::waitForIdle (void)
7504 // Make all writes available
7505 barrier(m_allowedStages,
7506 m_allowedAccesses & getWriteAccessFlags(),
7510 // Make all writes visible on device side
7511 barrier(m_allowedStages,
7513 m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
7517 void CacheState::getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7518 vk::VkAccessFlags& srcAccesses,
7519 vk::VkPipelineStageFlags& dstStages,
7520 vk::VkAccessFlags& dstAccesses) const
7527 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7529 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7531 if ((dstStage_ & m_allowedStages) == 0)
7534 // Make sure all previous operation are complete in all stages
7535 if (m_incompleteOperations[dstStage])
7537 dstStages |= dstStage_;
7538 srcStages |= m_incompleteOperations[dstStage];
7541 // Make sure all read operations are visible in dstStage
7542 if (m_invisibleOperations[dstStage])
7544 dstStages |= dstStage_;
7545 dstAccesses |= m_invisibleOperations[dstStage];
7548 // Make sure all write operations from all stages are available
7549 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7551 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7553 if ((srcStage_ & m_allowedStages) == 0)
7556 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7558 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7560 if ((dstAccess_ & m_allowedAccesses) == 0)
7563 if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess])
7565 dstStages |= dstStage_;
7566 srcStages |= dstStage_;
7567 srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage][dstAccess];
7571 if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
7573 // Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
7574 // but has completed in srcStage.
7575 dstStages |= dstStage_;
7576 srcStages |= dstStage_;
7581 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7582 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7583 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7584 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7587 void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7588 vk::VkAccessFlags srcAccesses,
7589 vk::VkPipelineStageFlags dstStages,
7590 vk::VkAccessFlags dstAccesses)
7592 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7593 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7594 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7595 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7597 DE_UNREF(srcStages);
7598 DE_UNREF(srcAccesses);
7600 DE_UNREF(dstStages);
7601 DE_UNREF(dstAccesses);
7603 #if defined(DE_DEBUG)
7604 // Check that all stages have completed before srcStages or are in srcStages.
7606 vk::VkPipelineStageFlags completedStages = srcStages;
7608 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7610 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7612 if ((srcStage_ & srcStages) == 0)
7615 completedStages |= (~m_incompleteOperations[srcStage]);
7618 DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
7621 // Check that any write is available at least in one stage. Since all stages are complete even single flush is enough.
7622 if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
7624 bool anyWriteAvailable = false;
7626 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7628 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7630 if ((dstStage_ & m_allowedStages) == 0)
7633 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7635 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7637 if ((srcStage_ & m_allowedStages) == 0)
7640 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7642 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7644 if ((dstAccess_ & m_allowedAccesses) == 0)
7647 if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != (getWriteAccessFlags() & m_allowedAccesses))
7649 anyWriteAvailable = true;
7656 DE_ASSERT(anyWriteAvailable);
7661 void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7662 vk::VkAccessFlags srcAccesses,
7663 vk::VkPipelineStageFlags dstStages,
7664 vk::VkAccessFlags dstAccesses)
7666 checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7668 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7670 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7672 if ((dstStage_ & m_allowedStages) == 0)
7675 // All stages are incomplete after the barrier except each dstStage in it self.
7676 m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7678 // All memory operations are invisible unless they are listed in dstAccess
7679 m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7681 // Layout transition is unavailable in stage unless it was listed in dstStages
7682 m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7684 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7686 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7688 if ((srcStage_ & m_allowedStages) == 0)
7691 // All write operations are available after layout transition
7692 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7694 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7696 if ((dstAccess_ & m_allowedAccesses) == 0)
7699 m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
7705 void CacheState::barrier (vk::VkPipelineStageFlags srcStages,
7706 vk::VkAccessFlags srcAccesses,
7707 vk::VkPipelineStageFlags dstStages,
7708 vk::VkAccessFlags dstAccesses)
7710 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7711 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7712 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7713 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7717 vk::VkPipelineStageFlags oldIncompleteOperations[PIPELINESTAGE_LAST];
7718 vk::VkAccessFlags oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST][ACCESS_LAST];
7719 bool oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];
7721 deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
7722 deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
7723 deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));
7725 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7727 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7729 if ((srcStage_ & srcStages) == 0)
7732 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7734 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7736 if ((dstStage_ & dstStages) == 0)
7739 // Stages that have completed before srcStage have also completed before dstStage
7740 m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
7742 // Image layout transition in srcStage are now available in dstStage
7743 m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];
7745 for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
7747 const PipelineStage sharedStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
7749 if ((sharedStage_ & m_allowedStages) == 0)
7752 // Writes that are available in srcStage are also available in dstStage
7753 for (vk::VkAccessFlags sharedAccess_ = 1; sharedAccess_ <= m_allowedAccesses; sharedAccess_ <<= 1)
7755 const Access sharedAccess = accessFlagToAccess((vk::VkAccessFlagBits)sharedAccess_);
7757 if ((sharedAccess_ & m_allowedAccesses) == 0)
7760 m_unavailableWriteOperations[dstStage][sharedStage][sharedAccess] &= oldUnavailableWriteOperations[srcStage][sharedStage][sharedAccess];
7768 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7770 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7771 bool allWritesAvailable = true;
7773 if ((dstStage_ & dstStages) == 0)
7776 // Operations in srcStages have completed before any stage in dstStages
7777 m_incompleteOperations[dstStage] &= ~srcStages;
7779 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7781 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7783 if ((srcStage_ & m_allowedStages) == 0)
7786 // Make srcAccesses from srcStage available in dstStage for dstAccess
7787 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7789 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7791 if ((dstAccess_ & m_allowedAccesses) == 0)
7794 if (((srcStage_ & srcStages) != 0) && ((dstAccess_ & dstAccesses) != 0))
7795 m_unavailableWriteOperations[dstStage][srcStage][dstAccess] &= ~srcAccesses;
7797 if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
7798 allWritesAvailable = false;
7802 // If all writes are available in dstStage make dstAccesses also visible
7803 if (allWritesAvailable)
7804 m_invisibleOperations[dstStage] &= ~dstAccesses;
7808 bool CacheState::isClean (void) const
7810 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7812 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7814 if ((dstStage_ & m_allowedStages) == 0)
7817 // Some operations are not visible to some stages
7818 if (m_invisibleOperations[dstStage] != 0)
7821 // There are operation that have not completed yet
7822 if (m_incompleteOperations[dstStage] != 0)
7825 // Layout transition has not completed yet
7826 if (m_unavailableLayoutTransition[dstStage])
7829 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7831 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7833 if ((srcStage_ & m_allowedStages) == 0)
7836 for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
7838 const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
7840 if ((dstAccess_ & m_allowedAccesses) == 0)
7843 // Some write operations are not available yet
7844 if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
7853 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7857 case vk::VK_IMAGE_LAYOUT_GENERAL:
7860 case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7861 return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7863 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7864 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7866 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7867 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7869 case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7870 // \todo [2016-03-09 mika] Should include input attachment
7871 return (usage & USAGE_SAMPLED_IMAGE) != 0;
7873 case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7874 return (usage & USAGE_TRANSFER_SRC) != 0;
7876 case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7877 return (usage & USAGE_TRANSFER_DST) != 0;
7879 case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7883 DE_FATAL("Unknown layout");
7888 size_t getNumberOfSupportedLayouts (Usage usage)
7890 const vk::VkImageLayout layouts[] =
7892 vk::VK_IMAGE_LAYOUT_GENERAL,
7893 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7894 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7895 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7896 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7897 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7898 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7900 size_t supportedLayoutCount = 0;
7902 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7904 const vk::VkImageLayout layout = layouts[layoutNdx];
7906 if (layoutSupportedByUsage(usage, layout))
7907 supportedLayoutCount++;
7910 return supportedLayoutCount;
7913 vk::VkImageLayout getRandomNextLayout (de::Random& rng,
7915 vk::VkImageLayout previousLayout)
7917 const vk::VkImageLayout layouts[] =
7919 vk::VK_IMAGE_LAYOUT_GENERAL,
7920 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7921 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7922 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7923 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7924 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7925 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7927 const size_t supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7929 DE_ASSERT(supportedLayoutCount > 0);
7931 size_t nextLayoutNdx = ((size_t)rng.getUint64()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7932 ? supportedLayoutCount
7933 : supportedLayoutCount - 1);
7935 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7937 const vk::VkImageLayout layout = layouts[layoutNdx];
7939 if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7941 if (nextLayoutNdx == 0)
7948 DE_FATAL("Unreachable");
7949 return vk::VK_IMAGE_LAYOUT_UNDEFINED;
7954 State (Usage usage, deUint32 seed)
7955 : stage (STAGE_HOST)
7956 , cache (usageToStageFlags(usage), usageToAccessFlags(usage))
7959 , hostInvalidated (true)
7960 , hostFlushed (true)
7961 , memoryDefined (false)
7963 , hasBoundBufferMemory (false)
7965 , hasBoundImageMemory (false)
7966 , imageLayout (vk::VK_IMAGE_LAYOUT_UNDEFINED)
7967 , imageDefined (false)
7970 , commandBufferIsEmpty (true)
7971 , primaryCommandBufferIsEmpty (true)
7972 , renderPassIsEmpty (true)
7981 bool hostInvalidated;
7986 bool hasBoundBufferMemory;
7989 bool hasBoundImageMemory;
7990 vk::VkImageLayout imageLayout;
7996 bool commandBufferIsEmpty;
7998 // a copy of commandBufferIsEmpty value, when secondary command buffer is in use
7999 bool primaryCommandBufferIsEmpty;
8001 bool renderPassIsEmpty;
8004 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
8006 if (state.stage == STAGE_HOST)
8008 if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
8010 // Host memory operations
8013 ops.push_back(OP_UNMAP);
8015 // Avoid flush and finish if they are not needed
8016 if (!state.hostFlushed)
8017 ops.push_back(OP_MAP_FLUSH);
8019 if (!state.hostInvalidated
8021 && ((usage & USAGE_HOST_READ) == 0
8022 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8023 && ((usage & USAGE_HOST_WRITE) == 0
8024 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
8026 ops.push_back(OP_MAP_INVALIDATE);
8029 if (usage & USAGE_HOST_READ
8030 && usage & USAGE_HOST_WRITE
8031 && state.memoryDefined
8032 && state.hostInvalidated
8034 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
8035 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8037 ops.push_back(OP_MAP_MODIFY);
8040 if (usage & USAGE_HOST_READ
8041 && state.memoryDefined
8042 && state.hostInvalidated
8044 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8046 ops.push_back(OP_MAP_READ);
8049 if (usage & USAGE_HOST_WRITE
8050 && state.hostInvalidated
8052 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
8054 ops.push_back(OP_MAP_WRITE);
8058 ops.push_back(OP_MAP);
8061 if (state.hasBoundBufferMemory && state.queueIdle)
8063 // \note Destroy only buffers after they have been bound
8064 ops.push_back(OP_BUFFER_DESTROY);
8068 if (state.hasBuffer)
8070 if (!state.hasBoundBufferMemory)
8071 ops.push_back(OP_BUFFER_BINDMEMORY);
8073 else if (!state.hasImage && supportsBuffers) // Avoid creating buffer if there is already image
8074 ops.push_back(OP_BUFFER_CREATE);
8077 if (state.hasBoundImageMemory && state.queueIdle)
8079 // \note Destroy only image after they have been bound
8080 ops.push_back(OP_IMAGE_DESTROY);
8086 if (!state.hasBoundImageMemory)
8087 ops.push_back(OP_IMAGE_BINDMEMORY);
8089 else if (!state.hasBuffer && supportsImages) // Avoid creating image if there is already buffer
8090 ops.push_back(OP_IMAGE_CREATE);
8093 // Host writes must be flushed before GPU commands and there must be
8094 // buffer or image for GPU commands
8095 if (state.hostFlushed
8096 && (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
8097 && (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
8098 && (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
8100 ops.push_back(OP_COMMAND_BUFFER_BEGIN);
8103 if (!state.deviceIdle)
8104 ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
8106 if (!state.queueIdle)
8107 ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
8109 else if (state.stage == STAGE_COMMAND_BUFFER)
8111 if (!state.cache.isClean())
8113 ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8115 if (state.hasImage && (state.imageLayout != vk::VK_IMAGE_LAYOUT_UNDEFINED))
8116 ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8118 if (state.hasBuffer)
8119 ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8122 if (state.hasBoundBufferMemory)
8124 if (usage & USAGE_TRANSFER_DST
8125 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8127 ops.push_back(OP_BUFFER_FILL);
8128 ops.push_back(OP_BUFFER_UPDATE);
8129 ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8130 ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8133 if (usage & USAGE_TRANSFER_SRC
8134 && state.memoryDefined
8135 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8137 ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8138 ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8142 if (state.hasBoundImageMemory
8143 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8144 || getNumberOfSupportedLayouts(usage) > 1))
8146 ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8149 if (usage & USAGE_TRANSFER_DST
8150 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8151 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8152 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8154 ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8155 ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8156 ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8159 if (usage & USAGE_TRANSFER_SRC
8160 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8161 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8162 && state.imageDefined
8163 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8165 ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8166 ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8167 ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8172 // \todo [2016-03-09 mika] Add other usages?
8173 if ((state.memoryDefined
8174 && state.hasBoundBufferMemory
8175 && (((usage & USAGE_VERTEX_BUFFER)
8176 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8177 || ((usage & USAGE_INDEX_BUFFER)
8178 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8179 || ((usage & USAGE_UNIFORM_BUFFER)
8180 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8181 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8182 || ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
8183 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8184 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8185 || ((usage & USAGE_STORAGE_BUFFER)
8186 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8187 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8188 || ((usage & USAGE_STORAGE_TEXEL_BUFFER)
8189 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
8190 || (state.imageDefined
8191 && state.hasBoundImageMemory
8192 && (((usage & USAGE_STORAGE_IMAGE)
8193 && state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8194 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8195 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8196 || ((usage & USAGE_SAMPLED_IMAGE)
8197 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8198 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
8199 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8200 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
8202 ops.push_back(OP_RENDERPASS_BEGIN);
8205 ops.push_back(OP_SECONDARY_COMMAND_BUFFER_BEGIN);
8207 // \note This depends on previous operations and has to be always the
8208 // last command buffer operation check
8209 if (ops.empty() || !state.commandBufferIsEmpty)
8210 ops.push_back(OP_COMMAND_BUFFER_END);
8212 else if (state.stage == STAGE_SECONDARY_COMMAND_BUFFER)
8214 if (!state.cache.isClean())
8216 ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8219 ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8221 if (state.hasBuffer)
8222 ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8225 if (state.hasBoundBufferMemory)
8227 if (usage & USAGE_TRANSFER_DST
8228 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8230 ops.push_back(OP_BUFFER_FILL);
8231 ops.push_back(OP_BUFFER_UPDATE);
8232 ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8233 ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8236 if (usage & USAGE_TRANSFER_SRC
8237 && state.memoryDefined
8238 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8240 ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8241 ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8245 if (state.hasBoundImageMemory
8246 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8247 || getNumberOfSupportedLayouts(usage) > 1))
8249 ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8252 if (usage & USAGE_TRANSFER_DST
8253 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8254 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8255 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8257 ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8258 ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8259 ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8262 if (usage & USAGE_TRANSFER_SRC
8263 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8264 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8265 && state.imageDefined
8266 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8268 ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8269 ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8270 ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8275 // \note This depends on previous operations and has to be always the
8276 // last command buffer operation check
8277 if (ops.empty() || !state.commandBufferIsEmpty)
8278 ops.push_back(OP_SECONDARY_COMMAND_BUFFER_END);
8280 else if (state.stage == STAGE_RENDER_PASS)
8282 if ((usage & USAGE_VERTEX_BUFFER) != 0
8283 && state.memoryDefined
8284 && state.hasBoundBufferMemory
8285 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8287 ops.push_back(OP_RENDER_VERTEX_BUFFER);
8290 if ((usage & USAGE_INDEX_BUFFER) != 0
8291 && state.memoryDefined
8292 && state.hasBoundBufferMemory
8293 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8295 ops.push_back(OP_RENDER_INDEX_BUFFER);
8298 if ((usage & USAGE_UNIFORM_BUFFER) != 0
8299 && state.memoryDefined
8300 && state.hasBoundBufferMemory)
8302 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8303 ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
8305 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8306 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
8309 if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
8310 && state.memoryDefined
8311 && state.hasBoundBufferMemory)
8313 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8314 ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
8316 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8317 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
8320 if ((usage & USAGE_STORAGE_BUFFER) != 0
8321 && state.memoryDefined
8322 && state.hasBoundBufferMemory)
8324 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8325 ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
8327 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8328 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
8331 if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
8332 && state.memoryDefined
8333 && state.hasBoundBufferMemory)
8335 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8336 ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
8338 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8339 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
8342 if ((usage & USAGE_STORAGE_IMAGE) != 0
8343 && state.imageDefined
8344 && state.hasBoundImageMemory
8345 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
8347 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8348 ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
8350 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8351 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
8354 if ((usage & USAGE_SAMPLED_IMAGE) != 0
8355 && state.imageDefined
8356 && state.hasBoundImageMemory
8357 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8358 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
8360 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8361 ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
8363 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8364 ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
8367 if (!state.renderPassIsEmpty)
8368 ops.push_back(OP_RENDERPASS_END);
8371 DE_FATAL("Unknown stage");
8374 void removeIllegalAccessFlags (vk::VkAccessFlags& accessflags, vk::VkPipelineStageFlags stageflags)
8376 if (!(stageflags & vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT))
8377 accessflags &= ~vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
8379 if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8380 accessflags &= ~vk::VK_ACCESS_INDEX_READ_BIT;
8382 if (!(stageflags & vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT))
8383 accessflags &= ~vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
8385 if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8386 vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8387 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8388 vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8389 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8390 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8391 accessflags &= ~vk::VK_ACCESS_UNIFORM_READ_BIT;
8393 if (!(stageflags & vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT))
8394 accessflags &= ~vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
8396 if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8397 vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8398 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8399 vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8400 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8401 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8402 accessflags &= ~vk::VK_ACCESS_SHADER_READ_BIT;
8404 if (!(stageflags & (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
8405 vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
8406 vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
8407 vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
8408 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
8409 vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)))
8410 accessflags &= ~vk::VK_ACCESS_SHADER_WRITE_BIT;
8412 if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8413 accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
8415 if (!(stageflags & vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT))
8416 accessflags &= ~vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
8418 if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8419 vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8420 accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
8422 if (!(stageflags & (vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
8423 vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)))
8424 accessflags &= ~vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
8426 if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8427 accessflags &= ~vk::VK_ACCESS_TRANSFER_READ_BIT;
8429 if (!(stageflags & vk::VK_PIPELINE_STAGE_TRANSFER_BIT))
8430 accessflags &= ~vk::VK_ACCESS_TRANSFER_WRITE_BIT;
8432 if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8433 accessflags &= ~vk::VK_ACCESS_HOST_READ_BIT;
8435 if (!(stageflags & vk::VK_PIPELINE_STAGE_HOST_BIT))
8436 accessflags &= ~vk::VK_ACCESS_HOST_WRITE_BIT;
8439 void applyOp (State& state, const Memory& memory, Op op, Usage usage)
8444 DE_ASSERT(state.stage == STAGE_HOST);
8445 DE_ASSERT(!state.mapped);
8446 state.mapped = true;
8450 DE_ASSERT(state.stage == STAGE_HOST);
8451 DE_ASSERT(state.mapped);
8452 state.mapped = false;
8456 DE_ASSERT(state.stage == STAGE_HOST);
8457 DE_ASSERT(!state.hostFlushed);
8458 state.hostFlushed = true;
8461 case OP_MAP_INVALIDATE:
8462 DE_ASSERT(state.stage == STAGE_HOST);
8463 DE_ASSERT(!state.hostInvalidated);
8464 state.hostInvalidated = true;
8468 DE_ASSERT(state.stage == STAGE_HOST);
8469 DE_ASSERT(state.hostInvalidated);
8470 state.rng.getUint32();
8474 DE_ASSERT(state.stage == STAGE_HOST);
8475 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8476 state.hostFlushed = false;
8478 state.memoryDefined = true;
8479 state.imageDefined = false;
8480 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8481 state.rng.getUint32();
8485 DE_ASSERT(state.stage == STAGE_HOST);
8486 DE_ASSERT(state.hostInvalidated);
8488 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8489 state.hostFlushed = false;
8491 state.rng.getUint32();
8494 case OP_BUFFER_CREATE:
8495 DE_ASSERT(state.stage == STAGE_HOST);
8496 DE_ASSERT(!state.hasBuffer);
8498 state.hasBuffer = true;
8501 case OP_BUFFER_DESTROY:
8502 DE_ASSERT(state.stage == STAGE_HOST);
8503 DE_ASSERT(state.hasBuffer);
8504 DE_ASSERT(state.hasBoundBufferMemory);
8506 state.hasBuffer = false;
8507 state.hasBoundBufferMemory = false;
8510 case OP_BUFFER_BINDMEMORY:
8511 DE_ASSERT(state.stage == STAGE_HOST);
8512 DE_ASSERT(state.hasBuffer);
8513 DE_ASSERT(!state.hasBoundBufferMemory);
8515 state.hasBoundBufferMemory = true;
8518 case OP_IMAGE_CREATE:
8519 DE_ASSERT(state.stage == STAGE_HOST);
8520 DE_ASSERT(!state.hasImage);
8521 DE_ASSERT(!state.hasBuffer);
8523 state.hasImage = true;
8526 case OP_IMAGE_DESTROY:
8527 DE_ASSERT(state.stage == STAGE_HOST);
8528 DE_ASSERT(state.hasImage);
8529 DE_ASSERT(state.hasBoundImageMemory);
8531 state.hasImage = false;
8532 state.hasBoundImageMemory = false;
8533 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8534 state.imageDefined = false;
8537 case OP_IMAGE_BINDMEMORY:
8538 DE_ASSERT(state.stage == STAGE_HOST);
8539 DE_ASSERT(state.hasImage);
8540 DE_ASSERT(!state.hasBoundImageMemory);
8542 state.hasBoundImageMemory = true;
8545 case OP_IMAGE_TRANSITION_LAYOUT:
8547 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8548 DE_ASSERT(state.hasImage);
8549 DE_ASSERT(state.hasBoundImageMemory);
8551 // \todo [2016-03-09 mika] Support linear tiling and predefined data
8552 const vk::VkImageLayout srcLayout = state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8553 const vk::VkImageLayout dstLayout = getRandomNextLayout(state.rng, usage, srcLayout);
8555 vk::VkPipelineStageFlags dirtySrcStages;
8556 vk::VkAccessFlags dirtySrcAccesses;
8557 vk::VkPipelineStageFlags dirtyDstStages;
8558 vk::VkAccessFlags dirtyDstAccesses;
8560 vk::VkPipelineStageFlags srcStages;
8561 vk::VkAccessFlags srcAccesses;
8562 vk::VkPipelineStageFlags dstStages;
8563 vk::VkAccessFlags dstAccesses;
8565 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8567 // Try masking some random bits
8568 srcStages = dirtySrcStages;
8569 srcAccesses = dirtySrcAccesses;
8571 dstStages = state.cache.getAllowedStages() & state.rng.getUint32();
8572 dstAccesses = state.cache.getAllowedAcceses() & state.rng.getUint32();
8574 // If there are no bits in dst stage mask use all stages
8575 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8578 srcStages = dstStages;
8580 removeIllegalAccessFlags(dstAccesses, dstStages);
8581 removeIllegalAccessFlags(srcAccesses, srcStages);
8583 if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8584 state.imageDefined = false;
8586 state.commandBufferIsEmpty = false;
8587 state.imageLayout = dstLayout;
8588 state.memoryDefined = false;
8589 state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8593 case OP_QUEUE_WAIT_FOR_IDLE:
8594 DE_ASSERT(state.stage == STAGE_HOST);
8595 DE_ASSERT(!state.queueIdle);
8597 state.queueIdle = true;
8599 state.cache.waitForIdle();
8602 case OP_DEVICE_WAIT_FOR_IDLE:
8603 DE_ASSERT(state.stage == STAGE_HOST);
8604 DE_ASSERT(!state.deviceIdle);
8606 state.queueIdle = true;
8607 state.deviceIdle = true;
8609 state.cache.waitForIdle();
8612 case OP_COMMAND_BUFFER_BEGIN:
8613 DE_ASSERT(state.stage == STAGE_HOST);
8614 state.stage = STAGE_COMMAND_BUFFER;
8615 state.commandBufferIsEmpty = true;
8616 // Makes host writes visible to command buffer
8617 state.cache.submitCommandBuffer();
8620 case OP_COMMAND_BUFFER_END:
8621 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8622 state.stage = STAGE_HOST;
8623 state.queueIdle = false;
8624 state.deviceIdle = false;
8627 case OP_SECONDARY_COMMAND_BUFFER_BEGIN:
8628 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8629 state.stage = STAGE_SECONDARY_COMMAND_BUFFER;
8630 state.primaryCommandBufferIsEmpty = state.commandBufferIsEmpty;
8631 state.commandBufferIsEmpty = true;
8634 case OP_SECONDARY_COMMAND_BUFFER_END:
8635 DE_ASSERT(state.stage == STAGE_SECONDARY_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8636 state.stage = STAGE_COMMAND_BUFFER;
8637 state.commandBufferIsEmpty = state.primaryCommandBufferIsEmpty;
8640 case OP_BUFFER_COPY_FROM_BUFFER:
8641 case OP_BUFFER_COPY_FROM_IMAGE:
8642 case OP_BUFFER_UPDATE:
8643 case OP_BUFFER_FILL:
8644 state.rng.getUint32();
8645 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8647 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8648 state.hostInvalidated = false;
8650 state.commandBufferIsEmpty = false;
8651 state.memoryDefined = true;
8652 state.imageDefined = false;
8653 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8654 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8657 case OP_BUFFER_COPY_TO_BUFFER:
8658 case OP_BUFFER_COPY_TO_IMAGE:
8659 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8661 state.commandBufferIsEmpty = false;
8662 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8665 case OP_IMAGE_BLIT_FROM_IMAGE:
8666 state.rng.getBool();
8668 case OP_IMAGE_COPY_FROM_BUFFER:
8669 case OP_IMAGE_COPY_FROM_IMAGE:
8670 state.rng.getUint32();
8671 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8673 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8674 state.hostInvalidated = false;
8676 state.commandBufferIsEmpty = false;
8677 state.memoryDefined = false;
8678 state.imageDefined = true;
8679 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8682 case OP_IMAGE_BLIT_TO_IMAGE:
8683 state.rng.getBool();
8685 case OP_IMAGE_COPY_TO_BUFFER:
8686 case OP_IMAGE_COPY_TO_IMAGE:
8687 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8689 state.commandBufferIsEmpty = false;
8690 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8693 case OP_PIPELINE_BARRIER_GLOBAL:
8694 case OP_PIPELINE_BARRIER_BUFFER:
8695 case OP_PIPELINE_BARRIER_IMAGE:
8697 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8699 vk::VkPipelineStageFlags dirtySrcStages;
8700 vk::VkAccessFlags dirtySrcAccesses;
8701 vk::VkPipelineStageFlags dirtyDstStages;
8702 vk::VkAccessFlags dirtyDstAccesses;
8704 vk::VkPipelineStageFlags srcStages;
8705 vk::VkAccessFlags srcAccesses;
8706 vk::VkPipelineStageFlags dstStages;
8707 vk::VkAccessFlags dstAccesses;
8709 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8711 // Try masking some random bits
8712 srcStages = dirtySrcStages & state.rng.getUint32();
8713 srcAccesses = dirtySrcAccesses & state.rng.getUint32();
8715 dstStages = dirtyDstStages & state.rng.getUint32();
8716 dstAccesses = dirtyDstAccesses & state.rng.getUint32();
8718 // If there are no bits in stage mask use the original dirty stages
8719 srcStages = srcStages ? srcStages : dirtySrcStages;
8720 dstStages = dstStages ? dstStages : dirtyDstStages;
8723 srcStages = dstStages;
8725 removeIllegalAccessFlags(dstAccesses, dstStages);
8726 removeIllegalAccessFlags(srcAccesses, srcStages);
8728 state.commandBufferIsEmpty = false;
8729 state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8733 case OP_RENDERPASS_BEGIN:
8735 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8737 state.renderPassIsEmpty = true;
8738 state.stage = STAGE_RENDER_PASS;
8742 case OP_RENDERPASS_END:
8744 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8746 state.renderPassIsEmpty = true;
8747 state.stage = STAGE_COMMAND_BUFFER;
8751 case OP_RENDER_VERTEX_BUFFER:
8753 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8755 state.renderPassIsEmpty = false;
8756 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8760 case OP_RENDER_INDEX_BUFFER:
8762 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8764 state.renderPassIsEmpty = false;
8765 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8769 case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8770 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8772 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8774 state.renderPassIsEmpty = false;
8775 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8779 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8780 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8782 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8784 state.renderPassIsEmpty = false;
8785 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8789 case OP_RENDER_VERTEX_STORAGE_BUFFER:
8790 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8792 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8794 state.renderPassIsEmpty = false;
8795 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8799 case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8800 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8802 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8804 state.renderPassIsEmpty = false;
8805 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8809 case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
8810 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
8812 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8814 state.renderPassIsEmpty = false;
8815 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8819 case OP_RENDER_VERTEX_STORAGE_IMAGE:
8820 case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8822 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8824 state.renderPassIsEmpty = false;
8825 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8830 DE_FATAL("Unknown op");
8834 de::MovePtr<Command> createHostCommand (Op op,
8837 vk::VkSharingMode sharing)
8841 case OP_MAP: return de::MovePtr<Command>(new Map());
8842 case OP_UNMAP: return de::MovePtr<Command>(new UnMap());
8844 case OP_MAP_FLUSH: return de::MovePtr<Command>(new Flush());
8845 case OP_MAP_INVALIDATE: return de::MovePtr<Command>(new Invalidate());
8847 case OP_MAP_READ: return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8848 case OP_MAP_WRITE: return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8849 case OP_MAP_MODIFY: return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8851 case OP_BUFFER_CREATE: return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8852 case OP_BUFFER_DESTROY: return de::MovePtr<Command>(new DestroyBuffer());
8853 case OP_BUFFER_BINDMEMORY: return de::MovePtr<Command>(new BindBufferMemory());
8855 case OP_IMAGE_CREATE: return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8856 case OP_IMAGE_DESTROY: return de::MovePtr<Command>(new DestroyImage());
8857 case OP_IMAGE_BINDMEMORY: return de::MovePtr<Command>(new BindImageMemory());
8859 case OP_QUEUE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new QueueWaitIdle());
8860 case OP_DEVICE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new DeviceWaitIdle());
8863 DE_FATAL("Unknown op");
8864 return de::MovePtr<Command>(DE_NULL);
8868 de::MovePtr<CmdCommand> createCmdCommand (de::Random& rng,
8875 case OP_BUFFER_FILL: return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8876 case OP_BUFFER_UPDATE: return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8877 case OP_BUFFER_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8878 case OP_BUFFER_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8880 case OP_BUFFER_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8881 case OP_BUFFER_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8883 case OP_IMAGE_TRANSITION_LAYOUT:
8885 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
8886 DE_ASSERT(state.hasImage);
8887 DE_ASSERT(state.hasBoundImageMemory);
8889 const vk::VkImageLayout srcLayout = rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8890 const vk::VkImageLayout dstLayout = getRandomNextLayout(rng, usage, srcLayout);
8892 vk::VkPipelineStageFlags dirtySrcStages;
8893 vk::VkAccessFlags dirtySrcAccesses;
8894 vk::VkPipelineStageFlags dirtyDstStages;
8895 vk::VkAccessFlags dirtyDstAccesses;
8897 vk::VkPipelineStageFlags srcStages;
8898 vk::VkAccessFlags srcAccesses;
8899 vk::VkPipelineStageFlags dstStages;
8900 vk::VkAccessFlags dstAccesses;
8902 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8904 // Try masking some random bits
8905 srcStages = dirtySrcStages;
8906 srcAccesses = dirtySrcAccesses;
8908 dstStages = state.cache.getAllowedStages() & rng.getUint32();
8909 dstAccesses = state.cache.getAllowedAcceses() & rng.getUint32();
8911 // If there are no bits in dst stage mask use all stages
8912 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8915 srcStages = dstStages;
8917 removeIllegalAccessFlags(dstAccesses, dstStages);
8918 removeIllegalAccessFlags(srcAccesses, srcStages);
8920 return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
8923 case OP_IMAGE_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8924 case OP_IMAGE_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8925 case OP_IMAGE_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8926 case OP_IMAGE_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8927 case OP_IMAGE_BLIT_TO_IMAGE:
8929 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8930 return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8933 case OP_IMAGE_BLIT_FROM_IMAGE:
8935 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8936 return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8939 case OP_PIPELINE_BARRIER_GLOBAL:
8940 case OP_PIPELINE_BARRIER_BUFFER:
8941 case OP_PIPELINE_BARRIER_IMAGE:
8943 vk::VkPipelineStageFlags dirtySrcStages;
8944 vk::VkAccessFlags dirtySrcAccesses;
8945 vk::VkPipelineStageFlags dirtyDstStages;
8946 vk::VkAccessFlags dirtyDstAccesses;
8948 vk::VkPipelineStageFlags srcStages;
8949 vk::VkAccessFlags srcAccesses;
8950 vk::VkPipelineStageFlags dstStages;
8951 vk::VkAccessFlags dstAccesses;
8953 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8955 // Try masking some random bits
8956 srcStages = dirtySrcStages & rng.getUint32();
8957 srcAccesses = dirtySrcAccesses & rng.getUint32();
8959 dstStages = dirtyDstStages & rng.getUint32();
8960 dstAccesses = dirtyDstAccesses & rng.getUint32();
8962 // If there are no bits in stage mask use the original dirty stages
8963 srcStages = srcStages ? srcStages : dirtySrcStages;
8964 dstStages = dstStages ? dstStages : dirtyDstStages;
8967 srcStages = dstStages;
8969 removeIllegalAccessFlags(dstAccesses, dstStages);
8970 removeIllegalAccessFlags(srcAccesses, srcStages);
8972 PipelineBarrier::Type type;
8974 if (op == OP_PIPELINE_BARRIER_IMAGE)
8975 type = PipelineBarrier::TYPE_IMAGE;
8976 else if (op == OP_PIPELINE_BARRIER_BUFFER)
8977 type = PipelineBarrier::TYPE_BUFFER;
8978 else if (op == OP_PIPELINE_BARRIER_GLOBAL)
8979 type = PipelineBarrier::TYPE_GLOBAL;
8982 type = PipelineBarrier::TYPE_LAST;
8983 DE_FATAL("Unknown op");
8986 if (type == PipelineBarrier::TYPE_IMAGE)
8987 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
8989 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
8993 DE_FATAL("Unknown op");
8994 return de::MovePtr<CmdCommand>(DE_NULL);
8998 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
9004 case OP_RENDER_VERTEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer());
9005 case OP_RENDER_INDEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
9007 case OP_RENDER_VERTEX_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
9008 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
9010 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
9011 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
9013 case OP_RENDER_VERTEX_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
9014 case OP_RENDER_FRAGMENT_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
9016 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
9017 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
9019 case OP_RENDER_VERTEX_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
9020 case OP_RENDER_FRAGMENT_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
9022 case OP_RENDER_VERTEX_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
9023 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
9026 DE_FATAL("Unknown op");
9027 return de::MovePtr<RenderPassCommand>(DE_NULL);
9031 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory& memory,
9032 de::Random& nextOpRng,
9038 vector<RenderPassCommand*> commands;
9042 for (; opNdx < opCount; opNdx++)
9046 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9048 DE_ASSERT(!ops.empty());
9051 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9053 if (op == OP_RENDERPASS_END)
9059 de::Random rng (state.rng);
9061 commands.push_back(createRenderPassCommand(rng, state, op).release());
9062 applyOp(state, memory, op, usage);
9064 DE_ASSERT(state.rng == rng);
9069 applyOp(state, memory, OP_RENDERPASS_END, usage);
9070 return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
9074 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9075 delete commands[commandNdx];
9081 de::MovePtr<CmdCommand> createSecondaryCmdCommands (const Memory& memory,
9082 de::Random& nextOpRng,
9088 vector<CmdCommand*> commands;
9092 for (; opNdx < opCount; opNdx++)
9096 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9098 DE_ASSERT(!ops.empty());
9101 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9103 if (op == OP_SECONDARY_COMMAND_BUFFER_END)
9109 de::Random rng(state.rng);
9111 commands.push_back(createCmdCommand(rng, state, op, usage).release());
9112 applyOp(state, memory, op, usage);
9114 DE_ASSERT(state.rng == rng);
9119 applyOp(state, memory, OP_SECONDARY_COMMAND_BUFFER_END, usage);
9120 return de::MovePtr<CmdCommand>(new ExecuteSecondaryCommandBuffer(commands));
9124 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9125 delete commands[commandNdx];
9131 de::MovePtr<Command> createCmdCommands (const Memory& memory,
9132 de::Random& nextOpRng,
9138 vector<CmdCommand*> commands;
9142 // Insert a mostly-full barrier to order this work wrt previous command buffer.
9143 commands.push_back(new PipelineBarrier(state.cache.getAllowedStages(),
9144 state.cache.getAllowedAcceses(),
9145 state.cache.getAllowedStages(),
9146 state.cache.getAllowedAcceses(),
9147 PipelineBarrier::TYPE_GLOBAL,
9148 tcu::nothing<vk::VkImageLayout>()));
9150 for (; opNdx < opCount; opNdx++)
9154 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9156 DE_ASSERT(!ops.empty());
9159 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9161 if (op == OP_COMMAND_BUFFER_END)
9167 // \note Command needs to known the state before the operation
9168 if (op == OP_RENDERPASS_BEGIN)
9170 applyOp(state, memory, op, usage);
9171 commands.push_back(createRenderPassCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9173 else if (op == OP_SECONDARY_COMMAND_BUFFER_BEGIN)
9175 applyOp(state, memory, op, usage);
9176 commands.push_back(createSecondaryCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9180 de::Random rng (state.rng);
9182 commands.push_back(createCmdCommand(rng, state, op, usage).release());
9183 applyOp(state, memory, op, usage);
9185 DE_ASSERT(state.rng == rng);
9192 applyOp(state, memory, OP_COMMAND_BUFFER_END, usage);
9193 return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9197 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9198 delete commands[commandNdx];
9204 void createCommands (vector<Command*>& commands,
9206 const Memory& memory,
9208 vk::VkSharingMode sharingMode,
9211 State state (usage, seed);
9212 // Used to select next operation only
9213 de::Random nextOpRng (seed ^ 12930809);
9215 commands.reserve(opCount);
9217 for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9221 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9223 DE_ASSERT(!ops.empty());
9226 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9228 if (op == OP_COMMAND_BUFFER_BEGIN)
9230 applyOp(state, memory, op, usage);
9231 commands.push_back(createCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9235 de::Random rng (state.rng);
9237 commands.push_back(createHostCommand(op, rng, usage, sharingMode).release());
9238 applyOp(state, memory, op, usage);
9240 // Make sure that random generator is in sync
9241 DE_ASSERT(state.rng == rng);
9246 // Clean up resources
9247 if (state.hasBuffer && state.hasImage)
9249 if (!state.queueIdle)
9250 commands.push_back(new QueueWaitIdle());
9252 if (state.hasBuffer)
9253 commands.push_back(new DestroyBuffer());
9256 commands.push_back(new DestroyImage());
9260 class MemoryTestInstance : public TestInstance
9264 typedef bool(MemoryTestInstance::*StageFunc)(void);
9266 MemoryTestInstance (::vkt::Context& context, const TestConfig& config);
9267 ~MemoryTestInstance (void);
9269 tcu::TestStatus iterate (void);
9272 const TestConfig m_config;
9273 const size_t m_iterationCount;
9274 const size_t m_opCount;
9275 const vk::VkPhysicalDeviceMemoryProperties m_memoryProperties;
9276 deUint32 m_memoryTypeNdx;
9279 tcu::ResultCollector m_resultCollector;
9281 vector<Command*> m_commands;
9282 MovePtr<Memory> m_memory;
9283 MovePtr<Context> m_renderContext;
9284 MovePtr<PrepareContext> m_prepareContext;
9286 bool nextIteration (void);
9287 bool nextMemoryType (void);
9289 bool createCommandsAndAllocateMemory (void);
9290 bool prepare (void);
9291 bool execute (void);
9293 void resetResources (void);
9296 void MemoryTestInstance::resetResources (void)
9298 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9299 const vk::VkDevice device = m_context.getDevice();
9301 VK_CHECK(vkd.deviceWaitIdle(device));
9303 for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9305 delete m_commands[commandNdx];
9306 m_commands[commandNdx] = DE_NULL;
9310 m_prepareContext.clear();
9314 bool MemoryTestInstance::nextIteration (void)
9318 if (m_iteration < m_iterationCount)
9321 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9325 return nextMemoryType();
9328 bool MemoryTestInstance::nextMemoryType (void)
9332 DE_ASSERT(m_commands.empty());
9336 if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9339 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9350 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
9351 : TestInstance (context)
9353 , m_iterationCount (5)
9355 , m_memoryProperties (vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
9356 , m_memoryTypeNdx (0)
9358 , m_stage (&MemoryTestInstance::createCommandsAndAllocateMemory)
9359 , m_resultCollector (context.getTestContext().getLog())
9361 , m_memory (DE_NULL)
9363 TestLog& log = context.getTestContext().getLog();
9365 const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
9367 log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
9368 log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
9369 log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
9373 const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
9375 for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
9377 const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
9379 log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
9380 log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
9383 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
9385 const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
9387 log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
9388 log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
9393 const vk::InstanceInterface& vki = context.getInstanceInterface();
9394 const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
9395 const vk::DeviceInterface& vkd = context.getDeviceInterface();
9396 const vk::VkDevice device = context.getDevice();
9397 const vk::VkQueue queue = context.getUniversalQueue();
9398 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
9399 vector<pair<deUint32, vk::VkQueue> > queues;
9401 queues.push_back(std::make_pair(queueFamilyIndex, queue));
9403 m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
9407 MemoryTestInstance::~MemoryTestInstance (void)
9412 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
9414 const vk::VkDevice device = m_context.getDevice();
9415 TestLog& log = m_context.getTestContext().getLog();
9416 const vk::InstanceInterface& vki = m_context.getInstanceInterface();
9417 const vk::VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
9418 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9419 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
9420 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
9421 "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
9422 const vector<deUint32>& queues = m_renderContext->getQueueFamilies();
9424 DE_ASSERT(m_commands.empty());
9426 if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
9427 && !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
9429 log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
9431 return nextMemoryType();
9437 const vk::VkBufferUsageFlags bufferUsage = usageToBufferUsageFlags(m_config.usage);
9438 const vk::VkImageUsageFlags imageUsage = usageToImageUsageFlags(m_config.usage);
9439 const vk::VkDeviceSize maxBufferSize = bufferUsage != 0
9440 ? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
9442 const IVec2 maxImageSize = imageUsage != 0
9443 ? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
9446 log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
9447 log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
9449 // Skip tests if there are no supported operations
9450 if (maxBufferSize == 0
9451 && maxImageSize[0] == 0
9452 && (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
9454 log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
9456 return nextMemoryType();
9460 const deUint32 seed = 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount + m_memoryTypeNdx);
9462 m_memory = MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
9464 log << TestLog::Message << "Create commands" << TestLog::EndMessage;
9465 createCommands(m_commands, seed, *m_memory, m_config.usage, m_config.sharing, m_opCount);
9467 m_stage = &MemoryTestInstance::prepare;
9471 catch (const tcu::TestError& e)
9473 m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
9474 return nextMemoryType();
9479 bool MemoryTestInstance::prepare (void)
9481 TestLog& log = m_context.getTestContext().getLog();
9482 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9483 "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration" + de::toString(m_iteration));
9485 m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9487 DE_ASSERT(!m_commands.empty());
9489 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9491 Command& command = *m_commands[cmdNdx];
9495 command.prepare(*m_prepareContext);
9497 catch (const tcu::TestError& e)
9499 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9500 return nextMemoryType();
9504 m_stage = &MemoryTestInstance::execute;
9508 bool MemoryTestInstance::execute (void)
9510 TestLog& log = m_context.getTestContext().getLog();
9511 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9512 "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9513 ExecuteContext executeContext (*m_renderContext);
9514 const vk::VkDevice device = m_context.getDevice();
9515 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9517 DE_ASSERT(!m_commands.empty());
9519 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9521 Command& command = *m_commands[cmdNdx];
9525 command.execute(executeContext);
9527 catch (const tcu::TestError& e)
9529 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9530 return nextIteration();
9534 VK_CHECK(vkd.deviceWaitIdle(device));
9536 m_stage = &MemoryTestInstance::verify;
9540 bool MemoryTestInstance::verify (void)
9542 DE_ASSERT(!m_commands.empty());
9544 TestLog& log = m_context.getTestContext().getLog();
9545 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9546 "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9547 VerifyContext verifyContext (log, m_resultCollector, *m_renderContext, m_config.size);
9549 log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9551 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9553 Command& command = *m_commands[cmdNdx];
9557 command.verify(verifyContext, cmdNdx);
9559 catch (const tcu::TestError& e)
9561 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9562 return nextIteration();
9566 return nextIteration();
9569 tcu::TestStatus MemoryTestInstance::iterate (void)
9571 if ((this->*m_stage)())
9572 return tcu::TestStatus::incomplete();
9574 return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
9579 void init (vk::SourceCollections& sources, TestConfig config) const
9581 // Vertex buffer rendering
9582 if (config.usage & USAGE_VERTEX_BUFFER)
9584 const char* const vertexShader =
9586 "layout(location = 0) in highp vec2 a_position;\n"
9587 "void main (void) {\n"
9588 "\tgl_PointSize = 1.0;\n"
9589 "\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9592 sources.glslSources.add("vertex-buffer.vert")
9593 << glu::VertexSource(vertexShader);
9596 // Index buffer rendering
9597 if (config.usage & USAGE_INDEX_BUFFER)
9599 const char* const vertexShader =
9601 "precision highp float;\n"
9602 "void main (void) {\n"
9603 "\tgl_PointSize = 1.0;\n"
9604 "\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9605 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9608 sources.glslSources.add("index-buffer.vert")
9609 << glu::VertexSource(vertexShader);
9612 if (config.usage & USAGE_UNIFORM_BUFFER)
9615 std::ostringstream vertexShader;
9619 "precision highp float;\n"
9620 "layout(set=0, binding=0) uniform Block\n"
9622 "\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9624 "void main (void) {\n"
9625 "\tgl_PointSize = 1.0;\n"
9626 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9627 "\thighp uint val;\n"
9628 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9629 "\t\tval = vecVal.x;\n"
9630 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9631 "\t\tval = vecVal.y;\n"
9632 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9633 "\t\tval = vecVal.z;\n"
9634 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9635 "\t\tval = vecVal.w;\n"
9636 "\tif ((gl_VertexIndex % 2) == 0)\n"
9637 "\t\tval = val & 0xFFFFu;\n"
9639 "\t\tval = val >> 16u;\n"
9640 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9641 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9644 sources.glslSources.add("uniform-buffer.vert")
9645 << glu::VertexSource(vertexShader.str());
9649 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9650 const size_t arrayIntSize = arraySize * 4;
9651 std::ostringstream fragmentShader;
9655 "precision highp float;\n"
9656 "precision highp int;\n"
9657 "layout(location = 0) out highp vec4 o_color;\n"
9658 "layout(set=0, binding=0) uniform Block\n"
9660 "\thighp uvec4 values[" << arraySize << "];\n"
9662 "layout(push_constant) uniform PushC\n"
9665 "\tuint valuesPerPixel;\n"
9667 "void main (void) {\n"
9668 "\thighp uint id = pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel) + uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9669 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel))\n"
9671 "\thighp uint value = id;\n"
9672 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9674 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % " << arraySize << "u];\n"
9675 "\t\tif ((value % 4u) == 0u)\n"
9676 "\t\t\tvalue = vecVal.x;\n"
9677 "\t\telse if ((value % 4u) == 1u)\n"
9678 "\t\t\tvalue = vecVal.y;\n"
9679 "\t\telse if ((value % 4u) == 2u)\n"
9680 "\t\t\tvalue = vecVal.z;\n"
9681 "\t\telse if ((value % 4u) == 3u)\n"
9682 "\t\t\tvalue = vecVal.w;\n"
9684 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9685 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9688 sources.glslSources.add("uniform-buffer.frag")
9689 << glu::FragmentSource(fragmentShader.str());
9693 if (config.usage & USAGE_STORAGE_BUFFER)
9696 // Vertex storage buffer rendering
9697 const char* const vertexShader =
9699 "precision highp float;\n"
9700 "readonly layout(set=0, binding=0) buffer Block\n"
9702 "\thighp uvec4 values[];\n"
9704 "void main (void) {\n"
9705 "\tgl_PointSize = 1.0;\n"
9706 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9707 "\thighp uint val;\n"
9708 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9709 "\t\tval = vecVal.x;\n"
9710 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9711 "\t\tval = vecVal.y;\n"
9712 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9713 "\t\tval = vecVal.z;\n"
9714 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9715 "\t\tval = vecVal.w;\n"
9716 "\tif ((gl_VertexIndex % 2) == 0)\n"
9717 "\t\tval = val & 0xFFFFu;\n"
9719 "\t\tval = val >> 16u;\n"
9720 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9721 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9724 sources.glslSources.add("storage-buffer.vert")
9725 << glu::VertexSource(vertexShader);
9729 std::ostringstream fragmentShader;
9733 "precision highp float;\n"
9734 "precision highp int;\n"
9735 "layout(location = 0) out highp vec4 o_color;\n"
9736 "layout(set=0, binding=0) buffer Block\n"
9738 "\thighp uvec4 values[];\n"
9740 "layout(push_constant) uniform PushC\n"
9742 "\tuint valuesPerPixel;\n"
9743 "\tuint bufferSize;\n"
9745 "void main (void) {\n"
9746 "\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9747 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9748 "\thighp uint value = id;\n"
9749 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9751 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9752 "\t\tif ((value % 4u) == 0u)\n"
9753 "\t\t\tvalue = vecVal.x;\n"
9754 "\t\telse if ((value % 4u) == 1u)\n"
9755 "\t\t\tvalue = vecVal.y;\n"
9756 "\t\telse if ((value % 4u) == 2u)\n"
9757 "\t\t\tvalue = vecVal.z;\n"
9758 "\t\telse if ((value % 4u) == 3u)\n"
9759 "\t\t\tvalue = vecVal.w;\n"
9761 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9762 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9765 sources.glslSources.add("storage-buffer.frag")
9766 << glu::FragmentSource(fragmentShader.str());
9770 if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9773 // Vertex uniform texel buffer rendering
9774 const char* const vertexShader =
9776 "#extension GL_EXT_texture_buffer : require\n"
9777 "precision highp float;\n"
9778 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9779 "void main (void) {\n"
9780 "\tgl_PointSize = 1.0;\n"
9781 "\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9782 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9783 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9786 sources.glslSources.add("uniform-texel-buffer.vert")
9787 << glu::VertexSource(vertexShader);
9791 // Fragment uniform texel buffer rendering
9792 const char* const fragmentShader =
9794 "#extension GL_EXT_texture_buffer : require\n"
9795 "precision highp float;\n"
9796 "precision highp int;\n"
9797 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9798 "layout(location = 0) out highp vec4 o_color;\n"
9799 "layout(push_constant) uniform PushC\n"
9802 "\tuint valuesPerPixel;\n"
9803 "\tuint maxTexelCount;\n"
9805 "void main (void) {\n"
9806 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9807 "\thighp uint value = id;\n"
9808 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9810 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9812 "\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9814 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9815 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9818 sources.glslSources.add("uniform-texel-buffer.frag")
9819 << glu::FragmentSource(fragmentShader);
9823 if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9826 // Vertex storage texel buffer rendering
9827 const char* const vertexShader =
9829 "#extension GL_EXT_texture_buffer : require\n"
9830 "precision highp float;\n"
9831 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9832 "out gl_PerVertex {\n"
9833 "\tvec4 gl_Position;\n"
9834 "\tfloat gl_PointSize;\n"
9836 "void main (void) {\n"
9837 "\tgl_PointSize = 1.0;\n"
9838 "\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9839 "\tif (gl_VertexIndex % 2 == 0)\n"
9840 "\t\tval = val & 0xFFFFu;\n"
9842 "\t\tval = val >> 16;\n"
9843 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9844 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9847 sources.glslSources.add("storage-texel-buffer.vert")
9848 << glu::VertexSource(vertexShader);
9851 // Fragment storage texel buffer rendering
9852 const char* const fragmentShader =
9854 "#extension GL_EXT_texture_buffer : require\n"
9855 "precision highp float;\n"
9856 "precision highp int;\n"
9857 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9858 "layout(location = 0) out highp vec4 o_color;\n"
9859 "layout(push_constant) uniform PushC\n"
9862 "\tuint valuesPerPixel;\n"
9863 "\tuint maxTexelCount;\n"
9866 "void main (void) {\n"
9867 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9868 "\thighp uint value = id;\n"
9869 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9871 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9873 "\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9875 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9876 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9879 sources.glslSources.add("storage-texel-buffer.frag")
9880 << glu::FragmentSource(fragmentShader);
9884 if (config.usage & USAGE_STORAGE_IMAGE)
9887 // Vertex storage image
9888 const char* const vertexShader =
9890 "precision highp float;\n"
9891 "layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9892 "out gl_PerVertex {\n"
9893 "\tvec4 gl_Position;\n"
9894 "\tfloat gl_PointSize;\n"
9896 "void main (void) {\n"
9897 "\tgl_PointSize = 1.0;\n"
9898 "\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9899 "\thighp vec2 pos;\n"
9900 "\tif (gl_VertexIndex % 2 == 0)\n"
9901 "\t\tpos = val.xy;\n"
9903 "\t\tpos = val.zw;\n"
9904 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9907 sources.glslSources.add("storage-image.vert")
9908 << glu::VertexSource(vertexShader);
9911 // Fragment storage image
9912 const char* const fragmentShader =
9914 "#extension GL_EXT_texture_buffer : require\n"
9915 "precision highp float;\n"
9916 "layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9917 "layout(location = 0) out highp vec4 o_color;\n"
9918 "void main (void) {\n"
9919 "\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
9920 "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9921 "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9922 "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9924 "\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
9925 "\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9927 "\to_color = vec4(value) / vec4(255.0);\n"
9930 sources.glslSources.add("storage-image.frag")
9931 << glu::FragmentSource(fragmentShader);
9935 if (config.usage & USAGE_SAMPLED_IMAGE)
9938 // Vertex storage image
9939 const char* const vertexShader =
9941 "precision highp float;\n"
9942 "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9943 "out gl_PerVertex {\n"
9944 "\tvec4 gl_Position;\n"
9945 "\tfloat gl_PointSize;\n"
9947 "void main (void) {\n"
9948 "\tgl_PointSize = 1.0;\n"
9949 "\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9950 "\thighp vec2 pos;\n"
9951 "\tif (gl_VertexIndex % 2 == 0)\n"
9952 "\t\tpos = val.xy;\n"
9954 "\t\tpos = val.zw;\n"
9955 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9958 sources.glslSources.add("sampled-image.vert")
9959 << glu::VertexSource(vertexShader);
9962 // Fragment storage image
9963 const char* const fragmentShader =
9965 "#extension GL_EXT_texture_buffer : require\n"
9966 "precision highp float;\n"
9967 "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9968 "layout(location = 0) out highp vec4 o_color;\n"
9969 "void main (void) {\n"
9970 "\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
9971 "\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
9972 "\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
9973 "\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
9975 "\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
9976 "\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
9978 "\to_color = vec4(value) / vec4(255.0);\n"
9981 sources.glslSources.add("sampled-image.frag")
9982 << glu::FragmentSource(fragmentShader);
9987 const char* const vertexShader =
9989 "out gl_PerVertex {\n"
9990 "\tvec4 gl_Position;\n"
9992 "precision highp float;\n"
9993 "void main (void) {\n"
9994 "\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
9995 "\t ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
9998 sources.glslSources.add("render-quad.vert")
9999 << glu::VertexSource(vertexShader);
10003 const char* const fragmentShader =
10004 "#version 310 es\n"
10005 "layout(location = 0) out highp vec4 o_color;\n"
10006 "void main (void) {\n"
10007 "\to_color = vec4(1.0);\n"
10010 sources.glslSources.add("render-white.frag")
10011 << glu::FragmentSource(fragmentShader);
10018 tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
10020 de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
10021 const vk::VkDeviceSize sizes[] =
10026 ONE_MEGABYTE, // 1M
10028 const Usage usages[] =
10032 USAGE_TRANSFER_SRC,
10033 USAGE_TRANSFER_DST,
10034 USAGE_VERTEX_BUFFER,
10035 USAGE_INDEX_BUFFER,
10036 USAGE_UNIFORM_BUFFER,
10037 USAGE_UNIFORM_TEXEL_BUFFER,
10038 USAGE_STORAGE_BUFFER,
10039 USAGE_STORAGE_TEXEL_BUFFER,
10040 USAGE_STORAGE_IMAGE,
10041 USAGE_SAMPLED_IMAGE
10043 const Usage readUsages[] =
10046 USAGE_TRANSFER_SRC,
10047 USAGE_VERTEX_BUFFER,
10048 USAGE_INDEX_BUFFER,
10049 USAGE_UNIFORM_BUFFER,
10050 USAGE_UNIFORM_TEXEL_BUFFER,
10051 USAGE_STORAGE_BUFFER,
10052 USAGE_STORAGE_TEXEL_BUFFER,
10053 USAGE_STORAGE_IMAGE,
10054 USAGE_SAMPLED_IMAGE
10057 const Usage writeUsages[] =
10063 for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
10065 const Usage writeUsage = writeUsages[writeUsageNdx];
10067 for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
10069 const Usage readUsage = readUsages[readUsageNdx];
10070 const Usage usage = writeUsage | readUsage;
10071 const string usageGroupName (usageToName(usage));
10072 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10074 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10076 const vk::VkDeviceSize size = sizes[sizeNdx];
10077 const string testName (de::toString((deUint64)(size)));
10078 const TestConfig config =
10082 vk::VK_SHARING_MODE_EXCLUSIVE
10085 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10088 group->addChild(usageGroup.get());
10089 usageGroup.release();
10094 Usage all = (Usage)0;
10096 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
10097 all = all | usages[usageNdx];
10100 const string usageGroupName ("all");
10101 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10103 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10105 const vk::VkDeviceSize size = sizes[sizeNdx];
10106 const string testName (de::toString((deUint64)(size)));
10107 const TestConfig config =
10111 vk::VK_SHARING_MODE_EXCLUSIVE
10114 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10117 group->addChild(usageGroup.get());
10118 usageGroup.release();
10122 const string usageGroupName ("all_device");
10123 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
10125 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
10127 const vk::VkDeviceSize size = sizes[sizeNdx];
10128 const string testName (de::toString((deUint64)(size)));
10129 const TestConfig config =
10131 (Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
10133 vk::VK_SHARING_MODE_EXCLUSIVE
10136 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
10139 group->addChild(usageGroup.get());
10140 usageGroup.release();
10144 return group.release();