1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Pipeline barrier tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktMemoryPipelineBarrierTests.hpp"
26 #include "vktTestCaseUtil.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
36 #include "tcuMaybe.hpp"
37 #include "tcuTextureUtil.hpp"
38 #include "tcuTestLog.hpp"
39 #include "tcuResultCollector.hpp"
40 #include "tcuTexture.hpp"
41 #include "tcuImageCompare.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
71 using tcu::ConstPixelBufferAccess;
72 using tcu::PixelBufferAccess;
73 using tcu::TextureFormat;
74 using tcu::TextureLevel;
// Upper bounds for the memory object sizes exercised per descriptor type.
MAX_UNIFORM_BUFFER_SIZE = 1024,
// 256 MiB cap keeps storage-buffer test allocations bounded.
MAX_STORAGE_BUFFER_SIZE = (1<<28)
// \todo [mika] Add to utilities
// Integer division rounding towards positive infinity.
// Written as quotient-plus-carry (instead of (a + b - 1) / b) so the
// addition cannot overflow for large a.
template<typename T>
T divRoundUp (const T& a, const T& b)
{
    const T quotient  = a / b;
    const T remainder = a % b;

    return quotient + (remainder == 0 ? 0 : 1);
}
// Union of every pipeline stage these tests use; shorthand for barriers that
// must synchronize against "any stage".
ALL_PIPELINE_STAGES = vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
| vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
| vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT
| vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
| vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
| vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
| vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
| vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
| vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
| vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
| vk::VK_PIPELINE_STAGE_TRANSFER_BIT
| vk::VK_PIPELINE_STAGE_HOST_BIT
// Union of every access type these tests use; shorthand for barriers that
// must cover "any access".
ALL_ACCESSES = vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT
| vk::VK_ACCESS_INDEX_READ_BIT
| vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
| vk::VK_ACCESS_UNIFORM_READ_BIT
| vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
| vk::VK_ACCESS_SHADER_READ_BIT
| vk::VK_ACCESS_SHADER_WRITE_BIT
| vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
| vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
| vk::VK_ACCESS_TRANSFER_READ_BIT
| vk::VK_ACCESS_TRANSFER_WRITE_BIT
| vk::VK_ACCESS_HOST_READ_BIT
| vk::VK_ACCESS_HOST_WRITE_BIT
| vk::VK_ACCESS_MEMORY_READ_BIT
| vk::VK_ACCESS_MEMORY_WRITE_BIT
// Bitmask describing how a tested memory object may be used. Each bit maps
// to one or more Vulkan buffer/image usage flags, pipeline stages and access
// flags (see usageTo*Flags() below).
// Mapped host read and write
USAGE_HOST_READ = (0x1u<<0),
USAGE_HOST_WRITE = (0x1u<<1),
// Copy and other transfer operations
USAGE_TRANSFER_SRC = (0x1u<<2),
USAGE_TRANSFER_DST = (0x1u<<3),
// Buffer usage flags
USAGE_INDEX_BUFFER = (0x1u<<4),
USAGE_VERTEX_BUFFER = (0x1u<<5),
USAGE_UNIFORM_BUFFER = (0x1u<<6),
USAGE_STORAGE_BUFFER = (0x1u<<7),
USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),
// \todo [2016-03-09 mika] This is probably almost impossible to do
USAGE_INDIRECT_BUFFER = (0x1u<<10),
// Texture usage flags
USAGE_SAMPLED_IMAGE = (0x1u<<11),
USAGE_STORAGE_IMAGE = (0x1u<<12),
USAGE_COLOR_ATTACHMENT = (0x1u<<13),
USAGE_INPUT_ATTACHMENT = (0x1u<<14),
USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
166 bool supportsDeviceBufferWrites (Usage usage)
168 if (usage & USAGE_TRANSFER_DST)
171 if (usage & USAGE_STORAGE_BUFFER)
174 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
180 bool supportsDeviceImageWrites (Usage usage)
182 if (usage & USAGE_TRANSFER_DST)
185 if (usage & USAGE_STORAGE_IMAGE)
188 if (usage & USAGE_COLOR_ATTACHMENT)
// Sequential access enums
// Densely numbered mirror of VkAccessFlagBits (which is a bitmask), usable
// as an array/loop index.
ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
ACCESS_INDEX_READ_BIT,
ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
ACCESS_UNIFORM_READ_BIT,
ACCESS_INPUT_ATTACHMENT_READ_BIT,
ACCESS_SHADER_READ_BIT,
ACCESS_SHADER_WRITE_BIT,
ACCESS_COLOR_ATTACHMENT_READ_BIT,
ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
ACCESS_TRANSFER_READ_BIT,
ACCESS_TRANSFER_WRITE_BIT,
ACCESS_HOST_READ_BIT,
ACCESS_HOST_WRITE_BIT,
ACCESS_MEMORY_READ_BIT,
ACCESS_MEMORY_WRITE_BIT,
// Sequential stage enums
// Densely numbered mirror of VkPipelineStageFlagBits (which is a bitmask),
// usable as an array/loop index; see pipelineStageFlagToPipelineStage().
PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
PIPELINESTAGE_DRAW_INDIRECT_BIT,
PIPELINESTAGE_VERTEX_INPUT_BIT,
PIPELINESTAGE_VERTEX_SHADER_BIT,
PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
PIPELINESTAGE_GEOMETRY_SHADER_BIT,
PIPELINESTAGE_FRAGMENT_SHADER_BIT,
PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
PIPELINESTAGE_COMPUTE_SHADER_BIT,
PIPELINESTAGE_TRANSFER_BIT,
PIPELINESTAGE_HOST_BIT,
240 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flags)
244 case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT: return PIPELINESTAGE_TOP_OF_PIPE_BIT;
245 case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT: return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
246 case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT: return PIPELINESTAGE_DRAW_INDIRECT_BIT;
247 case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT: return PIPELINESTAGE_VERTEX_INPUT_BIT;
248 case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT: return PIPELINESTAGE_VERTEX_SHADER_BIT;
249 case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
250 case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
251 case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT: return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
252 case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT: return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
253 case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
254 case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
255 case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT: return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
256 case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT: return PIPELINESTAGE_COMPUTE_SHADER_BIT;
257 case vk::VK_PIPELINE_STAGE_TRANSFER_BIT: return PIPELINESTAGE_TRANSFER_BIT;
258 case vk::VK_PIPELINE_STAGE_HOST_BIT: return PIPELINESTAGE_HOST_BIT;
261 DE_FATAL("Unknown pipeline stage flags");
262 return PIPELINESTAGE_LAST;
266 Usage operator| (Usage a, Usage b)
268 return (Usage)((deUint32)a | (deUint32)b);
271 Usage operator& (Usage a, Usage b)
273 return (Usage)((deUint32)a & (deUint32)b);
276 string usageToName (Usage usage)
281 const char* const name;
284 { USAGE_HOST_READ, "host_read" },
285 { USAGE_HOST_WRITE, "host_write" },
287 { USAGE_TRANSFER_SRC, "transfer_src" },
288 { USAGE_TRANSFER_DST, "transfer_dst" },
290 { USAGE_INDEX_BUFFER, "index_buffer" },
291 { USAGE_VERTEX_BUFFER, "vertex_buffer" },
292 { USAGE_UNIFORM_BUFFER, "uniform_buffer" },
293 { USAGE_STORAGE_BUFFER, "storage_buffer" },
294 { USAGE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer" },
295 { USAGE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer" },
296 { USAGE_INDIRECT_BUFFER, "indirect_buffer" },
297 { USAGE_SAMPLED_IMAGE, "image_sampled" },
298 { USAGE_STORAGE_IMAGE, "storage_image" },
299 { USAGE_COLOR_ATTACHMENT, "color_attachment" },
300 { USAGE_INPUT_ATTACHMENT, "input_attachment" },
301 { USAGE_DEPTH_STENCIL_ATTACHMENT, "depth_stencil_attachment" },
304 std::ostringstream stream;
307 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
309 if (usage & usageNames[usageNdx].usage)
316 stream << usageNames[usageNdx].name;
323 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
325 vk::VkBufferUsageFlags flags = 0;
327 if (usage & USAGE_TRANSFER_SRC)
328 flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
330 if (usage & USAGE_TRANSFER_DST)
331 flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
333 if (usage & USAGE_INDEX_BUFFER)
334 flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
336 if (usage & USAGE_VERTEX_BUFFER)
337 flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
339 if (usage & USAGE_INDIRECT_BUFFER)
340 flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
342 if (usage & USAGE_UNIFORM_BUFFER)
343 flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
345 if (usage & USAGE_STORAGE_BUFFER)
346 flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
348 if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
349 flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
351 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
352 flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
357 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
359 vk::VkImageUsageFlags flags = 0;
361 if (usage & USAGE_TRANSFER_SRC)
362 flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
364 if (usage & USAGE_TRANSFER_DST)
365 flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
367 if (usage & USAGE_SAMPLED_IMAGE)
368 flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
370 if (usage & USAGE_STORAGE_IMAGE)
371 flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
373 if (usage & USAGE_COLOR_ATTACHMENT)
374 flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
376 if (usage & USAGE_INPUT_ATTACHMENT)
377 flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
379 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
380 flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
385 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
387 vk::VkPipelineStageFlags flags = 0;
389 if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
390 flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
392 if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
393 flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
395 if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
396 flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
398 if (usage & USAGE_INDIRECT_BUFFER)
399 flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
402 (USAGE_UNIFORM_BUFFER
403 | USAGE_STORAGE_BUFFER
404 | USAGE_UNIFORM_TEXEL_BUFFER
405 | USAGE_STORAGE_TEXEL_BUFFER
406 | USAGE_SAMPLED_IMAGE
407 | USAGE_STORAGE_IMAGE))
409 flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
410 | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
411 | vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
412 | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
413 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
414 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
417 if (usage & USAGE_INPUT_ATTACHMENT)
418 flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
420 if (usage & USAGE_COLOR_ATTACHMENT)
421 flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
423 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
425 flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
426 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
432 vk::VkAccessFlags usageToAccessFlags (Usage usage)
434 vk::VkAccessFlags flags = 0;
436 if (usage & USAGE_HOST_READ)
437 flags |= vk::VK_ACCESS_HOST_READ_BIT;
439 if (usage & USAGE_HOST_WRITE)
440 flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
442 if (usage & USAGE_TRANSFER_SRC)
443 flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
445 if (usage & USAGE_TRANSFER_DST)
446 flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
448 if (usage & USAGE_INDEX_BUFFER)
449 flags |= vk::VK_ACCESS_INDEX_READ_BIT;
451 if (usage & USAGE_VERTEX_BUFFER)
452 flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
454 if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
455 flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
457 if (usage & USAGE_SAMPLED_IMAGE)
458 flags |= vk::VK_ACCESS_SHADER_READ_BIT;
460 if (usage & (USAGE_STORAGE_BUFFER
461 | USAGE_STORAGE_TEXEL_BUFFER
462 | USAGE_STORAGE_IMAGE))
463 flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
465 if (usage & USAGE_INDIRECT_BUFFER)
466 flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
468 if (usage & USAGE_COLOR_ATTACHMENT)
469 flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
471 if (usage & USAGE_INPUT_ATTACHMENT)
472 flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
474 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
475 flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
476 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
// Size of the tested memory object in bytes.
vk::VkDeviceSize size;
// Sharing mode (exclusive vs concurrent) used when creating buffers/images.
vk::VkSharingMode sharing;
488 vk::Move<vk::VkCommandBuffer> createCommandBuffer (const vk::DeviceInterface& vkd,
490 vk::VkCommandPool pool,
491 vk::VkCommandBufferLevel level)
493 const vk::VkCommandBufferAllocateInfo bufferInfo =
495 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
503 return vk::allocateCommandBuffer(vkd, device, &bufferInfo);
506 vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface& vkd,
508 vk::VkCommandPool pool,
509 vk::VkCommandBufferLevel level)
511 const vk::VkCommandBufferInheritanceInfo inheritInfo =
513 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
522 const vk::VkCommandBufferBeginInfo beginInfo =
524 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
527 (level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
530 vk::Move<vk::VkCommandBuffer> commandBuffer (createCommandBuffer(vkd, device, pool, level));
532 vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
534 return commandBuffer;
537 vk::Move<vk::VkCommandPool> createCommandPool (const vk::DeviceInterface& vkd,
539 deUint32 queueFamilyIndex)
541 const vk::VkCommandPoolCreateInfo poolInfo =
543 vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
546 vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
550 return vk::createCommandPool(vkd, device, &poolInfo);
553 vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface& vkd,
555 vk::VkDeviceSize size,
556 vk::VkBufferUsageFlags usage,
557 vk::VkSharingMode sharingMode,
558 const vector<deUint32>& queueFamilies)
560 const vk::VkBufferCreateInfo createInfo =
562 vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
569 (deUint32)queueFamilies.size(),
573 return vk::createBuffer(vkd, device, &createInfo);
576 vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface& vkd,
578 vk::VkDeviceSize size,
579 deUint32 memoryTypeIndex)
581 const vk::VkMemoryAllocateInfo alloc =
583 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
590 return vk::allocateMemory(vkd, device, &alloc);
// Allocates memory compatible with the buffer's requirements and the
// requested property flags, and binds the buffer to it. Candidate memory
// types are tried in index order; out-of-memory errors move on to the next
// candidate, any other error is rethrown. Fails the test if no type works.
vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface& vki,
const vk::DeviceInterface& vkd,
vk::VkPhysicalDevice physicalDevice,
vk::VkMemoryPropertyFlags properties)
// Query what the buffer requires and what the device offers.
const vk::VkMemoryRequirements memoryRequirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
deUint32 memoryTypeIndex;
for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
// Type must be allowed by the buffer AND have all requested properties.
if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
const vk::VkMemoryAllocateInfo allocationInfo =
vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
memoryRequirements.size,
vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
catch (const vk::Error& error)
if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
// Try next memory type/heap if out of memory
// Throw all other errors forward
TCU_FAIL("Failed to allocate memory for buffer");
// Image counterpart of bindBufferMemory(): allocates memory compatible with
// the image's requirements and the requested properties and binds the image.
// Out-of-memory errors fall through to the next candidate memory type.
vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface& vki,
const vk::DeviceInterface& vkd,
vk::VkPhysicalDevice physicalDevice,
vk::VkMemoryPropertyFlags properties)
const vk::VkMemoryRequirements memoryRequirements = vk::getImageMemoryRequirements(vkd, device, image);
const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
deUint32 memoryTypeIndex;
for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
// Type must be allowed by the image AND have all requested properties.
if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
const vk::VkMemoryAllocateInfo allocationInfo =
vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
memoryRequirements.size,
vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
catch (const vk::Error& error)
if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
// Try next memory type/heap if out of memory
// Throw all other errors forward
TCU_FAIL("Failed to allocate memory for image");
693 void queueRun (const vk::DeviceInterface& vkd,
695 vk::VkCommandBuffer commandBuffer)
697 const vk::VkSubmitInfo submitInfo =
699 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
704 (const vk::VkPipelineStageFlags*)DE_NULL,
713 VK_CHECK(vkd.queueSubmit(queue, 1, &submitInfo, 0));
714 VK_CHECK(vkd.queueWaitIdle(queue));
717 void* mapMemory (const vk::DeviceInterface& vkd,
719 vk::VkDeviceMemory memory,
720 vk::VkDeviceSize size)
724 VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
// Host-side model of the tested memory object. Tracks both the expected
// byte values (m_data) and, per byte, whether the value is currently
// defined (m_defined, one bit per byte packed into 64-bit words).
class ReferenceMemory
ReferenceMemory (size_t size);
// Set a single byte and mark it defined.
void set (size_t pos, deUint8 val);
// Read a byte; asserts the byte is defined.
deUint8 get (size_t pos) const;
// True when the byte at 'pos' holds a tracked value.
bool isDefined (size_t pos) const;
// Bulk variants over [offset, offset+size).
void setDefined (size_t offset, size_t size, const void* data);
void setUndefined (size_t offset, size_t size);
void setData (size_t offset, size_t size, const void* data);
size_t getSize (void) const { return m_data.size(); }
// Expected contents, one entry per byte of device memory.
vector<deUint8> m_data;
// Defined-bit per byte, 64 bytes tracked per element.
vector<deUint64> m_defined;
749 ReferenceMemory::ReferenceMemory (size_t size)
751 , m_defined (size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
755 void ReferenceMemory::set (size_t pos, deUint8 val)
757 DE_ASSERT(pos < m_data.size());
760 m_defined[pos / 64] |= 0x1ull << (pos % 64);
763 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
765 const deUint8* data = (const deUint8*)data_;
767 DE_ASSERT(offset < m_data.size());
768 DE_ASSERT(offset + size <= m_data.size());
770 // \todo [2016-03-09 mika] Optimize
771 for (size_t pos = 0; pos < size; pos++)
773 m_data[offset + pos] = data[pos];
774 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
// NOTE(review): despite the name, this SETS the defined bit for every byte
// in the range — exactly what setData() does — leaving whatever is already
// in m_data treated as the expected value. If the intent is to mark bytes
// as having unspecified content, this should clear the bit instead
// (m_defined[...] &= ~mask). TODO confirm against the verification code
// that consumes isDefined() before changing anything.
void ReferenceMemory::setUndefined (size_t offset, size_t size)
// \todo [2016-03-09 mika] Optimize
for (size_t pos = 0; pos < size; pos++)
m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
785 deUint8 ReferenceMemory::get (size_t pos) const
787 DE_ASSERT(pos < m_data.size());
788 DE_ASSERT(isDefined(pos));
792 bool ReferenceMemory::isDefined (size_t pos) const
794 DE_ASSERT(pos < m_data.size());
796 return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
// Owns the VkDeviceMemory allocation under test together with precomputed
// limits: the largest buffer and the largest RGBA8 image that can be bound
// to it from the chosen memory type.
Memory (const vk::InstanceInterface& vki,
const vk::DeviceInterface& vkd,
vk::VkPhysicalDevice physicalDevice,
vk::VkDeviceSize size,
deUint32 memoryTypeIndex,
vk::VkDeviceSize maxBufferSize,
deInt32 maxImageWidth,
deInt32 maxImageHeight);
vk::VkDeviceSize getSize (void) const { return m_size; }
vk::VkDeviceSize getMaxBufferSize (void) const { return m_maxBufferSize; }
// Buffers are only usable if some buffer fits the memory type at all.
bool getSupportBuffers (void) const { return m_maxBufferSize > 0; }
deInt32 getMaxImageWidth (void) const { return m_maxImageWidth; }
deInt32 getMaxImageHeight (void) const { return m_maxImageHeight; }
// Images are only usable if a non-empty image fits the memory type.
bool getSupportImages (void) const { return m_maxImageWidth > 0; }
const vk::VkMemoryType& getMemoryType (void) const { return m_memoryType; }
deUint32 getMemoryTypeIndex (void) const { return m_memoryTypeIndex; }
vk::VkDeviceMemory getMemory (void) const { return *m_memory; }
const vk::VkDeviceSize m_size;
const deUint32 m_memoryTypeIndex;
const vk::VkMemoryType m_memoryType;
const vk::Unique<vk::VkDeviceMemory> m_memory;
const vk::VkDeviceSize m_maxBufferSize;
const deInt32 m_maxImageWidth;
const deInt32 m_maxImageHeight;
834 vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface& vki,
835 vk::VkPhysicalDevice device,
836 deUint32 memoryTypeIndex)
838 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);
840 DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);
842 return memoryProperties.memoryTypes[memoryTypeIndex];
// Finds the largest buffer size (searched with halving step sizes, starting
// from memorySize/2) whose memory requirements still fit into 'memorySize'
// and are satisfiable from the given memory type. Returns 0 if nothing fits.
vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface& vkd,
vk::VkBufferUsageFlags usage,
vk::VkSharingMode sharingMode,
const vector<deUint32>& queueFamilies,
vk::VkDeviceSize memorySize,
deUint32 memoryTypeIndex)
vk::VkDeviceSize lastSuccess = 0;
vk::VkDeviceSize currentSize = memorySize / 2;
// Fast path: a buffer of exactly memorySize already fits.
const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
// Binary-search style probe: grow on success, shrink on failure.
for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
lastSuccess = currentSize;
currentSize += stepSize;
currentSize -= stepSize;
// Round size down maximum W * H * 4, where W and H < 4096
vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
const vk::VkDeviceSize maxTextureSize = 4096;
vk::VkDeviceSize maxTexelCount = size / 4;
// NOTE(review): de::max() allows bestW to exceed maxTextureSize whenever
// maxTexelCount > 4096, contradicting the "W and H < 4096" contract stated
// above — this looks like it was meant to be de::min(); TODO confirm with
// findImageSizeWxHx4(), which rejects dimensions >= maxTextureSize.
vk::VkDeviceSize bestW = de::max(maxTexelCount, maxTextureSize);
vk::VkDeviceSize bestH = maxTexelCount / bestW;
// \todo [2016-03-09 mika] Could probably be faster?
// Search for a W*H factorization closer to maxTexelCount.
for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
const vk::VkDeviceSize h = maxTexelCount / w;
if (bestW * bestH < w * h)
return bestW * bestH * 4;
909 // Find RGBA8 image size that has exactly "size" of number of bytes.
910 // "size" must be W * H * 4 where W and H < 4096
911 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
913 const vk::VkDeviceSize maxTextureSize = 4096;
914 vk::VkDeviceSize texelCount = size / 4;
916 DE_ASSERT((size % 4) == 0);
918 // \todo [2016-03-09 mika] Could probably be faster?
919 for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
921 const vk::VkDeviceSize h = texelCount / w;
923 if ((texelCount % w) == 0 && h < maxTextureSize)
924 return IVec2((int)w, (int)h);
927 DE_FATAL("Invalid size");
928 return IVec2(-1, -1);
// Finds the largest 2D RGBA8 optimal-tiling image (width x height) whose
// memory requirements fit into 'memorySize' and can be satisfied from the
// given memory type. Starts from a near-square estimate and probes with
// halving step sizes, growing on success and shrinking on failure.
IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface& vkd,
vk::VkImageUsageFlags usage,
vk::VkSharingMode sharingMode,
const vector<deUint32>& queueFamilies,
vk::VkDeviceSize memorySize,
deUint32 memoryTypeIndex)
IVec2 lastSuccess (0);
// Initial estimate: a near-square layout of all texels that fit in memory.
const deUint32 texelCount = (deUint32)(memorySize / 4);
const deUint32 width = (deUint32)deFloatSqrt((float)texelCount);
const deUint32 height = texelCount / width;
currentSize[0] = deMaxu32(width, height);
currentSize[1] = deMinu32(width, height);
for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
const vk::VkImageCreateInfo createInfo =
vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
vk::VK_IMAGE_TYPE_2D,
vk::VK_FORMAT_R8G8B8A8_UNORM,
(deUint32)currentSize[0],
(deUint32)currentSize[1],
vk::VK_SAMPLE_COUNT_1_BIT,
vk::VK_IMAGE_TILING_OPTIMAL,
(deUint32)queueFamilies.size(),
vk::VK_IMAGE_LAYOUT_UNDEFINED
const vk::Unique<vk::VkImage> image (vk::createImage(vkd, device, &createInfo));
const vk::VkMemoryRequirements requirements (vk::getImageMemoryRequirements(vkd, device, *image));
if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
lastSuccess = currentSize;
currentSize[0] += stepSize;
currentSize[1] += stepSize;
currentSize[0] -= stepSize;
currentSize[1] -= stepSize;
// Allocates the tested memory object and caches the memory-type info and
// precomputed buffer/image size limits for later queries.
Memory::Memory (const vk::InstanceInterface& vki,
const vk::DeviceInterface& vkd,
vk::VkPhysicalDevice physicalDevice,
vk::VkDevice device,
vk::VkDeviceSize size,
deUint32 memoryTypeIndex,
vk::VkDeviceSize maxBufferSize,
deInt32 maxImageWidth,
deInt32 maxImageHeight)
, m_memoryTypeIndex (memoryTypeIndex)
, m_memoryType (getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
, m_memory (allocMemory(vkd, device, size, memoryTypeIndex))
, m_maxBufferSize (maxBufferSize)
, m_maxImageWidth (maxImageWidth)
, m_maxImageHeight (maxImageHeight)
// Bundles the Vulkan objects shared by every command in a test run: the
// interfaces, device, queues, a command pool and the compiled programs.
Context (const vk::InstanceInterface& vki,
const vk::DeviceInterface& vkd,
vk::VkPhysicalDevice physicalDevice,
vk::VkDevice device,
deUint32 queueFamilyIndex,
const vector<pair<deUint32, vk::VkQueue> >& queues,
const vk::ProgramCollection<vk::ProgramBinary>& binaryCollection)
, m_physicalDevice (physicalDevice)
, m_queueFamilyIndex (queueFamilyIndex)
, m_commandPool (createCommandPool(vkd, device, queueFamilyIndex))
, m_binaryCollection (binaryCollection)
// Collect the distinct family index of every supplied queue.
for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
m_queueFamilies.push_back(m_queues[queueNdx].first);
const vk::InstanceInterface& getInstanceInterface (void) const { return m_vki; }
vk::VkPhysicalDevice getPhysicalDevice (void) const { return m_physicalDevice; }
vk::VkDevice getDevice (void) const { return m_device; }
const vk::DeviceInterface& getDeviceInterface (void) const { return m_vkd; }
vk::VkQueue getQueue (void) const { return m_queue; }
deUint32 getQueueFamily (void) const { return m_queueFamilyIndex; }
const vector<pair<deUint32, vk::VkQueue> >& getQueues (void) const { return m_queues; }
// Note: returns the vector by value (a copy on every call).
const vector<deUint32> getQueueFamilies (void) const { return m_queueFamilies; }
vk::VkCommandPool getCommandPool (void) const { return *m_commandPool; }
const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_binaryCollection; }
const vk::InstanceInterface& m_vki;
const vk::DeviceInterface& m_vkd;
const vk::VkPhysicalDevice m_physicalDevice;
const vk::VkDevice m_device;
const vk::VkQueue m_queue;
const deUint32 m_queueFamilyIndex;
const vector<pair<deUint32, vk::VkQueue> > m_queues;
const vk::Unique<vk::VkCommandPool> m_commandPool;
const vk::ProgramCollection<vk::ProgramBinary>& m_binaryCollection;
vector<deUint32> m_queueFamilies;
// State threaded through Command::prepare(): the shared Context/Memory plus
// ownership of at most ONE current resource -- either a buffer or an image,
// never both (enforced by the asserts in setBuffer()/setImage()).
class PrepareContext
PrepareContext (const Context& context,
const Memory& memory)
: m_context (context)
const Memory& getMemory (void) const { return m_memory; }
const Context& getContext (void) const { return m_context; }
const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
// Take ownership of a buffer; no image or buffer may currently be set.
void setBuffer (vk::Move<vk::VkBuffer> buffer,
vk::VkDeviceSize size)
DE_ASSERT(!m_currentImage);
DE_ASSERT(!m_currentBuffer);
m_currentBuffer = buffer;
m_currentBufferSize = size;
vk::VkBuffer getBuffer (void) const { return *m_currentBuffer; }
vk::VkDeviceSize getBufferSize (void) const
DE_ASSERT(m_currentBuffer);
return m_currentBufferSize;
// Give up ownership without destroying the buffer (caller now owns it).
void releaseBuffer (void) { m_currentBuffer.disown(); }
// Take ownership of an image; no image or buffer may currently be set.
void setImage (vk::Move<vk::VkImage> image,
vk::VkImageLayout layout,
vk::VkDeviceSize memorySize,
DE_ASSERT(!m_currentImage);
DE_ASSERT(!m_currentBuffer);
m_currentImage = image;
m_currentImageMemorySize = memorySize;
m_currentImageLayout = layout;
m_currentImageWidth = width;
m_currentImageHeight = height;
// Record a layout transition performed on the current image.
void setImageLayout (vk::VkImageLayout layout)
DE_ASSERT(m_currentImage);
m_currentImageLayout = layout;
vk::VkImage getImage (void) const { return *m_currentImage; }
deInt32 getImageWidth (void) const
DE_ASSERT(m_currentImage);
return m_currentImageWidth;
deInt32 getImageHeight (void) const
DE_ASSERT(m_currentImage);
return m_currentImageHeight;
vk::VkDeviceSize getImageMemorySize (void) const
DE_ASSERT(m_currentImage);
return m_currentImageMemorySize;
// Give up ownership without destroying the image (caller now owns it).
void releaseImage (void) { m_currentImage.disown(); }
vk::VkImageLayout getImageLayout (void) const
DE_ASSERT(m_currentImage);
return m_currentImageLayout;
const Context& m_context;
const Memory& m_memory;
vk::Move<vk::VkBuffer> m_currentBuffer;
vk::VkDeviceSize m_currentBufferSize;
vk::Move<vk::VkImage> m_currentImage;
vk::VkDeviceSize m_currentImageMemorySize;
vk::VkImageLayout m_currentImageLayout;
deInt32 m_currentImageWidth;
deInt32 m_currentImageHeight;
1161 class ExecuteContext
1164 ExecuteContext (const Context& context)
1165 : m_context (context)
1169 const Context& getContext (void) const { return m_context; }
1170 void setMapping (void* ptr) { m_mapping = ptr; }
1171 void* getMapping (void) const { return m_mapping; }
1174 const Context& m_context;
// State threaded through Command::verify(): the log and result collector,
// the shared Context, and the host-side reference model of the memory
// (byte-level ReferenceMemory plus an image-shaped reference for rendering).
VerifyContext (TestLog& log,
tcu::ResultCollector& resultCollector,
const Context& context,
vk::VkDeviceSize size)
, m_resultCollector (resultCollector)
, m_context (context)
, m_reference ((size_t)size)
const Context& getContext (void) const { return m_context; }
TestLog& getLog (void) const { return m_log; }
tcu::ResultCollector& getResultCollector (void) const { return m_resultCollector; }
ReferenceMemory& getReference (void) { return m_reference; }
TextureLevel& getReferenceImage (void) { return m_referenceImage;}
tcu::ResultCollector& m_resultCollector;
const Context& m_context;
ReferenceMemory m_reference;
TextureLevel m_referenceImage;
1210 // Constructor should allocate all non-vulkan resources.
1211 virtual ~Command (void) {}
1213 // Get name of the command
1214 virtual const char* getName (void) const = 0;
1216 // Log prepare operations
1217 virtual void logPrepare (TestLog&, size_t) const {}
1218 // Log executed operations
1219 virtual void logExecute (TestLog&, size_t) const {}
1221 // Prepare should allocate all vulkan resources and resources that require
1222 // that buffer or memory has been already allocated. This should build all
1223 // command buffers etc.
1224 virtual void prepare (PrepareContext&) {}
1226 // Execute command. Write or read mapped memory, submit commands to queue
1228 virtual void execute (ExecuteContext&) {}
1230 // Verify that results are correct.
1231 virtual void verify (VerifyContext&, size_t) {}
1234 // Allow only inheritance
1239 Command (const Command&);
1240 Command& operator& (const Command&);
// Maps the whole tested memory allocation into host address space and
// stores the pointer in the execute context for later host access commands.
1243 class Map : public Command
1248 const char* getName (void) const { return "Map"; }
1251 void logExecute (TestLog& log, size_t commandIndex) const
1253 log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
// Cache the memory handle and size; the allocation exists by prepare time.
1256 void prepare (PrepareContext& context)
1258 m_memory = context.getMemory().getMemory();
1259 m_size = context.getMemory().getSize();
1262 void execute (ExecuteContext& context)
1264 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1265 const vk::VkDevice device = context.getContext().getDevice();
// Publish the mapped pointer for subsequent HostMemoryAccess commands.
1267 context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1271 vk::VkDeviceMemory m_memory;
1272 vk::VkDeviceSize m_size;
// Unmaps the tested memory allocation and clears the stored host pointer.
1275 class UnMap : public Command
1280 const char* getName (void) const { return "UnMap"; }
1282 void logExecute (TestLog& log, size_t commandIndex) const
1284 log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1287 void prepare (PrepareContext& context)
1289 m_memory = context.getMemory().getMemory();
1292 void execute (ExecuteContext& context)
1294 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1295 const vk::VkDevice device = context.getContext().getDevice();
1297 vkd.unmapMemory(device, m_memory);
// Invalidate the cached mapping so later commands cannot use a stale pointer.
1298 context.setMapping(DE_NULL);
1302 vk::VkDeviceMemory m_memory;
// Invalidates the whole mapped memory range so host reads observe device
// writes (required for non-coherent memory).
1305 class Invalidate : public Command
1308 Invalidate (void) {}
1309 ~Invalidate (void) {}
1310 const char* getName (void) const { return "Invalidate"; }
1312 void logExecute (TestLog& log, size_t commandIndex) const
1314 log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1317 void prepare (PrepareContext& context)
1319 m_memory = context.getMemory().getMemory();
1320 m_size = context.getMemory().getSize();
1323 void execute (ExecuteContext& context)
1325 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1326 const vk::VkDevice device = context.getContext().getDevice();
// Invalidate from offset 0 over the full allocation.
1328 vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1332 vk::VkDeviceMemory m_memory;
1333 vk::VkDeviceSize m_size;
// Flushes the whole mapped memory range so device reads observe host
// writes (required for non-coherent memory). Mirror of Invalidate.
1336 class Flush : public Command
1341 const char* getName (void) const { return "Flush"; }
1343 void logExecute (TestLog& log, size_t commandIndex) const
1345 log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1348 void prepare (PrepareContext& context)
1350 m_memory = context.getMemory().getMemory();
1351 m_size = context.getMemory().getSize();
1354 void execute (ExecuteContext& context)
1356 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1357 const vk::VkDevice device = context.getContext().getDevice();
// Flush from offset 0 over the full allocation.
1359 vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1363 vk::VkDeviceMemory m_memory;
1364 vk::VkDeviceSize m_size;
1367 // Host memory reads and writes
// Reads and/or writes the mapped memory through the host pointer using a
// seeded PRNG, then checks the observed/written bytes against the
// reference memory in verify().
1368 class HostMemoryAccess : public Command
1371 HostMemoryAccess (bool read, bool write, deUint32 seed);
1372 ~HostMemoryAccess (void) {}
1373 const char* getName (void) const { return "HostMemoryAccess"; }
1375 void logExecute (TestLog& log, size_t commandIndex) const;
1376 void prepare (PrepareContext& context);
1377 void execute (ExecuteContext& context);
1378 void verify (VerifyContext& context, size_t commandIndex);
// Seed for the deterministic RNG; verify() replays the same sequence.
1383 const deUint32 m_seed;
// Bytes read during execute(), compared against the reference in verify().
1386 vector<deUint8> m_readData;
1389 HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1396 void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1398 log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "") << ", seed: " << m_seed << TestLog::EndMessage;
1401 void HostMemoryAccess::prepare (PrepareContext& context)
1403 m_size = (size_t)context.getMemory().getSize();
// Pre-size the read-back buffer so execute() can store every byte it reads.
1406 m_readData.resize(m_size, 0);
// Touch the mapped memory. Read+write XORs each byte with a PRNG mask,
// read-only captures the contents, write-only overwrites with PRNG bytes.
1409 void HostMemoryAccess::execute (ExecuteContext& context)
1411 de::Random rng (m_seed);
1412 deUint8* const ptr = (deUint8*)context.getMapping();
1414 if (m_read && m_write)
1416 for (size_t pos = 0; pos < m_size; pos++)
1418 const deUint8 mask = rng.getUint8();
1419 const deUint8 value = ptr[pos];
// Remember what was read, then toggle the byte in place.
1421 m_readData[pos] = value;
1422 ptr[pos] = value ^ mask;
1427 for (size_t pos = 0; pos < m_size; pos++)
1429 const deUint8 value = ptr[pos];
1431 m_readData[pos] = value;
// Write-only branch: fill memory with the seeded PRNG byte stream.
1436 for (size_t pos = 0; pos < m_size; pos++)
1438 const deUint8 value = rng.getUint8();
1444 DE_FATAL("Host memory access without read or write.");
// Replay the PRNG used in execute() and check the bytes read back against
// the reference memory; update the reference to reflect any writes.
// Bytes whose reference value is undefined are skipped.
1447 void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
1449 tcu::ResultCollector& resultCollector = context.getResultCollector();
1450 ReferenceMemory& reference = context.getReference();
// Same seed as execute(): regenerates the identical mask/value sequence.
1451 de::Random rng (m_seed);
1453 if (m_read && m_write)
1455 for (size_t pos = 0; pos < m_size; pos++)
1457 const deUint8 mask = rng.getUint8();
1458 const deUint8 value = m_readData[pos];
1460 if (reference.isDefined(pos))
1462 if (value != reference.get(pos))
1464 resultCollector.fail(
1465 de::toString(commandIndex) + ":" + getName()
1466 + " Result differs from reference, Expected: "
1467 + de::toString(tcu::toHex<8>(reference.get(pos)))
1469 + de::toString(tcu::toHex<8>(value))
1471 + de::toString(pos));
// Mirror the in-place XOR performed by execute().
1475 reference.set(pos, reference.get(pos) ^ mask);
// Read-only: just compare the captured bytes against the reference.
1481 for (size_t pos = 0; pos < m_size; pos++)
1483 const deUint8 value = m_readData[pos];
1485 if (reference.isDefined(pos))
1487 if (value != reference.get(pos))
1489 resultCollector.fail(
1490 de::toString(commandIndex) + ":" + getName()
1491 + " Result differs from reference, Expected: "
1492 + de::toString(tcu::toHex<8>(reference.get(pos)))
1494 + de::toString(tcu::toHex<8>(value))
1496 + de::toString(pos));
// Write-only: the reference simply takes the PRNG byte stream.
1504 for (size_t pos = 0; pos < m_size; pos++)
1506 const deUint8 value = rng.getUint8();
1508 reference.set(pos, value);
1512 DE_FATAL("Host memory access without read or write.");
// Creates a VkBuffer sized to the largest buffer the tested memory can
// back, with the given usage flags and sharing mode, and hands ownership
// to the prepare context.
1515 class CreateBuffer : public Command
1518 CreateBuffer (vk::VkBufferUsageFlags usage,
1519 vk::VkSharingMode sharing);
1520 ~CreateBuffer (void) {}
1521 const char* getName (void) const { return "CreateBuffer"; }
1523 void logPrepare (TestLog& log, size_t commandIndex) const;
1524 void prepare (PrepareContext& context);
1527 const vk::VkBufferUsageFlags m_usage;
1528 const vk::VkSharingMode m_sharing;
1531 CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags usage,
1532 vk::VkSharingMode sharing)
1534 , m_sharing (sharing)
1538 void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1540 log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1543 void CreateBuffer::prepare (PrepareContext& context)
1545 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1546 const vk::VkDevice device = context.getContext().getDevice();
// Size the buffer to the maximum this memory type/heap can support.
1547 const vk::VkDeviceSize bufferSize = context.getMemory().getMaxBufferSize();
1548 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1550 context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
// Takes ownership of the context's buffer in prepare() and destroys it
// during execute(), so destruction order is part of the tested sequence.
1553 class DestroyBuffer : public Command
1556 DestroyBuffer (void);
1557 ~DestroyBuffer (void) {}
1558 const char* getName (void) const { return "DestroyBuffer"; }
1560 void logExecute (TestLog& log, size_t commandIndex) const;
1561 void prepare (PrepareContext& context);
1562 void execute (ExecuteContext& context);
1565 vk::Move<vk::VkBuffer> m_buffer;
1568 DestroyBuffer::DestroyBuffer (void)
// Transfer the buffer handle out of the context so no one else frees it.
1572 void DestroyBuffer::prepare (PrepareContext& context)
1574 m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1575 context.releaseBuffer();
1578 void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
1580 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1583 void DestroyBuffer::execute (ExecuteContext& context)
1585 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1586 const vk::VkDevice device = context.getContext().getDevice();
// disown() prevents the Move wrapper from double-destroying the handle.
1588 vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
// Binds the tested memory allocation to the context's buffer at offset 0.
1591 class BindBufferMemory : public Command
1594 BindBufferMemory (void) {}
1595 ~BindBufferMemory (void) {}
1596 const char* getName (void) const { return "BindBufferMemory"; }
1598 void logPrepare (TestLog& log, size_t commandIndex) const;
1599 void prepare (PrepareContext& context);
1602 void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1604 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1607 void BindBufferMemory::prepare (PrepareContext& context)
1609 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1610 const vk::VkDevice device = context.getContext().getDevice();
1612 VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
// Creates a 2D RGBA8 optimal-tiling image sized to the largest image the
// tested memory can back, and resets the reference image in verify().
1615 class CreateImage : public Command
1618 CreateImage (vk::VkImageUsageFlags usage,
1619 vk::VkSharingMode sharing);
1620 ~CreateImage (void) {}
1621 const char* getName (void) const { return "CreateImage"; }
1623 void logPrepare (TestLog& log, size_t commandIndex) const;
1624 void prepare (PrepareContext& context);
1625 void verify (VerifyContext& context, size_t commandIndex);
1628 const vk::VkImageUsageFlags m_usage;
1629 const vk::VkSharingMode m_sharing;
// Chosen in prepare(); verify() needs them to size the reference image.
1630 deInt32 m_imageWidth;
1631 deInt32 m_imageHeight;
1634 CreateImage::CreateImage (vk::VkImageUsageFlags usage,
1635 vk::VkSharingMode sharing)
1637 , m_sharing (sharing)
1641 void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
1643 log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage) << TestLog::EndMessage;
1646 void CreateImage::prepare (PrepareContext& context)
1648 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1649 const vk::VkDevice device = context.getContext().getDevice();
1650 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Largest dimensions this memory allocation can back.
1652 m_imageWidth = context.getMemory().getMaxImageWidth();
1653 m_imageHeight = context.getMemory().getMaxImageHeight();
1656 const vk::VkImageCreateInfo createInfo =
1658 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1662 vk::VK_IMAGE_TYPE_2D,
1663 vk::VK_FORMAT_R8G8B8A8_UNORM,
1665 (deUint32)m_imageWidth,
1666 (deUint32)m_imageHeight,
1670 vk::VK_SAMPLE_COUNT_1_BIT,
1671 vk::VK_IMAGE_TILING_OPTIMAL,
1674 (deUint32)queueFamilies.size(),
1676 vk::VK_IMAGE_LAYOUT_UNDEFINED
1678 vk::Move<vk::VkImage> image (createImage(vkd, device, &createInfo));
1679 const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, *image);
// Hand the image to the context along with its layout and memory footprint.
1681 context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
// Re-create the reference image to match the new image's dimensions.
1685 void CreateImage::verify (VerifyContext& context, size_t)
1687 context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
// Image counterpart of DestroyBuffer: takes ownership of the context's
// image in prepare() and destroys it during execute().
1690 class DestroyImage : public Command
1693 DestroyImage (void);
1694 ~DestroyImage (void) {}
1695 const char* getName (void) const { return "DestroyImage"; }
1697 void logExecute (TestLog& log, size_t commandIndex) const;
1698 void prepare (PrepareContext& context);
1699 void execute (ExecuteContext& context);
1702 vk::Move<vk::VkImage> m_image;
1705 DestroyImage::DestroyImage (void)
// Transfer the image handle out of the context so no one else frees it.
1709 void DestroyImage::prepare (PrepareContext& context)
1711 m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1712 context.releaseImage();
1716 void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
1718 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1721 void DestroyImage::execute (ExecuteContext& context)
1723 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1724 const vk::VkDevice device = context.getContext().getDevice();
// disown() prevents the Move wrapper from double-destroying the handle.
1726 vkd.destroyImage(device, m_image.disown(), DE_NULL);
// Binds the tested memory allocation to the context's image at offset 0.
1729 class BindImageMemory : public Command
1732 BindImageMemory (void) {}
1733 ~BindImageMemory (void) {}
1734 const char* getName (void) const { return "BindImageMemory"; }
1736 void logPrepare (TestLog& log, size_t commandIndex) const;
1737 void prepare (PrepareContext& context);
1740 void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1742 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1745 void BindImageMemory::prepare (PrepareContext& context)
1747 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1748 const vk::VkDevice device = context.getContext().getDevice();
1750 VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1753 class QueueWaitIdle : public Command
1756 QueueWaitIdle (void) {}
1757 ~QueueWaitIdle (void) {}
1758 const char* getName (void) const { return "QueuetWaitIdle"; }
1760 void logExecute (TestLog& log, size_t commandIndex) const;
1761 void execute (ExecuteContext& context);
1764 void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1766 log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1769 void QueueWaitIdle::execute (ExecuteContext& context)
1771 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1772 const vk::VkQueue queue = context.getContext().getQueue();
1774 VK_CHECK(vkd.queueWaitIdle(queue));
// Command that blocks until the whole device becomes idle.
1777 class DeviceWaitIdle : public Command
1780 DeviceWaitIdle (void) {}
1781 ~DeviceWaitIdle (void) {}
1782 const char* getName (void) const { return "DeviceWaitIdle"; }
1784 void logExecute (TestLog& log, size_t commandIndex) const;
1785 void execute (ExecuteContext& context);
1788 void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1790 log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1793 void DeviceWaitIdle::execute (ExecuteContext& context)
1795 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1796 const vk::VkDevice device = context.getContext().getDevice();
1798 VK_CHECK(vkd.deviceWaitIdle(device));
// Context handed to CmdCommand::submit(). Read-only view of the prepare
// context plus the command buffer being recorded into.
1804 SubmitContext (const PrepareContext& context,
1805 const vk::VkCommandBuffer commandBuffer)
1806 : m_context (context)
1807 , m_commandBuffer (commandBuffer)
1811 const Memory& getMemory (void) const { return m_context.getMemory(); }
1812 const Context& getContext (void) const { return m_context.getContext(); }
1813 vk::VkCommandBuffer getCommandBuffer (void) const { return m_commandBuffer; }
1815 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
1816 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
1818 vk::VkImage getImage (void) const { return m_context.getImage(); }
1819 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
1820 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
1823 const PrepareContext& m_context;
1824 const vk::VkCommandBuffer m_commandBuffer;
// Abstract base for commands recorded into a command buffer (as opposed
// to Command, which runs on the host). Lifecycle: prepare() -> submit()
// -> verify().
1830 virtual ~CmdCommand (void) {}
1831 virtual const char* getName (void) const = 0;
1833 // Log things that are done during prepare
1834 virtual void logPrepare (TestLog&, size_t) const {}
1835 // Log submitted calls etc.
1836 virtual void logSubmit (TestLog&, size_t) const {}
1838 // Allocate vulkan resources and prepare for submit.
1839 virtual void prepare (PrepareContext&) {}
1841 // Submit commands to command buffer.
1842 virtual void submit (SubmitContext&) {}
// Verify results and update reference state after execution.
1845 virtual void verify (VerifyContext&, size_t) {}
// Host command that records a list of CmdCommands into a primary command
// buffer (in prepare) and submits it to the queue (in execute). Owns the
// CmdCommand objects and deletes them in its destructor.
1848 class SubmitCommandBuffer : public Command
1851 SubmitCommandBuffer (const vector<CmdCommand*>& commands);
1852 ~SubmitCommandBuffer (void);
1854 const char* getName (void) const { return "SubmitCommandBuffer"; }
1855 void logExecute (TestLog& log, size_t commandIndex) const;
1856 void logPrepare (TestLog& log, size_t commandIndex) const;
1858 // Allocate command buffer and submit commands to command buffer
1859 void prepare (PrepareContext& context);
1860 void execute (ExecuteContext& context);
1862 // Verify that results are correct.
1863 void verify (VerifyContext& context, size_t commandIndex);
// Owned; deleted in the destructor.
1866 vector<CmdCommand*> m_commands;
1867 vk::Move<vk::VkCommandBuffer> m_commandBuffer;
// Takes ownership of the CmdCommand pointers.
1870 SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
1871 : m_commands (commands)
1875 SubmitCommandBuffer::~SubmitCommandBuffer (void)
1877 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1878 delete m_commands[cmdNdx];
// Allocate and begin a primary command buffer, let each child command
// prepare its resources, then record every child into the buffer and end it.
1881 void SubmitCommandBuffer::prepare (PrepareContext& context)
1883 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1884 const vk::VkDevice device = context.getContext().getDevice();
1885 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
1887 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
// First pass: resource allocation for all children before any recording.
1889 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1891 CmdCommand& command = *m_commands[cmdNdx];
1893 command.prepare(context);
1897 SubmitContext submitContext (context, *m_commandBuffer);
// Second pass: record each child into the shared command buffer.
1899 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1901 CmdCommand& command = *m_commands[cmdNdx];
1903 command.submit(submitContext);
1906 VK_CHECK(vkd.endCommandBuffer(*m_commandBuffer));
1910 void SubmitCommandBuffer::execute (ExecuteContext& context)
1912 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1913 const vk::VkCommandBuffer cmd = *m_commandBuffer;
1914 const vk::VkQueue queue = context.getContext().getQueue();
1915 const vk::VkSubmitInfo submit =
1917 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1922 (const vk::VkPipelineStageFlags*)DE_NULL,
1931 vkd.queueSubmit(queue, 1, &submit, 0);
// Fan verification out to each child command inside a named log section.
1934 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1936 const string sectionName (de::toString(commandIndex) + ":" + getName())&#59;
1937 const tcu::ScopedLogSection section (context.getLog(), sectionName, sectionName);
1939 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1940 m_commands[cmdNdx]->verify(context, cmdNdx);
// Log what each child did during prepare, grouped under one section.
1943 void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1945 const string sectionName (de::toString(commandIndex) + ":" + getName());
1946 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1948 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1949 m_commands[cmdNdx]->logPrepare(log, cmdNdx);
// Log what each child recorded; children expose this via logSubmit().
1952 void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1954 const string sectionName (de::toString(commandIndex) + ":" + getName());
1955 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1957 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1958 m_commands[cmdNdx]->logSubmit(log, cmdNdx);
// Records a vkCmdPipelineBarrier as a global memory barrier, a buffer
// barrier, or an image barrier, chosen by a type member (enum declaration
// not visible in this chunk).
1961 class PipelineBarrier : public CmdCommand
1971 PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1972 const vk::VkAccessFlags srcAccesses,
1973 const vk::VkPipelineStageFlags dstStages,
1974 const vk::VkAccessFlags dstAccesses,
// Layout is only meaningful for the image-barrier variant.
1976 const tcu::Maybe<vk::VkImageLayout> imageLayout);
1977 ~PipelineBarrier (void) {}
1978 const char* getName (void) const { return "PipelineBarrier"; }
1980 void logSubmit (TestLog& log, size_t commandIndex) const;
1981 void submit (SubmitContext& context);
1984 const vk::VkPipelineStageFlags m_srcStages;
1985 const vk::VkAccessFlags m_srcAccesses;
1986 const vk::VkPipelineStageFlags m_dstStages;
1987 const vk::VkAccessFlags m_dstAccesses;
1989 const tcu::Maybe<vk::VkImageLayout> m_imageLayout;
1992 PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1993 const vk::VkAccessFlags srcAccesses,
1994 const vk::VkPipelineStageFlags dstStages,
1995 const vk::VkAccessFlags dstAccesses,
1997 const tcu::Maybe<vk::VkImageLayout> imageLayout)
1998 : m_srcStages (srcStages)
1999 , m_srcAccesses (srcAccesses)
2000 , m_dstStages (dstStages)
2001 , m_dstAccesses (dstAccesses)
2003 , m_imageLayout (imageLayout)
// Log barrier kind plus the stage/access masks being tested.
2007 void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
2009 log << TestLog::Message << commandIndex << ":" << getName()
2010 << " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
2011 : m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
2012 : "Image pipeline barrier")
2013 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2014 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
// Record the barrier into the command buffer. Exactly one of the three
// barrier arrays passed to vkCmdPipelineBarrier is non-empty, depending
// on m_type.
2017 void PipelineBarrier::submit (SubmitContext& context)
2019 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2020 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
// Global (whole-memory) barrier: one VkMemoryBarrier, no buffer/image barriers.
2026 const vk::VkMemoryBarrier barrier =
2028 vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
2035 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
// Buffer barrier covering the context's buffer; no ownership transfer.
2041 const vk::VkBufferMemoryBarrier barrier =
2043 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2049 VK_QUEUE_FAMILY_IGNORED,
2050 VK_QUEUE_FAMILY_IGNORED,
2052 context.getBuffer(),
2057 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
// Image barrier on the color aspect; no ownership transfer.
2063 const vk::VkImageMemoryBarrier barrier =
2065 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2074 VK_QUEUE_FAMILY_IGNORED,
2075 VK_QUEUE_FAMILY_IGNORED,
2079 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2085 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2090 DE_FATAL("Unknown pipeline barrier type");
// Records an image layout transition (image memory barrier changing
// srcLayout -> dstLayout) on the context's image.
2094 class ImageTransition : public CmdCommand
2097 ImageTransition (vk::VkPipelineStageFlags srcStages,
2098 vk::VkAccessFlags srcAccesses,
2100 vk::VkPipelineStageFlags dstStages,
2101 vk::VkAccessFlags dstAccesses,
2103 vk::VkImageLayout srcLayout,
2104 vk::VkImageLayout dstLayout);
2106 ~ImageTransition (void) {}
2107 const char* getName (void) const { return "ImageTransition"; }
2109 void prepare (PrepareContext& context);
2110 void logSubmit (TestLog& log, size_t commandIndex) const;
2111 void submit (SubmitContext& context);
2112 void verify (VerifyContext& context, size_t);
2115 const vk::VkPipelineStageFlags m_srcStages;
2116 const vk::VkAccessFlags m_srcAccesses;
2117 const vk::VkPipelineStageFlags m_dstStages;
2118 const vk::VkAccessFlags m_dstAccesses;
2119 const vk::VkImageLayout m_srcLayout;
2120 const vk::VkImageLayout m_dstLayout;
// Captured in prepare(); verify() uses it to invalidate the reference.
2122 vk::VkDeviceSize m_imageMemorySize;
2125 ImageTransition::ImageTransition (vk::VkPipelineStageFlags srcStages,
2126 vk::VkAccessFlags srcAccesses,
2128 vk::VkPipelineStageFlags dstStages,
2129 vk::VkAccessFlags dstAccesses,
2131 vk::VkImageLayout srcLayout,
2132 vk::VkImageLayout dstLayout)
2133 : m_srcStages (srcStages)
2134 , m_srcAccesses (srcAccesses)
2135 , m_dstStages (dstStages)
2136 , m_dstAccesses (dstAccesses)
2137 , m_srcLayout (srcLayout)
2138 , m_dstLayout (dstLayout)
2142 void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2144 log << TestLog::Message << commandIndex << ":" << getName()
2145 << " Image transition pipeline barrier"
2146 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2147 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2148 << ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
// The transition is only valid when the image's current layout matches
// the declared source layout (or either side is UNDEFINED).
2151 void ImageTransition::prepare (PrepareContext& context)
2153 DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);
2155 context.setImageLayout(m_dstLayout);
2156 m_imageMemorySize = context.getImageMemorySize();
2159 void ImageTransition::submit (SubmitContext& context)
2161 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2162 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2163 const vk::VkImageMemoryBarrier barrier =
2165 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2174 VK_QUEUE_FAMILY_IGNORED,
2175 VK_QUEUE_FAMILY_IGNORED,
2179 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2185 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
// A layout transition may rearrange the underlying bytes, so the raw
// memory reference becomes undefined over the image's footprint.
2188 void ImageTransition::verify (VerifyContext& context, size_t)
2190 context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
// Records vkCmdFillBuffer over the buffer with a 32-bit fill value and
// mirrors the resulting byte pattern into the reference memory.
2193 class FillBuffer : public CmdCommand
2196 FillBuffer (deUint32 value) : m_value(value) {}
2197 ~FillBuffer (void) {}
2198 const char* getName (void) const { return "FillBuffer"; }
2200 void logSubmit (TestLog& log, size_t commandIndex) const;
2201 void submit (SubmitContext& context);
2202 void verify (VerifyContext& context, size_t commandIndex);
2205 const deUint32 m_value;
// Actual filled size (rounded down); set in submit(), read in verify().
2206 vk::VkDeviceSize m_bufferSize;
2209 void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2211 log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2214 void FillBuffer::submit (SubmitContext& context)
2216 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2217 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2218 const vk::VkBuffer buffer = context.getBuffer();
// vkCmdFillBuffer requires the size to be a multiple of 4 bytes.
2219 const vk::VkDeviceSize sizeMask = ~(0x3ull); // \note Round down to multiple of 4
2221 m_bufferSize = sizeMask & context.getBufferSize();
2222 vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
// Reference update: the fill value repeats per 32-bit word, so each byte
// takes the word's byte at (pos % 4), ordered by host endianness.
2225 void FillBuffer::verify (VerifyContext& context, size_t)
2227 ReferenceMemory& reference = context.getReference();
2229 for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2231 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2232 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2234 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
// Records vkCmdUpdateBuffer calls writing seeded PRNG data over the whole
// buffer; verify() replays the same PRNG into the reference memory.
2239 class UpdateBuffer : public CmdCommand
2242 UpdateBuffer (deUint32 seed) : m_seed(seed) {}
2243 ~UpdateBuffer (void) {}
2244 const char* getName (void) const { return "UpdateBuffer"; }
2246 void logSubmit (TestLog& log, size_t commandIndex) const;
2247 void submit (SubmitContext& context);
2248 void verify (VerifyContext& context, size_t commandIndex);
2251 const deUint32 m_seed;
2252 vk::VkDeviceSize m_bufferSize;
2255 void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2257 log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2260 void UpdateBuffer::submit (SubmitContext& context)
2262 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2263 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2264 const vk::VkBuffer buffer = context.getBuffer();
// vkCmdUpdateBuffer is limited to 65536 bytes per call, so chunk the update.
2265 const size_t blockSize = 65536;
2266 std::vector<deUint8> data (blockSize, 0);
2267 de::Random rng (m_seed);
2269 m_bufferSize = context.getBufferSize();
2271 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2273 for (size_t ndx = 0; ndx < data.size(); ndx++)
2274 data[ndx] = rng.getUint8();
// Last chunk may be shorter than blockSize.
2276 if (m_bufferSize - updated > blockSize)
2277 vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2279 vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
// Replay the identical PRNG sequence into the reference memory.
2283 void UpdateBuffer::verify (VerifyContext& context, size_t)
2285 ReferenceMemory& reference = context.getReference();
2286 const size_t blockSize = 65536;
2287 vector<deUint8> data (blockSize, 0);
2288 de::Random rng (m_seed);
2290 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2292 for (size_t ndx = 0; ndx < data.size(); ndx++)
2293 data[ndx] = rng.getUint8();
2295 if (m_bufferSize - updated > blockSize)
2296 reference.setData(updated, blockSize, &data[0]);
2298 reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
// Copies the tested buffer into a host-visible destination buffer; in
// verify() the destination is mapped and compared byte-by-byte against
// the reference memory.
2302 class BufferCopyToBuffer : public CmdCommand
2305 BufferCopyToBuffer (void) {}
2306 ~BufferCopyToBuffer (void) {}
2307 const char* getName (void) const { return "BufferCopyToBuffer"; }
2309 void logPrepare (TestLog& log, size_t commandIndex) const;
2310 void prepare (PrepareContext& context);
2311 void logSubmit (TestLog& log, size_t commandIndex) const;
2312 void submit (SubmitContext& context);
2313 void verify (VerifyContext& context, size_t commandIndex);
2316 vk::VkDeviceSize m_bufferSize;
// Host-visible destination buffer and its backing memory.
2317 vk::Move<vk::VkBuffer> m_dstBuffer;
2318 vk::Move<vk::VkDeviceMemory> m_memory;
2321 void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2323 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
// Create a TRANSFER_DST buffer of equal size backed by host-visible memory.
2326 void BufferCopyToBuffer::prepare (PrepareContext& context)
2328 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2329 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2330 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2331 const vk::VkDevice device = context.getContext().getDevice();
2332 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2334 m_bufferSize = context.getBufferSize();
2336 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2337 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2340 void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2342 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2345 void BufferCopyToBuffer::submit (SubmitContext& context)
2347 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2348 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2349 const vk::VkBufferCopy range =
2355 vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
// Read the destination buffer back on the host. A transfer->host barrier
// is submitted first so host reads see the copy results.
2358 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2360 tcu::ResultCollector& resultCollector (context.getResultCollector());
2361 ReferenceMemory& reference (context.getReference());
2362 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2363 const vk::VkDevice device = context.getContext().getDevice();
2364 const vk::VkQueue queue = context.getContext().getQueue();
2365 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2366 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2367 const vk::VkBufferMemoryBarrier barrier =
2369 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2372 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2373 vk::VK_ACCESS_HOST_READ_BIT,
2375 VK_QUEUE_FAMILY_IGNORED,
2376 VK_QUEUE_FAMILY_IGNORED,
2382 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2384 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2385 queueRun(vkd, queue, *commandBuffer);
// Map, invalidate (memory may be non-coherent), then compare each defined
// byte against the reference.
2388 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2391 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2394 const deUint8* const data = (const deUint8*)ptr;
2396 for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2398 if (reference.isDefined(pos))
2400 if (data[pos] != reference.get(pos))
2402 resultCollector.fail(
2403 de::toString(commandIndex) + ":" + getName()
2404 + " Result differs from reference, Expected: "
2405 + de::toString(tcu::toHex<8>(reference.get(pos)))
2407 + de::toString(tcu::toHex<8>(data[pos]))
2409 + de::toString(pos));
2416 vkd.unmapMemory(device, *m_memory);
2419 context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
// CmdCommand that fills a host-visible source buffer with seeded pseudo-random
// bytes and copies it into the shared test buffer.
2423 class BufferCopyFromBuffer : public CmdCommand
2426 BufferCopyFromBuffer (deUint32 seed) : m_seed(seed) {}
2427 ~BufferCopyFromBuffer (void) {}
2428 const char* getName (void) const { return "BufferCopyFromBuffer"; }
2430 void logPrepare (TestLog& log, size_t commandIndex) const;
2431 void prepare (PrepareContext& context);
2432 void logSubmit (TestLog& log, size_t commandIndex) const;
2433 void submit (SubmitContext& context);
2434 void verify (VerifyContext& context, size_t commandIndex);
// Seed drives the PRNG in both prepare() and verify() so they agree.
2437 const deUint32 m_seed;
2438 vk::VkDeviceSize m_bufferSize;
2439 vk::Move<vk::VkBuffer> m_srcBuffer;
2440 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
2443 void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2445 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
// Allocate a host-visible source buffer sized like the test buffer and fill it
// with pseudo-random bytes derived from m_seed.
2448 void BufferCopyFromBuffer::prepare (PrepareContext& context)
2450 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2451 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2452 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2453 const vk::VkDevice device = context.getContext().getDevice();
2454 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2456 m_bufferSize = context.getBufferSize();
2457 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2458 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2461 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2462 de::Random rng (m_seed);
2465 deUint8* const data = (deUint8*)ptr;
2467 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2468 data[ndx] = rng.getUint8();
// Flush so the device sees the host writes (memory may be non-coherent).
2471 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2472 vkd.unmapMemory(device, *m_memory);
// Log what submit() will record: a copy from m_srcBuffer into the test buffer.
2476 void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2478 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
// Record a full-size vkCmdCopyBuffer from m_srcBuffer into the test buffer.
2481 void BufferCopyFromBuffer::submit (SubmitContext& context)
2483 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2484 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
// Copy region (fields not visible in this excerpt; presumably 0/0/m_bufferSize).
2485 const vk::VkBufferCopy range =
2491 vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
// Update the reference memory model: replay the same seeded PRNG stream that
// prepare() wrote into the source buffer.
2494 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2496 ReferenceMemory& reference (context.getReference());
2497 de::Random rng (m_seed);
2499 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2500 reference.set(ndx, rng.getUint8());
// CmdCommand that copies the shared test buffer into a freshly created
// RGBA8 image sized so that width*height*4 matches the buffer size.
2503 class BufferCopyToImage : public CmdCommand
2506 BufferCopyToImage (void) {}
2507 ~BufferCopyToImage (void) {}
2508 const char* getName (void) const { return "BufferCopyToImage"; }
2510 void logPrepare (TestLog& log, size_t commandIndex) const;
2511 void prepare (PrepareContext& context);
2512 void logSubmit (TestLog& log, size_t commandIndex) const;
2513 void submit (SubmitContext& context);
2514 void verify (VerifyContext& context, size_t commandIndex);
2517 deInt32 m_imageWidth;
2518 deInt32 m_imageHeight;
2519 vk::Move<vk::VkImage> m_dstImage;
2520 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
2523 void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2525 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
// Create the destination RGBA8 image (dimensions derived from the test buffer
// size) and transition it UNDEFINED -> TRANSFER_DST_OPTIMAL on the queue.
2528 void BufferCopyToImage::prepare (PrepareContext& context)
2530 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2531 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2532 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2533 const vk::VkDevice device = context.getContext().getDevice();
2534 const vk::VkQueue queue = context.getContext().getQueue();
2535 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2536 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// findImageSizeWxHx4 picks W,H with W*H*4 == buffer size.
2537 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2539 m_imageWidth = imageSize[0];
2540 m_imageHeight = imageSize[1];
2543 const vk::VkImageCreateInfo createInfo =
2545 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2549 vk::VK_IMAGE_TYPE_2D,
2550 vk::VK_FORMAT_R8G8B8A8_UNORM,
2552 (deUint32)m_imageWidth,
2553 (deUint32)m_imageHeight,
2556 1, 1, // mipLevels, arrayLayers
2557 vk::VK_SAMPLE_COUNT_1_BIT,
2559 vk::VK_IMAGE_TILING_OPTIMAL,
2560 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2561 vk::VK_SHARING_MODE_EXCLUSIVE,
2563 (deUint32)queueFamilies.size(),
2565 vk::VK_IMAGE_LAYOUT_UNDEFINED
2568 m_dstImage = vk::createImage(vkd, device, &createInfo);
2571 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
2574 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// Layout transition barrier; contents are undefined before the copy.
2575 const vk::VkImageMemoryBarrier barrier =
2577 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2581 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2583 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2584 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2586 VK_QUEUE_FAMILY_IGNORED,
2587 VK_QUEUE_FAMILY_IGNORED,
2591 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2593 1, // Mip level count
2599 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2601 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2602 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record: a buffer-to-image copy into m_dstImage.
2606 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2608 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
// Record a vkCmdCopyBufferToImage of the whole test buffer into m_dstImage
// (already in TRANSFER_DST_OPTIMAL from prepare()).
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
2611 void BufferCopyToImage::submit (SubmitContext& context)
2613 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2614 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2615 const vk::VkBufferImageCopy region =
2620 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2627 (deUint32)m_imageWidth,
2628 (deUint32)m_imageHeight,
2633 vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
// Verify the image contents: transition the image to TRANSFER_SRC_OPTIMAL,
// copy it back to a host-visible buffer, barrier transfer->host, then compare
// every defined byte against the reference memory model.
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
2636 void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
2638 tcu::ResultCollector& resultCollector (context.getResultCollector());
2639 ReferenceMemory& reference (context.getReference());
2640 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2641 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2642 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2643 const vk::VkDevice device = context.getContext().getDevice();
2644 const vk::VkQueue queue = context.getContext().getQueue();
2645 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2646 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2647 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2648 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2649 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// DST_OPTIMAL -> SRC_OPTIMAL so the image can be read back.
2651 const vk::VkImageMemoryBarrier imageBarrier =
2653 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2656 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2657 vk::VK_ACCESS_TRANSFER_READ_BIT,
2659 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2660 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2662 VK_QUEUE_FAMILY_IGNORED,
2663 VK_QUEUE_FAMILY_IGNORED,
2667 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2669 1, // Mip level count
// Make the readback copy visible to host reads of the mapped buffer.
2674 const vk::VkBufferMemoryBarrier bufferBarrier =
2676 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2679 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2680 vk::VK_ACCESS_HOST_READ_BIT,
2682 VK_QUEUE_FAMILY_IGNORED,
2683 VK_QUEUE_FAMILY_IGNORED,
2689 const vk::VkBufferImageCopy region =
2694 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2701 (deUint32)m_imageWidth,
2702 (deUint32)m_imageHeight,
2707 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
2708 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
2709 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2712 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2713 queueRun(vkd, queue, *commandBuffer);
2716 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2718 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2721 const deUint8* const data = (const deUint8*)ptr;
// Compare only bytes the reference model has defined values for.
2723 for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
2725 if (reference.isDefined(pos))
2727 if (data[pos] != reference.get(pos))
2729 resultCollector.fail(
2730 de::toString(commandIndex) + ":" + getName()
2731 + " Result differs from reference, Expected: "
2732 + de::toString(tcu::toHex<8>(reference.get(pos)))
2734 + de::toString(tcu::toHex<8>(data[pos]))
2736 + de::toString(pos));
2743 vkd.unmapMemory(device, *memory);
// CmdCommand that creates an RGBA8 image filled with seeded pseudo-random
// data and copies it into the shared test buffer.
2747 class BufferCopyFromImage : public CmdCommand
2750 BufferCopyFromImage (deUint32 seed) : m_seed(seed) {}
2751 ~BufferCopyFromImage (void) {}
2752 const char* getName (void) const { return "BufferCopyFromImage"; }
2754 void logPrepare (TestLog& log, size_t commandIndex) const;
2755 void prepare (PrepareContext& context);
2756 void logSubmit (TestLog& log, size_t commandIndex) const;
2757 void submit (SubmitContext& context);
2758 void verify (VerifyContext& context, size_t commandIndex);
// Seed drives the PRNG in both prepare() and verify() so they agree.
2761 const deUint32 m_seed;
2762 deInt32 m_imageWidth;
2763 deInt32 m_imageHeight;
2764 vk::Move<vk::VkImage> m_srcImage;
2765 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
2768 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2770 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
// Create the source RGBA8 image, upload seeded pseudo-random pixel data via a
// staging buffer, and leave the image in TRANSFER_SRC_OPTIMAL for submit().
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
2773 void BufferCopyFromImage::prepare (PrepareContext& context)
2775 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2776 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2777 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2778 const vk::VkDevice device = context.getContext().getDevice();
2779 const vk::VkQueue queue = context.getContext().getQueue();
2780 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2781 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// findImageSizeWxHx4 picks W,H with W*H*4 == buffer size.
2782 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2784 m_imageWidth = imageSize[0];
2785 m_imageHeight = imageSize[1];
2788 const vk::VkImageCreateInfo createInfo =
2790 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2794 vk::VK_IMAGE_TYPE_2D,
2795 vk::VK_FORMAT_R8G8B8A8_UNORM,
2797 (deUint32)m_imageWidth,
2798 (deUint32)m_imageHeight,
2801 1, 1, // mipLevels, arrayLayers
2802 vk::VK_SAMPLE_COUNT_1_BIT,
2804 vk::VK_IMAGE_TILING_OPTIMAL,
2805 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2806 vk::VK_SHARING_MODE_EXCLUSIVE,
2808 (deUint32)queueFamilies.size(),
2810 vk::VK_IMAGE_LAYOUT_UNDEFINED
2813 m_srcImage = vk::createImage(vkd, device, &createInfo);
2816 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
// Host-visible staging buffer holding the random pixel data.
2819 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2820 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2821 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> DST_OPTIMAL for the upload copy.
2822 const vk::VkImageMemoryBarrier preImageBarrier =
2824 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2828 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2830 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2831 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2833 VK_QUEUE_FAMILY_IGNORED,
2834 VK_QUEUE_FAMILY_IGNORED,
2838 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2840 1, // Mip level count
// DST_OPTIMAL -> SRC_OPTIMAL after the upload, ready for submit().
2845 const vk::VkImageMemoryBarrier postImageBarrier =
2847 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2850 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2853 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2854 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2856 VK_QUEUE_FAMILY_IGNORED,
2857 VK_QUEUE_FAMILY_IGNORED,
2861 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2863 1, // Mip level count
2868 const vk::VkBufferImageCopy region =
2873 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2880 (deUint32)m_imageWidth,
2881 (deUint32)m_imageHeight,
2887 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2888 de::Random rng (m_seed);
2891 deUint8* const data = (deUint8*)ptr;
2893 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2894 data[ndx] = rng.getUint8();
// Flush so the device sees the host writes (memory may be non-coherent).
2897 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2898 vkd.unmapMemory(device, *memory);
2901 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2902 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2903 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2905 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2906 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record: an image-to-buffer copy into the test buffer.
2910 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2912 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
// Record a vkCmdCopyImageToBuffer of the whole source image into the shared
// test buffer.
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
2915 void BufferCopyFromImage::submit (SubmitContext& context)
2917 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2918 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2919 const vk::VkBufferImageCopy region =
2924 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2931 (deUint32)m_imageWidth,
2932 (deUint32)m_imageHeight,
2937 vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
// Update the reference memory model: replay the seeded PRNG stream that
// prepare() uploaded into the source image.
2940 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2942 ReferenceMemory& reference (context.getReference());
2943 de::Random rng (m_seed);
2945 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2946 reference.set(ndx, rng.getUint8());
// CmdCommand that copies the shared test image into a host-visible buffer and
// verifies the readback against the reference image.
2949 class ImageCopyToBuffer : public CmdCommand
2952 ImageCopyToBuffer (vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
2953 ~ImageCopyToBuffer (void) {}
// Fix: previously returned "BufferCopyToImage" (copy-paste defect), which
// mislabelled this command in test logs; getName() is used only for logging.
2954 const char* getName (void) const { return "ImageCopyToBuffer"; }
2956 void logPrepare (TestLog& log, size_t commandIndex) const;
2957 void prepare (PrepareContext& context);
2958 void logSubmit (TestLog& log, size_t commandIndex) const;
2959 void submit (SubmitContext& context);
2960 void verify (VerifyContext& context, size_t commandIndex);
2963 vk::VkImageLayout m_imageLayout;
2964 vk::VkDeviceSize m_bufferSize;
2965 vk::Move<vk::VkBuffer> m_dstBuffer;
2966 vk::Move<vk::VkDeviceMemory> m_memory;
2967 vk::VkDeviceSize m_imageMemorySize;
2968 deInt32 m_imageWidth;
2969 deInt32 m_imageHeight;
// Log what prepare() will do for this command index.
2972 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2974 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
// Allocate a host-visible destination buffer sized to hold the test image
// (width * height * 4 bytes of RGBA8).
2977 void ImageCopyToBuffer::prepare (PrepareContext& context)
2979 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2980 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2981 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2982 const vk::VkDevice device = context.getContext().getDevice();
2983 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2985 m_imageWidth = context.getImageWidth();
2986 m_imageHeight = context.getImageHeight();
2987 m_bufferSize = 4 * m_imageWidth * m_imageHeight;
2988 m_imageMemorySize = context.getImageMemorySize();
2989 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2990 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
// Log what submit() will record: an image-to-buffer copy into m_dstBuffer.
2993 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2995 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
// Record a vkCmdCopyImageToBuffer of the whole test image (in m_imageLayout)
// into the host-visible destination buffer.
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
2998 void ImageCopyToBuffer::submit (SubmitContext& context)
3000 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3001 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3002 const vk::VkBufferImageCopy region =
3007 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3014 (deUint32)m_imageWidth,
3015 (deUint32)m_imageHeight,
3020 vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
// Verify the readback: barrier transfer->host on m_dstBuffer, map it, and
// compare it as an RGBA8 image against the reference image.
3023 void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
3025 tcu::ResultCollector& resultCollector (context.getResultCollector());
3026 ReferenceMemory& reference (context.getReference());
3027 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3028 const vk::VkDevice device = context.getContext().getDevice();
3029 const vk::VkQueue queue = context.getContext().getQueue();
3030 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3031 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// Make the transfer write to m_dstBuffer visible to host reads.
3032 const vk::VkBufferMemoryBarrier barrier =
3034 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3037 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3038 vk::VK_ACCESS_HOST_READ_BIT,
3040 VK_QUEUE_FAMILY_IGNORED,
3041 VK_QUEUE_FAMILY_IGNORED,
3047 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3049 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3050 queueRun(vkd, queue, *commandBuffer);
// Image memory contents become undefined for byte-level reference tracking;
// the image itself is checked via the pixel comparison below.
3052 reference.setUndefined(0, (size_t)m_imageMemorySize);
3054 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3055 const ConstPixelBufferAccess referenceImage (context.getReferenceImage().getAccess());
3056 const ConstPixelBufferAccess resultImage (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3058 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
// Exact comparison (zero threshold), logged only on mismatch.
3060 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3061 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3063 vkd.unmapMemory(device, *m_memory);
// CmdCommand that fills a host-visible buffer with seeded pseudo-random bytes
// and copies it into the shared test image (in the given layout).
3067 class ImageCopyFromBuffer : public CmdCommand
3070 ImageCopyFromBuffer (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3071 ~ImageCopyFromBuffer (void) {}
3072 const char* getName (void) const { return "ImageCopyFromBuffer"; }
3074 void logPrepare (TestLog& log, size_t commandIndex) const;
3075 void prepare (PrepareContext& context);
3076 void logSubmit (TestLog& log, size_t commandIndex) const;
3077 void submit (SubmitContext& context);
3078 void verify (VerifyContext& context, size_t commandIndex);
// Seed drives the PRNG in both prepare() and verify() so they agree.
3081 const deUint32 m_seed;
3082 const vk::VkImageLayout m_imageLayout;
3083 deInt32 m_imageWidth;
3084 deInt32 m_imageHeight;
3085 vk::VkDeviceSize m_imageMemorySize;
3086 vk::VkDeviceSize m_bufferSize;
3087 vk::Move<vk::VkBuffer> m_srcBuffer;
3088 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
3091 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3093 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
// Allocate a host-visible source buffer sized to the test image and fill it
// with pseudo-random bytes derived from m_seed.
// Fix: m_imageWidth/m_imageHeight were assigned from the swapped context
// getters (width <- getImageHeight(), height <- getImageWidth()), which
// transposed the copy-region extent relative to the actual image whenever
// width != height. Every sibling prepare() in this file assigns
// width <- getImageWidth(); restored that correspondence.
3096 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3098 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3099 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3100 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3101 const vk::VkDevice device = context.getContext().getDevice();
3102 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3104 m_imageWidth = context.getImageWidth();
3105 m_imageHeight = context.getImageHeight();
3106 m_imageMemorySize = context.getImageMemorySize();
3107 m_bufferSize = m_imageWidth * m_imageHeight * 4;
3108 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3109 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3112 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3113 de::Random rng (m_seed);
3116 deUint8* const data = (deUint8*)ptr;
3118 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3119 data[ndx] = rng.getUint8();
// Flush so the device sees the host writes (memory may be non-coherent).
3122 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3123 vkd.unmapMemory(device, *m_memory);
// Log what submit() will record: a buffer-to-image copy into the test image.
3127 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3129 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
// Record a vkCmdCopyBufferToImage of m_srcBuffer into the whole test image
// (in m_imageLayout).
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
3132 void ImageCopyFromBuffer::submit (SubmitContext& context)
3134 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3135 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3136 const vk::VkBufferImageCopy region =
3141 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3148 (deUint32)m_imageWidth,
3149 (deUint32)m_imageHeight,
3154 vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
// Update the reference image: replay the seeded PRNG stream as RGBA8 pixels,
// matching the bytes prepare() wrote into the source buffer. Byte-level
// reference memory becomes undefined (optimal-tiling layout is opaque).
3157 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3159 ReferenceMemory& reference (context.getReference());
3160 de::Random rng (m_seed);
3162 reference.setUndefined(0, (size_t)m_imageMemorySize);
3165 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
// Consume the PRNG in the same R,G,B,A order as the linear buffer fill.
3167 for (deInt32 y = 0; y < m_imageHeight; y++)
3168 for (deInt32 x = 0; x < m_imageWidth; x++)
3170 const deUint8 r8 = rng.getUint8();
3171 const deUint8 g8 = rng.getUint8();
3172 const deUint8 b8 = rng.getUint8();
3173 const deUint8 a8 = rng.getUint8();
3175 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
// CmdCommand that creates an RGBA8 source image filled with seeded
// pseudo-random data and copies it into the shared test image.
3180 class ImageCopyFromImage : public CmdCommand
3183 ImageCopyFromImage (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3184 ~ImageCopyFromImage (void) {}
3185 const char* getName (void) const { return "ImageCopyFromImage"; }
3187 void logPrepare (TestLog& log, size_t commandIndex) const;
3188 void prepare (PrepareContext& context);
3189 void logSubmit (TestLog& log, size_t commandIndex) const;
3190 void submit (SubmitContext& context);
3191 void verify (VerifyContext& context, size_t commandIndex);
// Seed drives the PRNG in both prepare() and verify() so they agree.
3194 const deUint32 m_seed;
3195 const vk::VkImageLayout m_imageLayout;
3196 deInt32 m_imageWidth;
3197 deInt32 m_imageHeight;
3198 vk::VkDeviceSize m_imageMemorySize;
3199 vk::Move<vk::VkImage> m_srcImage;
3200 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
3203 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3205 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
// Create a source RGBA8 image matching the test image's dimensions, upload
// seeded pseudo-random pixels via a staging buffer, and leave the image in
// TRANSFER_SRC_OPTIMAL for submit().
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
3208 void ImageCopyFromImage::prepare (PrepareContext& context)
3210 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3211 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3212 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3213 const vk::VkDevice device = context.getContext().getDevice();
3214 const vk::VkQueue queue = context.getContext().getQueue();
3215 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3216 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3218 m_imageWidth = context.getImageWidth();
3219 m_imageHeight = context.getImageHeight();
3220 m_imageMemorySize = context.getImageMemorySize();
3223 const vk::VkImageCreateInfo createInfo =
3225 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3229 vk::VK_IMAGE_TYPE_2D,
3230 vk::VK_FORMAT_R8G8B8A8_UNORM,
3232 (deUint32)m_imageWidth,
3233 (deUint32)m_imageHeight,
3236 1, 1, // mipLevels, arrayLayers
3237 vk::VK_SAMPLE_COUNT_1_BIT,
3239 vk::VK_IMAGE_TILING_OPTIMAL,
3240 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3241 vk::VK_SHARING_MODE_EXCLUSIVE,
3243 (deUint32)queueFamilies.size(),
3245 vk::VK_IMAGE_LAYOUT_UNDEFINED
3248 m_srcImage = vk::createImage(vkd, device, &createInfo);
3251 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
// Host-visible staging buffer holding the random pixel data.
3254 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3255 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3256 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> DST_OPTIMAL for the upload copy.
3257 const vk::VkImageMemoryBarrier preImageBarrier =
3259 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3263 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3265 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3266 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3268 VK_QUEUE_FAMILY_IGNORED,
3269 VK_QUEUE_FAMILY_IGNORED,
3273 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3275 1, // Mip level count
// DST_OPTIMAL -> SRC_OPTIMAL after the upload, ready for submit().
3280 const vk::VkImageMemoryBarrier postImageBarrier =
3282 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3285 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3288 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3289 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3291 VK_QUEUE_FAMILY_IGNORED,
3292 VK_QUEUE_FAMILY_IGNORED,
3296 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3298 1, // Mip level count
3303 const vk::VkBufferImageCopy region =
3308 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3315 (deUint32)m_imageWidth,
3316 (deUint32)m_imageHeight,
3322 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3323 de::Random rng (m_seed);
3326 deUint8* const data = (deUint8*)ptr;
3328 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3329 data[ndx] = rng.getUint8();
// Flush so the device sees the host writes (memory may be non-coherent).
3332 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3333 vkd.unmapMemory(device, *memory);
3336 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3337 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3338 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3340 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3341 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record: an image-to-image copy into the test image.
3345 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3347 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
// Record a vkCmdCopyImage of the whole source image (SRC_OPTIMAL) into the
// test image (in m_imageLayout).
// Fix: "&region" had been corrupted to the mojibake "®ion" (HTML-entity
// mangling of "&reg"); restored the address-of expression.
3350 void ImageCopyFromImage::submit (SubmitContext& context)
3352 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3353 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3354 const vk::VkImageCopy region =
3357 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3365 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3372 (deUint32)m_imageWidth,
3373 (deUint32)m_imageHeight,
3378 vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
// Update the reference image: replay the seeded PRNG stream as RGBA8 pixels,
// matching the data prepare() uploaded into the source image. Byte-level
// reference memory becomes undefined (optimal-tiling layout is opaque).
3381 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3383 ReferenceMemory& reference (context.getReference());
3384 de::Random rng (m_seed);
3386 reference.setUndefined(0, (size_t)m_imageMemorySize);
3389 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
// Consume the PRNG in the same R,G,B,A order as the linear buffer fill.
3391 for (deInt32 y = 0; y < m_imageHeight; y++)
3392 for (deInt32 x = 0; x < m_imageWidth; x++)
3394 const deUint8 r8 = rng.getUint8();
3395 const deUint8 g8 = rng.getUint8();
3396 const deUint8 b8 = rng.getUint8();
3397 const deUint8 a8 = rng.getUint8();
3399 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
// CmdCommand that copies the shared test image into a freshly created
// destination image and verifies the result.
3404 class ImageCopyToImage : public CmdCommand
3407 ImageCopyToImage (vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
3408 ~ImageCopyToImage (void) {}
3409 const char* getName (void) const { return "ImageCopyToImage"; }
3411 void logPrepare (TestLog& log, size_t commandIndex) const;
3412 void prepare (PrepareContext& context);
3413 void logSubmit (TestLog& log, size_t commandIndex) const;
3414 void submit (SubmitContext& context);
3415 void verify (VerifyContext& context, size_t commandIndex);
3418 const vk::VkImageLayout m_imageLayout;
3419 deInt32 m_imageWidth;
3420 deInt32 m_imageHeight;
3421 vk::VkDeviceSize m_imageMemorySize;
3422 vk::Move<vk::VkImage> m_dstImage;
3423 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
3426 void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3428 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
// Allocate the destination image (same size/format as the test image) and
// transition it to TRANSFER_DST_OPTIMAL so submit() can copy into it.
3431 void ImageCopyToImage::prepare (PrepareContext& context)
3433 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3434 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3435 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3436 const vk::VkDevice device = context.getContext().getDevice();
3437 const vk::VkQueue queue = context.getContext().getQueue();
3438 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3439 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Cache image parameters for use in submit()/verify().
3441 m_imageWidth = context.getImageWidth();
3442 m_imageHeight = context.getImageHeight();
3443 m_imageMemorySize = context.getImageMemorySize();
// Destination matches the source: 2D RGBA8, single mip/layer, optimal tiling,
// usable both as transfer destination (the copy) and source (readback in verify).
3446 const vk::VkImageCreateInfo createInfo =
3448 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3452 vk::VK_IMAGE_TYPE_2D,
3453 vk::VK_FORMAT_R8G8B8A8_UNORM,
3455 (deUint32)m_imageWidth,
3456 (deUint32)m_imageHeight,
3459 1, 1, // mipLevels, arrayLayers
3460 vk::VK_SAMPLE_COUNT_1_BIT,
3462 vk::VK_IMAGE_TILING_OPTIMAL,
3463 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3464 vk::VK_SHARING_MODE_EXCLUSIVE,
3466 (deUint32)queueFamilies.size(),
3468 vk::VK_IMAGE_LAYOUT_UNDEFINED
3471 m_dstImage = vk::createImage(vkd, device, &createInfo);
3474 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// One-off command buffer: transition UNDEFINED -> TRANSFER_DST_OPTIMAL so the
// image is ready to receive the copy recorded later in submit().
3477 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3478 const vk::VkImageMemoryBarrier barrier =
3480 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3484 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3486 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3487 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3489 VK_QUEUE_FAMILY_IGNORED,
3490 VK_QUEUE_FAMILY_IGNORED,
3494 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3496 1, // Mip level count
3502 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
// Submit the layout transition and wait for completion before returning.
3504 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3505 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record for this command index.
3509 void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3511 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
// Record the image-to-image copy: test image (in m_imageLayout) -> m_dstImage
// (in TRANSFER_DST_OPTIMAL), covering the full width x height.
3514 void ImageCopyToImage::submit (SubmitContext& context)
3516 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3517 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3518 const vk::VkImageCopy region =
3521 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3529 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3536 (deUint32)m_imageWidth,
3537 (deUint32)m_imageHeight,
// Fixed mojibake: "®ion" (U+00AE + "ion") restored to "&region".
3542 vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
// Read back m_dstImage into a host-visible buffer and compare it pixel-exactly
// (zero threshold) against the reference image; failures are reported through
// the result collector with this command's index.
3545 void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
3547 tcu::ResultCollector& resultCollector (context.getResultCollector());
3548 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3549 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3550 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3551 const vk::VkDevice device = context.getContext().getDevice();
3552 const vk::VkQueue queue = context.getContext().getQueue();
3553 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3554 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3555 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// 4 bytes per pixel (RGBA8) readback buffer, host visible for CPU comparison.
3556 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3557 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Transition destination image DST_OPTIMAL -> SRC_OPTIMAL for the readback copy.
3559 const vk::VkImageMemoryBarrier imageBarrier =
3561 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3564 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3565 vk::VK_ACCESS_TRANSFER_READ_BIT,
3567 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3568 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3570 VK_QUEUE_FAMILY_IGNORED,
3571 VK_QUEUE_FAMILY_IGNORED,
3575 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3577 1, // Mip level count
// Make the transfer write visible to host reads after the copy-to-buffer.
3582 const vk::VkBufferMemoryBarrier bufferBarrier =
3584 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3587 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3588 vk::VK_ACCESS_HOST_READ_BIT,
3590 VK_QUEUE_FAMILY_IGNORED,
3591 VK_QUEUE_FAMILY_IGNORED,
3596 const vk::VkBufferImageCopy region =
3601 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3608 (deUint32)m_imageWidth,
3609 (deUint32)m_imageHeight,
3614 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
// Fixed mojibake: "®ion" (U+00AE + "ion") restored to "&region".
3615 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
3616 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3619 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3620 queueRun(vkd, queue, *commandBuffer);
// Map, invalidate (memory may be non-coherent), compare, unmap.
3623 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3625 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3628 const deUint8* const data = (const deUint8*)ptr;
3629 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
3630 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
// Zero threshold: the copy must be bit-exact.
3632 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3633 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3636 vkd.unmapMemory(device, *memory);
// Command that blits into the test image from a freshly allocated source
// image filled with seeded random data. The source is either the same size
// (BLIT_SCALE_10) or half size (BLIT_SCALE_20, i.e. 2x upscale blit).
3646 class ImageBlitFromImage : public CmdCommand
// seed: RNG seed used both to fill the source image and to regenerate the
// reference in verify(). scale: blit scale factor. imageLayout: layout of the
// test image at submit time (dstImageLayout for vkCmdBlitImage).
3649 ImageBlitFromImage (deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
3650 ~ImageBlitFromImage (void) {}
3651 const char* getName (void) const { return "ImageBlitFromImage"; }
3653 void logPrepare (TestLog& log, size_t commandIndex) const;
3654 void prepare (PrepareContext& context);
3655 void logSubmit (TestLog& log, size_t commandIndex) const;
3656 void submit (SubmitContext& context);
3657 void verify (VerifyContext& context, size_t commandIndex);
3660 const deUint32 m_seed;
3661 const BlitScale m_scale;
3662 const vk::VkImageLayout m_imageLayout;
// Target image parameters, cached from the PrepareContext.
3663 deInt32 m_imageWidth;
3664 deInt32 m_imageHeight;
3665 vk::VkDeviceSize m_imageMemorySize;
// Source image dimensions (equal to or half of the target, per m_scale).
3666 deInt32 m_srcImageWidth;
3667 deInt32 m_srcImageHeight;
3668 vk::Move<vk::VkImage> m_srcImage;
3669 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
3672 void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3674 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
// Allocate the blit source image, fill it with seeded random bytes via a
// staging buffer, and leave it in TRANSFER_SRC_OPTIMAL for submit().
3677 void ImageBlitFromImage::prepare (PrepareContext& context)
3679 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3680 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3681 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3682 const vk::VkDevice device = context.getContext().getDevice();
3683 const vk::VkQueue queue = context.getContext().getQueue();
3684 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3685 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3687 m_imageWidth = context.getImageWidth();
3688 m_imageHeight = context.getImageHeight();
3689 m_imageMemorySize = context.getImageMemorySize();
// Source size: equal to target for 1x blit, half the target for 2x upscale.
3691 if (m_scale == BLIT_SCALE_10)
3693 m_srcImageWidth = m_imageWidth;
3694 m_srcImageHeight = m_imageHeight;
3696 else if (m_scale == BLIT_SCALE_20)
3698 m_srcImageWidth = m_imageWidth / 2;
3699 m_srcImageHeight = m_imageHeight / 2;
3702 DE_FATAL("Unsupported scale");
// 2D RGBA8 source image; DST usage for the staging upload, SRC for the blit.
3705 const vk::VkImageCreateInfo createInfo =
3707 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3711 vk::VK_IMAGE_TYPE_2D,
3712 vk::VK_FORMAT_R8G8B8A8_UNORM,
3714 (deUint32)m_srcImageWidth,
3715 (deUint32)m_srcImageHeight,
3718 1, 1, // mipLevels, arrayLayers
3719 vk::VK_SAMPLE_COUNT_1_BIT,
3721 vk::VK_IMAGE_TILING_OPTIMAL,
3722 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3723 vk::VK_SHARING_MODE_EXCLUSIVE,
3725 (deUint32)queueFamilies.size(),
3727 vk::VK_IMAGE_LAYOUT_UNDEFINED
3730 m_srcImage = vk::createImage(vkd, device, &createInfo);
3733 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
// Host-visible staging buffer (4 bytes/pixel) for the random pixel data.
3736 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3737 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3738 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
// UNDEFINED -> TRANSFER_DST_OPTIMAL before the buffer-to-image upload.
3739 const vk::VkImageMemoryBarrier preImageBarrier =
3741 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3745 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3747 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3748 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3750 VK_QUEUE_FAMILY_IGNORED,
3751 VK_QUEUE_FAMILY_IGNORED,
3755 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3757 1, // Mip level count
// TRANSFER_DST_OPTIMAL -> TRANSFER_SRC_OPTIMAL after the upload, ready for blit.
3762 const vk::VkImageMemoryBarrier postImageBarrier =
3764 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3767 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3770 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3771 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3773 VK_QUEUE_FAMILY_IGNORED,
3774 VK_QUEUE_FAMILY_IGNORED,
3778 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3780 1, // Mip level count
3785 const vk::VkBufferImageCopy region =
3790 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3797 (deUint32)m_srcImageWidth,
3798 (deUint32)m_srcImageHeight,
// Fill the staging buffer with the seeded random byte sequence that verify()
// will regenerate; flush because the memory may be non-coherent.
3804 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
3805 de::Random rng (m_seed);
3808 deUint8* const data = (deUint8*)ptr;
3810 for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
3811 data[ndx] = rng.getUint8();
3814 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_srcImageWidth * m_srcImageHeight);
3815 vkd.unmapMemory(device, *memory);
3818 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
// Fixed mojibake: "®ion" (U+00AE + "ion") restored to "&region".
3819 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3820 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3822 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3823 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record, noting the 2x scale variant when selected.
3827 void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3829 log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
// Record the blit: m_srcImage (TRANSFER_SRC_OPTIMAL) -> test image
// (m_imageLayout), nearest filtering.
3832 void ImageBlitFromImage::submit (SubmitContext& context)
3834 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3835 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3836 const vk::VkImageBlit region =
3840 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3856 vk::VK_IMAGE_ASPECT_COLOR_BIT,
// Fixed mojibake: "®ion" (U+00AE + "ion") restored to "&region".
3870 vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
// Rebuild the reference image after the blit. For 1x the reference is the raw
// random pixel sequence; for 2x the random source is regenerated and then
// nearest-sampled up to the target size, mirroring the GPU blit.
3873 void ImageBlitFromImage::verify (VerifyContext& context, size_t)
3875 ReferenceMemory& reference (context.getReference());
3876 de::Random rng (m_seed);
// The blit overwrote the image; byte-level reference contents are now undefined.
3878 reference.setUndefined(0, (size_t)m_imageMemorySize);
3881 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3883 if (m_scale == BLIT_SCALE_10)
// 1:1 blit: reference pixels are exactly the seeded random sequence.
3885 for (deInt32 y = 0; y < m_imageHeight; y++)
3886 for (deInt32 x = 0; x < m_imageWidth; x++)
3888 const deUint8 r8 = rng.getUint8();
3889 const deUint8 g8 = rng.getUint8();
3890 const deUint8 b8 = rng.getUint8();
3891 const deUint8 a8 = rng.getUint8();
3893 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3896 else if (m_scale == BLIT_SCALE_20)
// 2x blit: regenerate the half-size random source, then nearest-sample it
// into the reference at the source/destination scale ratio.
3898 tcu::TextureLevel source (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
3899 const float xscale = ((float)m_srcImageWidth) / (float)m_imageWidth;
3900 const float yscale = ((float)m_srcImageHeight) / (float)m_imageHeight;
3902 for (deInt32 y = 0; y < m_srcImageHeight; y++)
3903 for (deInt32 x = 0; x < m_srcImageWidth; x++)
3905 const deUint8 r8 = rng.getUint8();
3906 const deUint8 g8 = rng.getUint8();
3907 const deUint8 b8 = rng.getUint8();
3908 const deUint8 a8 = rng.getUint8();
3910 source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
3913 for (deInt32 y = 0; y < m_imageHeight; y++)
3914 for (deInt32 x = 0; x < m_imageWidth; x++)
3915 refAccess.setPixel(source.getAccess().getPixelUint(int(float(x) * xscale), int(float(y) * yscale)), x, y);
3918 DE_FATAL("Unsupported scale");
// Command that blits the test image into a freshly allocated destination
// image (same size for 1x, double size for 2x), then reads the destination
// back and compares against the reference in verify().
3922 class ImageBlitToImage : public CmdCommand
// scale: blit scale factor. imageLayout: layout of the test image at submit
// time (srcImageLayout for vkCmdBlitImage).
3925 ImageBlitToImage (BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
3926 ~ImageBlitToImage (void) {}
3927 const char* getName (void) const { return "ImageBlitToImage"; }
3929 void logPrepare (TestLog& log, size_t commandIndex) const;
3930 void prepare (PrepareContext& context);
3931 void logSubmit (TestLog& log, size_t commandIndex) const;
3932 void submit (SubmitContext& context);
3933 void verify (VerifyContext& context, size_t commandIndex);
3936 const BlitScale m_scale;
3937 const vk::VkImageLayout m_imageLayout;
// Source (test) image parameters, cached from the PrepareContext.
3938 deInt32 m_imageWidth;
3939 deInt32 m_imageHeight;
3940 vk::VkDeviceSize m_imageMemorySize;
// Destination dimensions (equal to or double the source, per m_scale).
3941 deInt32 m_dstImageWidth;
3942 deInt32 m_dstImageHeight;
3943 vk::Move<vk::VkImage> m_dstImage;
3944 vk::Move<vk::VkDeviceMemory> m_memory;
// Log what prepare() will do for this command index.
3947 void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3949 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
// Allocate the blit destination image (sized per m_scale) and transition it
// to TRANSFER_DST_OPTIMAL so submit() can blit into it.
3952 void ImageBlitToImage::prepare (PrepareContext& context)
3954 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3955 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3956 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3957 const vk::VkDevice device = context.getContext().getDevice();
3958 const vk::VkQueue queue = context.getContext().getQueue();
3959 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3960 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3962 m_imageWidth = context.getImageWidth();
3963 m_imageHeight = context.getImageHeight();
3964 m_imageMemorySize = context.getImageMemorySize();
// Destination size: equal to the source for 1x, double for the 2x upscale.
3966 if (m_scale == BLIT_SCALE_10)
3968 m_dstImageWidth = context.getImageWidth();
3969 m_dstImageHeight = context.getImageHeight();
3971 else if (m_scale == BLIT_SCALE_20)
3973 m_dstImageWidth = context.getImageWidth() * 2;
3974 m_dstImageHeight = context.getImageHeight() * 2;
// Fixed typo in fatal message: "Unsupportd" -> "Unsupported".
3977 DE_FATAL("Unsupported blit scale");
// 2D RGBA8 destination; DST usage for the blit, SRC for readback in verify().
3980 const vk::VkImageCreateInfo createInfo =
3982 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3986 vk::VK_IMAGE_TYPE_2D,
3987 vk::VK_FORMAT_R8G8B8A8_UNORM,
3989 (deUint32)m_dstImageWidth,
3990 (deUint32)m_dstImageHeight,
3993 1, 1, // mipLevels, arrayLayers
3994 vk::VK_SAMPLE_COUNT_1_BIT,
3996 vk::VK_IMAGE_TILING_OPTIMAL,
3997 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3998 vk::VK_SHARING_MODE_EXCLUSIVE,
4000 (deUint32)queueFamilies.size(),
4002 vk::VK_IMAGE_LAYOUT_UNDEFINED
4005 m_dstImage = vk::createImage(vkd, device, &createInfo);
4008 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
// One-off command buffer: UNDEFINED -> TRANSFER_DST_OPTIMAL transition.
4011 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4012 const vk::VkImageMemoryBarrier barrier =
4014 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4018 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4020 vk::VK_IMAGE_LAYOUT_UNDEFINED,
4021 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4023 VK_QUEUE_FAMILY_IGNORED,
4024 VK_QUEUE_FAMILY_IGNORED,
4028 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4030 1, // Mip level count
4036 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
4038 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4039 queueRun(vkd, queue, *commandBuffer);
// Log what submit() will record, noting the 2x scale variant when selected.
4043 void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
4045 log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
// Record the blit: test image (m_imageLayout) -> m_dstImage
// (TRANSFER_DST_OPTIMAL), nearest filtering.
4048 void ImageBlitToImage::submit (SubmitContext& context)
4050 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4051 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4052 const vk::VkImageBlit region =
4056 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4072 vk::VK_IMAGE_ASPECT_COLOR_BIT,
// Fixed mojibake: "®ion" (U+00AE + "ion") restored to "&region".
4086 vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
// Read back m_dstImage and compare against the reference image: directly for
// the 1x blit, or against a nearest-upscaled (x/2, y/2 sampling) copy of the
// reference for the 2x blit. Failures go to the result collector.
4089 void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
4091 tcu::ResultCollector& resultCollector (context.getResultCollector());
4092 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4093 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4094 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4095 const vk::VkDevice device = context.getContext().getDevice();
4096 const vk::VkQueue queue = context.getContext().getQueue();
4097 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4098 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4099 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Host-visible readback buffer sized for the destination (4 bytes/pixel).
4100 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4101 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Transition destination DST_OPTIMAL -> SRC_OPTIMAL for the readback copy.
4103 const vk::VkImageMemoryBarrier imageBarrier =
4105 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4108 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4109 vk::VK_ACCESS_TRANSFER_READ_BIT,
4111 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4112 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4114 VK_QUEUE_FAMILY_IGNORED,
4115 VK_QUEUE_FAMILY_IGNORED,
4119 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4121 1, // Mip level count
// Make the transfer write visible to host reads after the copy-to-buffer.
4126 const vk::VkBufferMemoryBarrier bufferBarrier =
4128 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4131 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4132 vk::VK_ACCESS_HOST_READ_BIT,
4134 VK_QUEUE_FAMILY_IGNORED,
4135 VK_QUEUE_FAMILY_IGNORED,
4140 const vk::VkBufferImageCopy region =
4145 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4152 (deUint32)m_dstImageWidth,
4153 (deUint32)m_dstImageHeight,
4158 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
// Fixed mojibake: "®ion" (U+00AE + "ion") restored to "&region".
4159 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4160 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4163 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4164 queueRun(vkd, queue, *commandBuffer);
// Map + invalidate (memory may be non-coherent) before CPU comparison.
4167 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);
4169 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_dstImageWidth * m_dstImageHeight);
4171 if (m_scale == BLIT_SCALE_10)
// 1x blit: result must match the reference image bit-exactly.
4173 const deUint8* const data = (const deUint8*)ptr;
4174 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4175 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4177 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4178 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4180 else if (m_scale == BLIT_SCALE_20)
// 2x blit: build the expected image by nearest-sampling the reference at
// half coordinates, then compare bit-exactly.
4182 const deUint8* const data = (const deUint8*)ptr;
4183 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4184 tcu::TextureLevel reference (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);
4187 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4189 for (deInt32 y = 0; y < m_dstImageHeight; y++)
4190 for (deInt32 x = 0; x < m_dstImageWidth; x++)
4192 reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
4196 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4197 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4200 DE_FATAL("Unknown scale");
4202 vkd.unmapMemory(device, *memory);
// Wrapper around PrepareContext that additionally exposes the render pass,
// framebuffer, and render-target dimensions to RenderPassCommands during
// their prepare phase. Non-owning: all handles outlive this context.
4206 class PrepareRenderPassContext
4209 PrepareRenderPassContext (PrepareContext& context,
4210 vk::VkRenderPass renderPass,
4211 vk::VkFramebuffer framebuffer,
4212 deInt32 targetWidth,
4213 deInt32 targetHeight)
4214 : m_context (context)
4215 , m_renderPass (renderPass)
4216 , m_framebuffer (framebuffer)
4217 , m_targetWidth (targetWidth)
4218 , m_targetHeight (targetHeight)
// Accessors forwarding to the wrapped PrepareContext.
4222 const Memory& getMemory (void) const { return m_context.getMemory(); }
4223 const Context& getContext (void) const { return m_context.getContext(); }
4224 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
4226 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
4227 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
4229 vk::VkImage getImage (void) const { return m_context.getImage(); }
4230 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
4231 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
4232 vk::VkImageLayout getImageLayout (void) const { return m_context.getImageLayout(); }
// Render-target specific accessors.
4234 deInt32 getTargetWidth (void) const { return m_targetWidth; }
4235 deInt32 getTargetHeight (void) const { return m_targetHeight; }
4237 vk::VkRenderPass getRenderPass (void) const { return m_renderPass; }
4240 PrepareContext& m_context;
4241 const vk::VkRenderPass m_renderPass;
4242 const vk::VkFramebuffer m_framebuffer;
4243 const deInt32 m_targetWidth;
4244 const deInt32 m_targetHeight;
// Wrapper around VerifyContext that adds an RGBA8 reference render target of
// the given size, which RenderPassCommands update during verification.
4247 class VerifyRenderPassContext
4250 VerifyRenderPassContext (VerifyContext& context,
4251 deInt32 targetWidth,
4252 deInt32 targetHeight)
4253 : m_context (context)
4254 , m_referenceTarget (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
// Accessors forwarding to the wrapped VerifyContext.
4258 const Context& getContext (void) const { return m_context.getContext(); }
4259 TestLog& getLog (void) const { return m_context.getLog(); }
4260 tcu::ResultCollector& getResultCollector (void) const { return m_context.getResultCollector(); }
// CPU-side reference of the color attachment for this render pass.
4262 TextureLevel& getReferenceTarget (void) { return m_referenceTarget; }
4264 ReferenceMemory& getReference (void) { return m_context.getReference(); }
4265 TextureLevel& getReferenceImage (void) { return m_context.getReferenceImage();}
4268 VerifyContext& m_context;
4269 TextureLevel m_referenceTarget;
// Base interface for commands that are recorded inside a render pass
// (driven by SubmitRenderPass). Mirrors CmdCommand's prepare/submit/verify
// life cycle; all hooks default to no-ops.
4272 class RenderPassCommand
4275 virtual ~RenderPassCommand (void) {}
4276 virtual const char* getName (void) const = 0;
4278 // Log things that are done during prepare
4279 virtual void logPrepare (TestLog&, size_t) const {}
4280 // Log submitted calls etc.
4281 virtual void logSubmit (TestLog&, size_t) const {}
4283 // Allocate vulkan resources and prepare for submit.
4284 virtual void prepare (PrepareRenderPassContext&) {}
4286 // Submit commands to command buffer.
4287 virtual void submit (SubmitContext&) {}
// Update reference state and check results after execution.
4290 virtual void verify (VerifyRenderPassContext&, size_t) {}
// CmdCommand that owns a render pass, a color render target and framebuffer,
// and a list of RenderPassCommands which it prepares, records inside a
// beginRenderPass/endRenderPass pair, and verifies. Takes ownership of the
// command pointers and deletes them in its destructor.
4293 class SubmitRenderPass : public CmdCommand
4296 SubmitRenderPass (const vector<RenderPassCommand*>& commands);
4297 ~SubmitRenderPass (void);
4298 const char* getName (void) const { return "SubmitRenderPass"; }
4300 void logPrepare (TestLog&, size_t) const;
4301 void logSubmit (TestLog&, size_t) const;
4303 void prepare (PrepareContext&);
4304 void submit (SubmitContext&);
4306 void verify (VerifyContext&, size_t);
// Fixed-size color target (set in the constructor).
4309 const deInt32 m_targetWidth;
4310 const deInt32 m_targetHeight;
4311 vk::Move<vk::VkRenderPass> m_renderPass;
4312 vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
4313 de::MovePtr<vk::Allocation> m_colorTargetMemory2;
4314 vk::Move<vk::VkImage> m_colorTarget;
4315 vk::Move<vk::VkImageView> m_colorTargetView;
4316 vk::Move<vk::VkFramebuffer> m_framebuffer;
// Owned sub-commands, executed in order.
4317 vector<RenderPassCommand*> m_commands;
// Take ownership of the sub-commands; render target size is fixed at 256x256.
4320 SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
4321 : m_targetWidth (256)
4322 , m_targetHeight (256)
4323 , m_commands (commands)
// Delete the owned sub-commands.
4327 SubmitRenderPass::~SubmitRenderPass()
4329 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4330 delete m_commands[cmdNdx];
// Log the prepare phase of each sub-command under a section named after this
// command's index. Sub-commands are indexed by their position in m_commands.
4333 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4335 const string sectionName (de::toString(commandIndex) + ":" + getName());
4336 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4338 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4340 RenderPassCommand& command = *m_commands[cmdNdx];
4341 command.logPrepare(log, cmdNdx);
// Log the submit phase of each sub-command under a section named after this
// command's index.
4345 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4347 const string sectionName (de::toString(commandIndex) + ":" + getName());
4348 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4350 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4352 RenderPassCommand& command = *m_commands[cmdNdx];
4353 command.logSubmit(log, cmdNdx);
// Create the render pass objects (render pass, color target image + memory,
// image view, framebuffer), then run each sub-command's prepare with a
// PrepareRenderPassContext exposing them.
4357 void SubmitRenderPass::prepare (PrepareContext& context)
4359 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4360 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4361 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4362 const vk::VkDevice device = context.getContext().getDevice();
4363 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// Single subpass with one color attachment in COLOR_ATTACHMENT_OPTIMAL.
4365 const vk::VkAttachmentReference colorAttachments[] =
4367 { 0, vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL }
4369 const vk::VkSubpassDescription subpass =
4372 vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
4377 DE_LENGTH_OF_ARRAY(colorAttachments),
// Attachment is cleared on load, stored, and ends in TRANSFER_SRC_OPTIMAL so
// verify() can read it back.
4384 const vk::VkAttachmentDescription attachment =
4387 vk::VK_FORMAT_R8G8B8A8_UNORM,
4388 vk::VK_SAMPLE_COUNT_1_BIT,
4390 vk::VK_ATTACHMENT_LOAD_OP_CLEAR,
4391 vk::VK_ATTACHMENT_STORE_OP_STORE,
4393 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
4394 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,
4396 vk::VK_IMAGE_LAYOUT_UNDEFINED,
4397 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
// Color target image: RGBA8, target-sized, color attachment + transfer source.
4400 const vk::VkImageCreateInfo createInfo =
4402 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4406 vk::VK_IMAGE_TYPE_2D,
4407 vk::VK_FORMAT_R8G8B8A8_UNORM,
4408 { (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
4411 vk::VK_SAMPLE_COUNT_1_BIT,
4412 vk::VK_IMAGE_TILING_OPTIMAL,
4413 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4414 vk::VK_SHARING_MODE_EXCLUSIVE,
4415 (deUint32)queueFamilies.size(),
4417 vk::VK_IMAGE_LAYOUT_UNDEFINED
4420 m_colorTarget = vk::createImage(vkd, device, &createInfo);
4423 m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);
// 2D view of the color target with identity swizzle.
4426 const vk::VkImageViewCreateInfo createInfo =
4428 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
4433 vk::VK_IMAGE_VIEW_TYPE_2D,
4434 vk::VK_FORMAT_R8G8B8A8_UNORM,
4436 vk::VK_COMPONENT_SWIZZLE_R,
4437 vk::VK_COMPONENT_SWIZZLE_G,
4438 vk::VK_COMPONENT_SWIZZLE_B,
4439 vk::VK_COMPONENT_SWIZZLE_A
4442 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4450 m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
4453 const vk::VkRenderPassCreateInfo createInfo =
4455 vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
4469 m_renderPass = vk::createRenderPass(vkd, device, &createInfo);
// Framebuffer binding the color target view at the target dimensions.
4473 const vk::VkImageView imageViews[] =
4477 const vk::VkFramebufferCreateInfo createInfo =
4479 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
4484 DE_LENGTH_OF_ARRAY(imageViews),
4486 (deUint32)m_targetWidth,
4487 (deUint32)m_targetHeight,
4491 m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
// Let every sub-command allocate its resources against this render pass.
4495 PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);
4497 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4499 RenderPassCommand& command = *m_commands[cmdNdx];
4500 command.prepare(renderpassContext);
// Record the render pass: begin with an opaque-black clear, record each
// sub-command inline, then end the render pass.
4505 void SubmitRenderPass::submit (SubmitContext& context)
4507 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4508 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
// Clear color matches the reference clear in verify() (0,0,0,1).
4509 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.0f, 0.0f, 0.0f, 1.0f);
4511 const vk::VkRenderPassBeginInfo beginInfo =
4513 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
4519 { { 0, 0 }, { (deUint32)m_targetWidth, (deUint32)m_targetHeight } },
4524 vkd.cmdBeginRenderPass(commandBuffer, &beginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
4526 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4528 RenderPassCommand& command = *m_commands[cmdNdx];
4530 command.submit(context);
4533 vkd.cmdEndRenderPass(commandBuffer);
// Verify the result of this render pass submission:
//  1) Build a CPU reference image: clear to opaque black (matching submit()'s
//     clear value) and replay each RenderPassCommand's verify().
//  2) Read the GPU color target back: barrier color-attachment-write ->
//     transfer-read, copy image to a host-visible buffer, barrier
//     transfer-write -> host-read, submit and map.
//  3) Compare reference vs. readback with a zero integer threshold.
// NOTE(review): some original lines (braces, a few struct fields) are elided
// in this excerpt; only the visible statements are documented.
4536 void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
4538 TestLog& log (context.getLog());
4539 tcu::ResultCollector& resultCollector (context.getResultCollector());
4540 const string sectionName (de::toString(commandIndex) + ":" + getName());
4541 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4542 VerifyRenderPassContext verifyContext (context, m_targetWidth, m_targetHeight);
// Reference starts from the same clear color the render pass uses.
4544 tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4546 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4548 RenderPassCommand& command = *m_commands[cmdNdx];
4549 command.verify(verifyContext, cmdNdx);
4553 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4554 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4555 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4556 const vk::VkDevice device = context.getContext().getDevice();
4557 const vk::VkQueue queue = context.getContext().getQueue();
4558 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4559 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4560 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
// 4 bytes per pixel (RGBA8 readback, see resAccess below).
4561 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4562 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
// Make color-attachment writes visible to the transfer read of the copy.
4564 const vk::VkImageMemoryBarrier imageBarrier =
4566 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4569 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4570 vk::VK_ACCESS_TRANSFER_READ_BIT,
// Layout already TRANSFER_SRC_OPTIMAL (presumably via the render pass's
// final layout — not visible in this excerpt); no transition performed.
4572 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4573 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4575 VK_QUEUE_FAMILY_IGNORED,
4576 VK_QUEUE_FAMILY_IGNORED,
4580 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4582 1, // Mip level count
// Make the transfer write visible to the host read after mapping.
4587 const vk::VkBufferMemoryBarrier bufferBarrier =
4589 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4592 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4593 vk::VK_ACCESS_HOST_READ_BIT,
4595 VK_QUEUE_FAMILY_IGNORED,
4596 VK_QUEUE_FAMILY_IGNORED,
4601 const vk::VkBufferImageCopy region =
4606 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4613 (deUint32)m_targetWidth,
4614 (deUint32)m_targetHeight,
4619 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
// Fixed: last argument was mojibake "®ion" (collapsed "&reg;" entity);
// vkCmdCopyImageToBuffer takes regionCount + pRegions, i.e. 1, &region.
4620 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4621 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4624 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
// Blocking submit; queueRun presumably waits for completion before mapping.
4625 queueRun(vkd, queue, *commandBuffer);
4628 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
// Needed because HOST_COHERENT was not required when allocating the memory.
4630 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_targetWidth * m_targetHeight);
4633 const deUint8* const data = (const deUint8*)ptr;
4634 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
4635 const ConstPixelBufferAccess& refAccess (verifyContext.getReferenceTarget().getAccess());
// Zero threshold: readback must match the reference exactly.
4637 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4638 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4641 vkd.unmapMemory(device, *memory);
// Bundle of RAII-owned Vulkan objects created by createPipelineWithResources()
// and kept alive for the lifetime of a render command.
4646 struct PipelineResources
4648 vk::Move<vk::VkPipeline> pipeline;
// Empty (null) when the pipeline uses no descriptor bindings.
4649 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
4650 vk::Move<vk::VkPipelineLayout> pipelineLayout;
// Build a complete graphics pipeline (plus optional descriptor-set layout and
// the pipeline layout) for the given render pass/subpass and store the handles
// in 'resources'. Fixed-function state is hard-coded for these tests: full
// viewport/scissor, no culling, no blending (ONE/ZERO ADD), 1-sample, and
// always-pass depth/stencil.
// NOTE(review): many struct-initializer lines are elided in this excerpt;
// comments describe only the visible fields.
4653 void createPipelineWithResources (const vk::DeviceInterface& vkd,
4654 const vk::VkDevice device,
4655 const vk::VkRenderPass renderPass,
4656 const deUint32 subpass,
4657 const vk::VkShaderModule& vertexShaderModule,
4658 const vk::VkShaderModule& fragmentShaderModule,
4659 const deUint32 viewPortWidth,
4660 const deUint32 viewPortHeight,
4661 const vector<vk::VkVertexInputBindingDescription>& vertexBindingDescriptions,
4662 const vector<vk::VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
4663 const vector<vk::VkDescriptorSetLayoutBinding>& bindings,
4664 const vk::VkPrimitiveTopology topology,
4665 deUint32 pushConstantRangeCount,
4666 const vk::VkPushConstantRange* pushConstantRanges,
4667 PipelineResources& resources)
// Descriptor set layout is only created when the caller supplies bindings.
4669 if (!bindings.empty())
4671 const vk::VkDescriptorSetLayoutCreateInfo createInfo =
4673 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
4677 (deUint32)bindings.size(),
4678 bindings.empty() ? DE_NULL : &bindings[0]
4681 resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
// Pipeline layout references the (possibly null) set layout and any
// caller-supplied push-constant ranges.
4685 const vk::VkDescriptorSetLayout descriptorSetLayout_ = *resources.descriptorSetLayout;
4686 const vk::VkPipelineLayoutCreateInfo createInfo =
4688 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
4692 resources.descriptorSetLayout ? 1u : 0u,
4693 resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,
4695 pushConstantRangeCount,
4699 resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
// Two stages only: vertex + fragment.
4703 const vk::VkPipelineShaderStageCreateInfo shaderStages[] =
4706 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4709 vk::VK_SHADER_STAGE_VERTEX_BIT,
4715 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4718 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
4719 fragmentShaderModule,
// Depth/stencil effectively disabled: ALWAYS compares, KEEP ops.
4724 const vk::VkPipelineDepthStencilStateCreateInfo depthStencilState =
4726 vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
4731 vk::VK_COMPARE_OP_ALWAYS,
4735 vk::VK_STENCIL_OP_KEEP,
4736 vk::VK_STENCIL_OP_KEEP,
4737 vk::VK_STENCIL_OP_KEEP,
4738 vk::VK_COMPARE_OP_ALWAYS,
4744 vk::VK_STENCIL_OP_KEEP,
4745 vk::VK_STENCIL_OP_KEEP,
4746 vk::VK_STENCIL_OP_KEEP,
4747 vk::VK_COMPARE_OP_ALWAYS,
4755 const vk::VkPipelineVertexInputStateCreateInfo vertexInputState =
4757 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
4761 (deUint32)vertexBindingDescriptions.size(),
4762 vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],
4764 (deUint32)vertexAttributeDescriptions.size(),
4765 vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
4767 const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyState =
4769 vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
// Single full-target viewport and scissor.
4775 const vk::VkViewport viewports[] =
4777 { 0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f }
4779 const vk::VkRect2D scissors[] =
4781 { { 0, 0 }, { (deUint32)viewPortWidth, (deUint32)viewPortHeight } }
4783 const vk::VkPipelineViewportStateCreateInfo viewportState =
4785 vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
4788 DE_LENGTH_OF_ARRAY(viewports),
4790 DE_LENGTH_OF_ARRAY(scissors),
4793 const vk::VkPipelineRasterizationStateCreateInfo rasterState =
4795 vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
4801 vk::VK_POLYGON_MODE_FILL,
4802 vk::VK_CULL_MODE_NONE,
4803 vk::VK_FRONT_FACE_COUNTER_CLOCKWISE,
// Single-sample rendering with all samples enabled.
4810 const vk::VkSampleMask sampleMask = ~0u;
4811 const vk::VkPipelineMultisampleStateCreateInfo multisampleState =
4813 vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
4817 vk::VK_SAMPLE_COUNT_1_BIT,
// Pass-through "blend" (src*ONE + dst*ZERO) writing all RGBA channels.
4824 const vk::VkPipelineColorBlendAttachmentState attachments[] =
4828 vk::VK_BLEND_FACTOR_ONE,
4829 vk::VK_BLEND_FACTOR_ZERO,
4830 vk::VK_BLEND_OP_ADD,
4831 vk::VK_BLEND_FACTOR_ONE,
4832 vk::VK_BLEND_FACTOR_ZERO,
4833 vk::VK_BLEND_OP_ADD,
4834 (vk::VK_COLOR_COMPONENT_R_BIT|
4835 vk::VK_COLOR_COMPONENT_G_BIT|
4836 vk::VK_COLOR_COMPONENT_B_BIT|
4837 vk::VK_COLOR_COMPONENT_A_BIT)
4840 const vk::VkPipelineColorBlendStateCreateInfo colorBlendState =
4842 vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
4847 vk::VK_LOGIC_OP_COPY,
4848 DE_LENGTH_OF_ARRAY(attachments),
4850 { 0.0f, 0.0f, 0.0f, 0.0f }
4852 const vk::VkGraphicsPipelineCreateInfo createInfo =
4854 vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
4858 DE_LENGTH_OF_ARRAY(shaderStages),
4862 &inputAssemblyState,
4870 *resources.pipelineLayout,
4877 resources.pipeline = vk::createGraphicsPipeline(vkd, device, 0, &createInfo);
// Render-pass command that binds the test buffer as a 16-bit index buffer and
// draws one point per index; the vertex shader presumably decodes each index
// into a pixel coordinate (TODO confirm against "index-buffer.vert").
4881 class RenderIndexBuffer : public RenderPassCommand
4884 RenderIndexBuffer (void) {}
4885 ~RenderIndexBuffer (void) {}
4887 const char* getName (void) const { return "RenderIndexBuffer"; }
4888 void logPrepare (TestLog&, size_t) const;
4889 void logSubmit (TestLog&, size_t) const;
4890 void prepare (PrepareRenderPassContext&);
4891 void submit (SubmitContext& context);
4892 void verify (VerifyRenderPassContext&, size_t);
4895 PipelineResources m_resources;
// Size of the test buffer, captured in prepare() for use in verify().
4896 vk::VkDeviceSize m_bufferSize;
// Log what prepare() will do for this command.
4899 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4901 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
// Log what submit() will do for this command.
4904 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4906 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
// Create the point-list pipeline (no vertex input, no descriptors) and record
// the buffer size for verification.
4909 void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4911 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4912 const vk::VkDevice device = context.getContext().getDevice();
4913 const vk::VkRenderPass renderPass = context.getRenderPass();
4914 const deUint32 subpass = 0;
4915 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4916 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4918 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4919 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4920 m_bufferSize = context.getBufferSize();
// Draw one point per 16-bit index (bufferSize / 2 indices).
4923 void RenderIndexBuffer::submit (SubmitContext& context)
4925 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4926 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4928 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4929 vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4930 vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
// Mirror the draw on the CPU: each 16-bit value's low/high bytes are treated
// as an (x, y) pixel coordinate painted white in the reference image.
4933 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4935 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4937 const deUint8 x = context.getReference().get(pos * 2);
4938 const deUint8 y = context.getReference().get((pos * 2) + 1);
4940 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that binds the test buffer as a vertex buffer of
// R8G8_UNORM attributes and draws one point per 2-byte vertex.
4944 class RenderVertexBuffer : public RenderPassCommand
4947 RenderVertexBuffer (void) {}
4948 ~RenderVertexBuffer (void) {}
4950 const char* getName (void) const { return "RenderVertexBuffer"; }
4951 void logPrepare (TestLog&, size_t) const;
4952 void logSubmit (TestLog&, size_t) const;
4953 void prepare (PrepareRenderPassContext&);
4954 void submit (SubmitContext& context);
4955 void verify (VerifyRenderPassContext&, size_t);
4958 PipelineResources m_resources;
// Size of the test buffer, captured in prepare() for use in verify().
4959 vk::VkDeviceSize m_bufferSize;
4962 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4964 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4967 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4969 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
// Build the pipeline with a single R8G8_UNORM per-vertex attribute bound to
// binding 0, then record the buffer size for verification.
4972 void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4974 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4975 const vk::VkDevice device = context.getContext().getDevice();
4976 const vk::VkRenderPass renderPass = context.getRenderPass();
4977 const deUint32 subpass = 0;
4978 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4979 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4981 vector<vk::VkVertexInputAttributeDescription> vertexAttributeDescriptions;
4982 vector<vk::VkVertexInputBindingDescription> vertexBindingDescriptions;
4985 const vk::VkVertexInputBindingDescription vertexBindingDescription =
4989 vk::VK_VERTEX_INPUT_RATE_VERTEX
4992 vertexBindingDescriptions.push_back(vertexBindingDescription);
4995 const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
// Two unsigned-normalized bytes per vertex: (x, y) position.
4999 vk::VK_FORMAT_R8G8_UNORM,
5003 vertexAttributeDescriptions.push_back(vertexAttributeDescription);
5005 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5006 vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5008 m_bufferSize = context.getBufferSize();
// Draw one point per 2-byte vertex (bufferSize / 2 vertices).
5011 void RenderVertexBuffer::submit (SubmitContext& context)
5013 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5014 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5015 const vk::VkDeviceSize offset = 0;
5016 const vk::VkBuffer buffer = context.getBuffer();
5018 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5019 vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
5020 vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0);
// CPU mirror of the draw: each byte pair (x, y) is painted white in the
// reference image.
5023 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
5025 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
5027 const deUint8 x = context.getReference().get(pos * 2);
5028 const deUint8 y = context.getReference().get((pos * 2) + 1);
5030 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as
// uniform buffers. Because UBO size is limited, the buffer is sliced into
// MAX_UNIFORM_BUFFER_SIZE chunks, one descriptor set per chunk, and one draw
// per chunk.
5034 class RenderVertexUniformBuffer : public RenderPassCommand
5037 RenderVertexUniformBuffer (void) {}
5038 ~RenderVertexUniformBuffer (void);
5040 const char* getName (void) const { return "RenderVertexUniformBuffer"; }
5041 void logPrepare (TestLog&, size_t) const;
5042 void logSubmit (TestLog&, size_t) const;
5043 void prepare (PrepareRenderPassContext&);
5044 void submit (SubmitContext& context);
5045 void verify (VerifyRenderPassContext&, size_t);
5048 PipelineResources m_resources;
5049 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// One descriptor set per MAX_UNIFORM_BUFFER_SIZE-sized slice of the buffer;
// sets are freed implicitly when m_descriptorPool is destroyed.
5050 vector<vk::VkDescriptorSet> m_descriptorSets;
5052 vk::VkDeviceSize m_bufferSize;
// Empty: pool destruction releases the descriptor sets.
5055 RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
5059 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5061 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5064 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5066 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Create pipeline + descriptor pool, then allocate and write one UBO
// descriptor set per buffer slice.
5069 void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
5071 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5072 const vk::VkDevice device = context.getContext().getDevice();
5073 const vk::VkRenderPass renderPass = context.getRenderPass();
5074 const deUint32 subpass = 0;
5075 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
5076 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5077 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5079 m_bufferSize = context.getBufferSize();
5082 const vk::VkDescriptorSetLayoutBinding binding =
5085 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5087 vk::VK_SHADER_STAGE_VERTEX_BIT,
5091 bindings.push_back(binding);
5094 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5095 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor (and set) per MAX_UNIFORM_BUFFER_SIZE slice, rounded up.
5098 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
5099 const vk::VkDescriptorPoolSize poolSizes =
5101 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5104 const vk::VkDescriptorPoolCreateInfo createInfo =
5106 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5108 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5115 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5116 m_descriptorSets.resize(descriptorCount);
5119 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5121 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5122 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5124 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): raw handles are kept; the pool owns their lifetime.
5132 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5135 const vk::VkDescriptorBufferInfo bufferInfo =
5137 context.getBuffer(),
5138 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
// Last slice may be shorter than MAX_UNIFORM_BUFFER_SIZE.
5139 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5140 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5141 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5143 const vk::VkWriteDescriptorSet write =
5145 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5147 m_descriptorSets[descriptorSetNdx],
5151 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5157 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// One draw per slice: bind that slice's set and draw size/2 points.
5162 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5164 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5165 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5167 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5169 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5171 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5172 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5173 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5174 const deUint32 count = (deUint32)(size / 2);
5176 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5177 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
// CPU mirror of the draws: for every byte pair in every slice, paint pixel
// (x, y) white in the reference image.
5181 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5183 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5185 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5186 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5187 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5188 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5189 const size_t count = size / 2;
5191 for (size_t pos = 0; pos < count; pos++)
5193 const deUint8 x = context.getReference().get(offset + pos * 2);
5194 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5196 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as
// R16_UINT uniform texel buffers. The buffer is sliced by the device's
// maxTexelBufferElements limit (2 bytes per texel); one buffer view +
// descriptor set + draw per slice.
5201 class RenderVertexUniformTexelBuffer : public RenderPassCommand
5204 RenderVertexUniformTexelBuffer (void) {}
5205 ~RenderVertexUniformTexelBuffer (void);
5207 const char* getName (void) const { return "RenderVertexUniformTexelBuffer"; }
5208 void logPrepare (TestLog&, size_t) const;
5209 void logSubmit (TestLog&, size_t) const;
5210 void prepare (PrepareRenderPassContext&);
5211 void submit (SubmitContext& context);
5212 void verify (VerifyRenderPassContext&, size_t);
5215 PipelineResources m_resources;
5216 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5217 vector<vk::VkDescriptorSet> m_descriptorSets;
// Raw (manually destroyed) buffer views, one per slice.
5218 vector<vk::VkBufferView> m_bufferViews;
// Cached for the destructor; presumably assigned in prepare() on a line not
// visible in this excerpt — TODO confirm.
5220 const vk::DeviceInterface* m_vkd;
5221 vk::VkDevice m_device;
5222 vk::VkDeviceSize m_bufferSize;
5223 deUint32 m_maxUniformTexelCount;
// Destroy the manually-managed buffer views; descriptor sets are released by
// the pool's destruction.
5226 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5228 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5230 if (!!m_bufferViews[bufferViewNdx])
5232 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5233 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
// NOTE(review): message says "uniform buffer" though this command uses a
// uniform *texel* buffer — looks like copy-paste; confirm before changing
// (log text may be relied upon elsewhere).
5238 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5240 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5243 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5245 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Create pipeline + pool, then per slice: allocate a set, create an R16_UINT
// view over the slice, and write it into the set.
5248 void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
5250 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5251 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5252 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5253 const vk::VkDevice device = context.getContext().getDevice();
5254 const vk::VkRenderPass renderPass = context.getRenderPass();
5255 const deUint32 subpass = 0;
5256 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
5257 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5258 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5262 m_bufferSize = context.getBufferSize();
// Device limit on texels per texel-buffer view.
5263 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5266 const vk::VkDescriptorSetLayoutBinding binding =
5269 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5271 vk::VK_SHADER_STAGE_VERTEX_BIT,
5275 bindings.push_back(binding);
5278 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5279 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// Each slice covers maxTexelCount 16-bit texels = maxTexelCount * 2 bytes.
5282 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
5283 const vk::VkDescriptorPoolSize poolSizes =
5285 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5288 const vk::VkDescriptorPoolCreateInfo createInfo =
5290 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5292 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5299 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5300 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5301 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5304 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
// Texels in this slice (last slice may be short).
5306 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5307 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5308 : m_maxUniformTexelCount * 2) / 2;
5309 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5310 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5312 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5320 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5323 const vk::VkBufferViewCreateInfo createInfo =
5325 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5329 context.getBuffer(),
5330 vk::VK_FORMAT_R16_UINT,
5331 descriptorSetNdx * m_maxUniformTexelCount * 2,
5335 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5339 const vk::VkWriteDescriptorSet write =
5341 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5343 m_descriptorSets[descriptorSetNdx],
5347 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5350 &m_bufferViews[descriptorSetNdx]
5353 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// One draw per slice: bind its set and draw one point per 16-bit texel.
5358 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5360 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5361 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5363 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5365 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5367 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5368 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5369 : m_maxUniformTexelCount * 2) / 2;
5371 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5372 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
// CPU mirror: each 16-bit texel's low/high bytes are an (x, y) pixel painted
// white in the reference image.
5376 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5378 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5380 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 2;
5381 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5382 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5383 : m_maxUniformTexelCount * 2) / 2;
5385 for (size_t pos = 0; pos < (size_t)count; pos++)
5387 const deUint8 x = context.getReference().get(offset + pos * 2);
5388 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5390 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that exposes the test buffer to the vertex shader as
// storage buffers, sliced into MAX_STORAGE_BUFFER_SIZE chunks with one
// descriptor set and one draw per chunk.
5395 class RenderVertexStorageBuffer : public RenderPassCommand
5398 RenderVertexStorageBuffer (void) {}
5399 ~RenderVertexStorageBuffer (void);
5401 const char* getName (void) const { return "RenderVertexStorageBuffer"; }
5402 void logPrepare (TestLog&, size_t) const;
5403 void logSubmit (TestLog&, size_t) const;
5404 void prepare (PrepareRenderPassContext&);
5405 void submit (SubmitContext& context);
5406 void verify (VerifyRenderPassContext&, size_t);
5409 PipelineResources m_resources;
5410 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
// One set per MAX_STORAGE_BUFFER_SIZE slice; freed with the pool.
5411 vector<vk::VkDescriptorSet> m_descriptorSets;
5413 vk::VkDeviceSize m_bufferSize;
// Empty: pool destruction releases the descriptor sets.
5416 RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
5420 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5422 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5425 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5427 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Create pipeline + pool, then allocate and write one SSBO descriptor set
// per buffer slice.
5430 void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
5432 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5433 const vk::VkDevice device = context.getContext().getDevice();
5434 const vk::VkRenderPass renderPass = context.getRenderPass();
5435 const deUint32 subpass = 0;
5436 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
5437 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5438 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5440 m_bufferSize = context.getBufferSize();
5443 const vk::VkDescriptorSetLayoutBinding binding =
5446 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5448 vk::VK_SHADER_STAGE_VERTEX_BIT,
5452 bindings.push_back(binding);
5455 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5456 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor (and set) per MAX_STORAGE_BUFFER_SIZE slice, rounded up.
5459 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
5460 const vk::VkDescriptorPoolSize poolSizes =
5462 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5465 const vk::VkDescriptorPoolCreateInfo createInfo =
5467 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5469 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5476 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5477 m_descriptorSets.resize(descriptorCount);
5480 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5482 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5483 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5485 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
// disown(): raw handles are kept; the pool owns their lifetime.
5493 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5496 const vk::VkDescriptorBufferInfo bufferInfo =
5498 context.getBuffer(),
5499 descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
// Range is clamped so the last slice stays within the buffer.
5500 de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
5502 const vk::VkWriteDescriptorSet write =
5504 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5506 m_descriptorSets[descriptorSetNdx],
5510 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5516 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
// One draw per slice: bind its set and draw size/2 points.
5521 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5523 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5524 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5526 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5528 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5530 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5531 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5532 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5534 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5535 vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
// CPU mirror: each byte pair (x, y) in every slice is painted white in the
// reference image.
5539 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5541 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5543 const size_t offset = descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5544 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5545 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5546 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5548 for (size_t pos = 0; pos < size / 2; pos++)
5550 const deUint8 x = context.getReference().get(offset + pos * 2);
5551 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5553 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that draws the test buffer as points, reading the
// coordinates from a VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER (one R32_UINT
// buffer view + descriptor set per m_maxStorageTexelCount-sized slice).
// NOTE(review): braces and access-specifier lines of this declaration are
// elided in this extract; member grouping below follows the sibling classes.
5558 class RenderVertexStorageTexelBuffer : public RenderPassCommand
5561 	RenderVertexStorageTexelBuffer	(void) {}
5562 	~RenderVertexStorageTexelBuffer	(void);
5564 	const char*	getName		(void) const { return "RenderVertexStorageTexelBuffer"; }
5565 	void		logPrepare	(TestLog&, size_t) const;
5566 	void		logSubmit	(TestLog&, size_t) const;
5567 	void		prepare		(PrepareRenderPassContext&);
5568 	void		submit		(SubmitContext& context);
5569 	void		verify		(VerifyRenderPassContext&, size_t);
// Pipeline + descriptor objects created in prepare().
5572 	PipelineResources	m_resources;
5573 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5574 	vector<vk::VkDescriptorSet>	m_descriptorSets;
// Raw (non-Move) buffer-view handles; destroyed manually in the destructor.
5575 	vector<vk::VkBufferView>	m_bufferViews;
// Cached so the destructor can destroy the buffer views.
5577 	const vk::DeviceInterface*	m_vkd;
5578 	vk::VkDevice	m_device;
5579 	vk::VkDeviceSize	m_bufferSize;
// Device limit maxTexelBufferElements; caps texels per buffer view.
5580 	deUint32	m_maxStorageTexelCount;
5583 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5585 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5587 if (!!m_bufferViews[bufferViewNdx])
5589 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5590 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5595 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5597 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5600 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5602 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Builds the graphics pipeline and one descriptor set + R32_UINT buffer view
// per m_maxStorageTexelCount-texel slice of the test buffer.
// NOTE(review): several struct-initializer field lines (pNext, flags, counts,
// closing braces) are elided in this extract; only named fields are visible.
5605 void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
5607 	const vk::InstanceInterface&			vki						= context.getContext().getInstanceInterface();
5608 	const vk::VkPhysicalDevice				physicalDevice			= context.getContext().getPhysicalDevice();
5609 	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
5610 	const vk::VkDevice						device					= context.getContext().getDevice();
5611 	const vk::VkRenderPass					renderPass				= context.getRenderPass();
5612 	const deUint32							subpass					= 0;
5613 	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
5614 	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5615 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
5619 	m_bufferSize           = context.getBufferSize();
// Device limit on texels per buffer view; each texel is 4 bytes (R32_UINT).
5620 	m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
// Single storage-texel-buffer binding visible to the vertex stage.
5623 		const vk::VkDescriptorSetLayoutBinding binding =
5626 			vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5628 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5632 		bindings.push_back(binding);
5635 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5636 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
// One descriptor per view-sized slice of the buffer.
5639 	const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
5640 	const vk::VkDescriptorPoolSize poolSizes =
5642 		vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5645 	const vk::VkDescriptorPoolCreateInfo createInfo =
5647 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
// FREE_DESCRIPTOR_SET_BIT so the sets can be released individually.
5649 		vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5656 	m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5657 	m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5658 	m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
// Allocate a set and create a matching buffer view for every slice.
5661 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5663 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5664 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5666 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5674 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5677 			const vk::VkBufferViewCreateInfo createInfo =
5679 				vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5683 				context.getBuffer(),
5684 				vk::VK_FORMAT_R32_UINT,
// Offset and size of this slice; the final view may be truncated.
5685 				descriptorSetNdx * m_maxStorageTexelCount * 4,
5686 				(deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
5689 			VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5693 			const vk::VkWriteDescriptorSet write =
5695 				vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5697 				m_descriptorSets[descriptorSetNdx],
5701 				vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5704 				&m_bufferViews[descriptorSetNdx]
5707 			vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5712 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5714 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5715 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5717 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5719 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5721 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5722 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5723 : m_maxStorageTexelCount * 4) / 2;
5725 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5726 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5730 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5732 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5734 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
5735 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5736 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5737 : m_maxStorageTexelCount * 4) / 2;
5739 DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5740 DE_ASSERT(context.getReference().getSize() > offset);
5741 DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5743 for (size_t pos = 0; pos < (size_t)count; pos++)
5745 const deUint8 x = context.getReference().get(offset + pos * 2);
5746 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5748 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
// Render-pass command that reads coordinate pairs from the test image bound
// as a VK_DESCRIPTOR_TYPE_STORAGE_IMAGE and draws them as white points.
// NOTE(review): braces and access-specifier lines are elided in this extract.
5753 class RenderVertexStorageImage : public RenderPassCommand
5756 	RenderVertexStorageImage	(void) {}
5757 	~RenderVertexStorageImage	(void);
5759 	const char*	getName		(void) const { return "RenderVertexStorageImage"; }
5760 	void		logPrepare	(TestLog&, size_t) const;
5761 	void		logSubmit	(TestLog&, size_t) const;
5762 	void		prepare		(PrepareRenderPassContext&);
5763 	void		submit		(SubmitContext& context);
5764 	void		verify		(VerifyRenderPassContext&, size_t);
// All owned objects are Move<> wrappers, so no manual destructor work needed.
5767 	PipelineResources	m_resources;
5768 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5769 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
5770 	vk::Move<vk::VkImageView>	m_imageView;
5773 RenderVertexStorageImage::~RenderVertexStorageImage (void)
5777 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5779 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
5782 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5784 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
// Builds the pipeline, descriptor pool/set, and an RGBA8 image view over the
// test image, then points the storage-image descriptor at that view.
// NOTE(review): struct-initializer field lines (pNext, flags, subresource
// counts, closing braces) are elided in this extract.
5787 void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
5789 	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
5790 	const vk::VkDevice						device					= context.getContext().getDevice();
5791 	const vk::VkRenderPass					renderPass				= context.getRenderPass();
5792 	const deUint32							subpass					= 0;
5793 	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
5794 	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5795 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
// Single storage-image binding visible to the vertex stage.
5798 		const vk::VkDescriptorSetLayoutBinding binding =
5801 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5803 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5807 		bindings.push_back(binding);
5810 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5811 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5814 	const vk::VkDescriptorPoolSize poolSizes =
5816 		vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5819 	const vk::VkDescriptorPoolCreateInfo createInfo =
5821 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5823 		vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5830 	m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5834 	const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
5835 	const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
5837 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5845 	m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 view over the test image (color aspect).
5848 		const vk::VkImageViewCreateInfo createInfo =
5850 			vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5855 			vk::VK_IMAGE_VIEW_TYPE_2D,
5856 			vk::VK_FORMAT_R8G8B8A8_UNORM,
5857 			vk::makeComponentMappingRGBA(),
5859 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
5867 		m_imageView = vk::createImageView(vkd, device, &createInfo);
// Storage images must be accessed in GENERAL layout.
5871 		const vk::VkDescriptorImageInfo imageInfo =
5875 			vk::VK_IMAGE_LAYOUT_GENERAL
5877 		const vk::VkWriteDescriptorSet write =
5879 			vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5885 			vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5891 		vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5896 void RenderVertexStorageImage::submit (SubmitContext& context)
5898 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5899 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5901 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5903 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5904 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
// Verify against the reference image: each texel contributes two plotted
// points, one from its (r, g) channels and one from (b, a).
// NOTE(review): the conditional selecting between the two setPixel calls
// (original lines 5913-5914, 5916) is elided in this extract — presumably it
// branches on pos parity; confirm against the full source.
5907 void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5909 	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5911 		const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
5912 		const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5915 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5917 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
// Render-pass command that samples coordinate pairs from the test image via a
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER and draws them as white points.
// NOTE(review): braces and access-specifier lines are elided in this extract.
5921 class RenderVertexSampledImage : public RenderPassCommand
5924 	RenderVertexSampledImage	(void) {}
5925 	~RenderVertexSampledImage	(void);
5927 	const char*	getName		(void) const { return "RenderVertexSampledImage"; }
5928 	void		logPrepare	(TestLog&, size_t) const;
5929 	void		logSubmit	(TestLog&, size_t) const;
5930 	void		prepare		(PrepareRenderPassContext&);
5931 	void		submit		(SubmitContext& context);
5932 	void		verify		(VerifyRenderPassContext&, size_t);
// All owned objects are Move<> wrappers, so no manual destructor work needed.
5935 	PipelineResources	m_resources;
5936 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
5937 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
5938 	vk::Move<vk::VkImageView>	m_imageView;
5939 	vk::Move<vk::VkSampler>	m_sampler;
5942 RenderVertexSampledImage::~RenderVertexSampledImage (void)
5946 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5948 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
5951 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5953 log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
// Builds the pipeline, descriptor pool/set, an RGBA8 image view, and a
// nearest-filter sampler, then writes the combined-image-sampler descriptor.
// NOTE(review): struct-initializer field lines (pNext, flags, lod/anisotropy
// values, closing braces) are elided in this extract.
5956 void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
5958 	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
5959 	const vk::VkDevice						device					= context.getContext().getDevice();
5960 	const vk::VkRenderPass					renderPass				= context.getRenderPass();
5961 	const deUint32							subpass					= 0;
5962 	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
5963 	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5964 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
// Single combined-image-sampler binding visible to the vertex stage.
5967 		const vk::VkDescriptorSetLayoutBinding binding =
5970 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5972 			vk::VK_SHADER_STAGE_VERTEX_BIT,
5976 		bindings.push_back(binding);
5979 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5980 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5983 	const vk::VkDescriptorPoolSize poolSizes =
5985 		vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5988 	const vk::VkDescriptorPoolCreateInfo createInfo =
5990 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5992 		vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5999 	m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6003 	const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6004 	const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6006 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6014 	m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
// 2D RGBA8 view over the test image (color aspect).
6017 		const vk::VkImageViewCreateInfo createInfo =
6019 			vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
6024 			vk::VK_IMAGE_VIEW_TYPE_2D,
6025 			vk::VK_FORMAT_R8G8B8A8_UNORM,
6026 			vk::makeComponentMappingRGBA(),
6028 				vk::VK_IMAGE_ASPECT_COLOR_BIT,
6036 		m_imageView = vk::createImageView(vkd, device, &createInfo);
// Nearest filtering + clamp-to-edge so texel values are read back exactly.
6040 		const vk::VkSamplerCreateInfo createInfo =
6042 			vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
6046 			vk::VK_FILTER_NEAREST,
6047 			vk::VK_FILTER_NEAREST,
6049 			vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
6050 			vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6051 			vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6052 			vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6057 			vk::VK_COMPARE_OP_ALWAYS,
6060 			vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
6064 		m_sampler = vk::createSampler(vkd, device, &createInfo);
6068 		const vk::VkDescriptorImageInfo imageInfo =
6072 			vk::VK_IMAGE_LAYOUT_GENERAL
6074 		const vk::VkWriteDescriptorSet write =
6076 			vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6082 			vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
6088 		vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6093 void RenderVertexSampledImage::submit (SubmitContext& context)
6095 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6096 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6098 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6100 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
6101 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
// Verify against the reference image: each texel contributes two plotted
// points, one from its (r, g) channels and one from (b, a).
// NOTE(review): the conditional selecting between the two setPixel calls
// (original lines 6110-6111, 6113) is elided in this extract — presumably it
// branches on pos parity; confirm against the full source.
6104 void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
6106 	for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
6108 		const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
6109 		const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
6112 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
6114 			context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
// Render-pass command that draws a full-screen quad whose fragment shader
// reads the test buffer through uniform-buffer descriptors, one set per
// MAX_UNIFORM_BUFFER_SIZE slice; the draw index is passed via push constants.
// NOTE(review): braces and access-specifier lines are elided in this extract.
6118 class RenderFragmentUniformBuffer : public RenderPassCommand
6121 	RenderFragmentUniformBuffer	(void) {}
6122 	~RenderFragmentUniformBuffer	(void);
6124 	const char*	getName		(void) const { return "RenderFragmentUniformBuffer"; }
6125 	void		logPrepare	(TestLog&, size_t) const;
6126 	void		logSubmit	(TestLog&, size_t) const;
6127 	void		prepare		(PrepareRenderPassContext&);
6128 	void		submit		(SubmitContext& context);
6129 	void		verify		(VerifyRenderPassContext&, size_t);
6132 	PipelineResources	m_resources;
6133 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6134 	vector<vk::VkDescriptorSet>	m_descriptorSets;
// Cached in prepare() for use by submit()/verify().
6136 	vk::VkDeviceSize	m_bufferSize;
6137 	size_t	m_targetWidth;
6138 	size_t	m_targetHeight;
6141 RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
6145 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6147 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6150 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6152 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
// Builds the pipeline (with a fragment-stage push-constant range) and one
// uniform-buffer descriptor set per MAX_UNIFORM_BUFFER_SIZE slice.
// NOTE(review): struct-initializer field lines (pNext, flags, offsets,
// closing braces) are elided in this extract.
6155 void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
6157 	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
6158 	const vk::VkDevice						device					= context.getContext().getDevice();
6159 	const vk::VkRenderPass					renderPass				= context.getRenderPass();
6160 	const deUint32							subpass					= 0;
6161 	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6162 	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
6163 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6165 	m_bufferSize   = context.getBufferSize();
6166 	m_targetWidth  = context.getTargetWidth();
6167 	m_targetHeight = context.getTargetHeight();
// Single uniform-buffer binding visible to the fragment stage.
6170 		const vk::VkDescriptorSetLayoutBinding binding =
6173 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6175 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6179 		bindings.push_back(binding);
// Push constants carry per-draw parameters (see submit()).
6181 	const vk::VkPushConstantRange pushConstantRange =
6183 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6188 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6189 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
// One descriptor per MAX_UNIFORM_BUFFER_SIZE slice of the buffer.
6192 	const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
6193 	const vk::VkDescriptorPoolSize poolSizes =
6195 		vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6198 	const vk::VkDescriptorPoolCreateInfo createInfo =
6200 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6202 		vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6209 	m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6210 	m_descriptorSets.resize(descriptorCount);
// Allocate a set per slice and point it at the slice's buffer range.
6213 	for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6215 		const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6216 		const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6218 			vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6226 		m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6229 		const vk::VkDescriptorBufferInfo bufferInfo =
6231 			context.getBuffer(),
6232 			(vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
// Final slice may be shorter than MAX_UNIFORM_BUFFER_SIZE.
6233 			m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6234 				? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6235 				: (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6237 		const vk::VkWriteDescriptorSet write =
6239 			vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6241 			m_descriptorSets[descriptorSetNdx],
6245 			vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6251 		vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6256 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6258 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6259 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6261 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6263 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6267 const deUint32 callId;
6268 const deUint32 valuesPerPixel;
6271 (deUint32)descriptorSetNdx,
6272 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6275 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6276 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6277 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
// Recomputes, per pixel, the value chain the fragment shader produced across
// all uniform-buffer draws and writes the expected unpacked RGBA8 color.
// NOTE(review): the body of the `if` below (original lines 6300-6302) is
// elided in this extract — it presumably skips draws whose slice does not
// cover this pixel (e.g. `continue;`); confirm against the full source.
6281 void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
6283 	const deUint32	valuesPerPixel	= (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
// Each uniform block is an array of uvec4s: arraySize vec4s = arrayIntSize uints.
6284 	const size_t	arraySize		= MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
6285 	const size_t	arrayIntSize	= arraySize * 4;
6287 	for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6288 	for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
// First draw whose slice contains this pixel (target width is 256).
6290 		const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (arrayIntSize / valuesPerPixel), m_descriptorSets.size() - 1);
6292 		for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6294 			const size_t	offset	= descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
6295 			const deUint32	callId	= (deUint32)descriptorSetNdx;
6297 			const deUint32	id		= callId * ((deUint32)arrayIntSize / valuesPerPixel) + (deUint32)y * 256u + (deUint32)x;
6299 			if (y * 256u + x < callId * (arrayIntSize / valuesPerPixel))
// Chase valuesPerPixel values through this slice, little-endian unpacking
// four reference bytes into each 32-bit value.
6303 				deUint32 value = id;
6305 				for (deUint32 i = 0; i < valuesPerPixel; i++)
6307 					value = ((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 0))
6308 							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 1)) << 8u)
6309 							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 2)) << 16u)
6310 							| (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 3)) << 24u);
// Unpack the final value into an RGBA8 color for the reference target.
6313 				const UVec4	vec	((value >>  0u) & 0xFFu,
6314 							 (value >>  8u) & 0xFFu,
6315 							 (value >> 16u) & 0xFFu,
6316 							 (value >> 24u) & 0xFFu);
6318 				context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that draws a full-screen quad whose fragment shader
// reads the whole test buffer through a single storage-buffer descriptor;
// chain length and buffer size are passed via push constants.
// NOTE(review): braces and access-specifier lines are elided in this extract.
6324 class RenderFragmentStorageBuffer : public RenderPassCommand
6327 	RenderFragmentStorageBuffer	(void) {}
6328 	~RenderFragmentStorageBuffer	(void);
6330 	const char*	getName		(void) const { return "RenderFragmentStorageBuffer"; }
6331 	void		logPrepare	(TestLog&, size_t) const;
6332 	void		logSubmit	(TestLog&, size_t) const;
6333 	void		prepare		(PrepareRenderPassContext&);
6334 	void		submit		(SubmitContext& context);
6335 	void		verify		(VerifyRenderPassContext&, size_t);
6338 	PipelineResources	m_resources;
6339 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6340 	vk::Move<vk::VkDescriptorSet>	m_descriptorSet;
// Cached in prepare() for use by submit()/verify().
6342 	vk::VkDeviceSize	m_bufferSize;
6343 	size_t	m_targetWidth;
6344 	size_t	m_targetHeight;
6347 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
6351 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6353 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6356 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6358 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
// Builds the pipeline (with a fragment-stage push-constant range) and a
// single storage-buffer descriptor covering the whole test buffer.
// NOTE(review): struct-initializer field lines (pNext, flags, offset/range,
// closing braces) are elided in this extract.
6361 void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
6363 	const vk::DeviceInterface&				vkd						= context.getContext().getDeviceInterface();
6364 	const vk::VkDevice						device					= context.getContext().getDevice();
6365 	const vk::VkRenderPass					renderPass				= context.getRenderPass();
6366 	const deUint32							subpass					= 0;
6367 	const vk::Unique<vk::VkShaderModule>	vertexShaderModule		(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6368 	const vk::Unique<vk::VkShaderModule>	fragmentShaderModule	(vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
6369 	vector<vk::VkDescriptorSetLayoutBinding>	bindings;
6371 	m_bufferSize   = context.getBufferSize();
6372 	m_targetWidth  = context.getTargetWidth();
6373 	m_targetHeight = context.getTargetHeight();
// Single storage-buffer binding visible to the fragment stage.
6376 		const vk::VkDescriptorSetLayoutBinding binding =
6379 			vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6381 			vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6385 		bindings.push_back(binding);
// Push constants carry per-draw parameters (see submit()).
6387 	const vk::VkPushConstantRange pushConstantRange =
6389 		vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6394 	createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6395 								vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
// Unlike the uniform-buffer variant, one descriptor covers the whole buffer.
6398 	const deUint32 descriptorCount = 1;
6399 	const vk::VkDescriptorPoolSize poolSizes =
6401 		vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6404 	const vk::VkDescriptorPoolCreateInfo createInfo =
6406 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6408 		vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6415 	m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6419 	const vk::VkDescriptorSetLayout			layout			= *m_resources.descriptorSetLayout;
6420 	const vk::VkDescriptorSetAllocateInfo	allocateInfo	=
6422 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6430 	m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6433 	const vk::VkDescriptorBufferInfo bufferInfo =
6435 		context.getBuffer(),
6439 	const vk::VkWriteDescriptorSet write =
6441 		vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6443 		m_descriptorSet.get(),
6447 		vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6453 	vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6458 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6460 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6461 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6463 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6467 const deUint32 valuesPerPixel;
6468 const deUint32 bufferSize;
6471 (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6472 (deUint32)m_bufferSize
6475 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6476 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6477 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6480 void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
6482 const deUint32 valuesPerPixel = (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);
6484 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6485 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6487 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6489 deUint32 value = id;
6491 for (deUint32 i = 0; i < valuesPerPixel; i++)
6493 value = (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
6494 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
6495 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
6496 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);
6499 const UVec4 vec ((value >> 0u) & 0xFFu,
6500 (value >> 8u) & 0xFFu,
6501 (value >> 16u) & 0xFFu,
6502 (value >> 24u) & 0xFFu);
6504 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
// Render-pass command that draws a full-screen quad whose fragment shader
// reads the test buffer through uniform texel buffer views, one descriptor
// set + buffer view per m_maxUniformTexelCount-sized slice.
// NOTE(review): braces and access-specifier lines are elided in this extract.
6508 class RenderFragmentUniformTexelBuffer : public RenderPassCommand
6511 	RenderFragmentUniformTexelBuffer	(void) {}
6512 	~RenderFragmentUniformTexelBuffer	(void);
6514 	const char*	getName		(void) const { return "RenderFragmentUniformTexelBuffer"; }
6515 	void		logPrepare	(TestLog&, size_t) const;
6516 	void		logSubmit	(TestLog&, size_t) const;
6517 	void		prepare		(PrepareRenderPassContext&);
6518 	void		submit		(SubmitContext& context);
6519 	void		verify		(VerifyRenderPassContext&, size_t);
6522 	PipelineResources	m_resources;
6523 	vk::Move<vk::VkDescriptorPool>	m_descriptorPool;
6524 	vector<vk::VkDescriptorSet>	m_descriptorSets;
// Raw (non-Move) buffer-view handles; destroyed manually in the destructor.
6525 	vector<vk::VkBufferView>	m_bufferViews;
// Cached so the destructor can destroy the buffer views.
6527 	const vk::DeviceInterface*	m_vkd;
6528 	vk::VkDevice	m_device;
6529 	vk::VkDeviceSize	m_bufferSize;
// Device limit maxTexelBufferElements; caps texels per buffer view.
6530 	deUint32	m_maxUniformTexelCount;
6531 	size_t	m_targetWidth;
6532 	size_t	m_targetHeight;
6535 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6537 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6539 if (!!m_bufferViews[bufferViewNdx])
6541 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6542 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6547 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6549 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6552 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6554 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6557 void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
6559 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6560 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6561 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6562 const vk::VkDevice device = context.getContext().getDevice();
6563 const vk::VkRenderPass renderPass = context.getRenderPass();
6564 const deUint32 subpass = 0;
6565 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6566 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
6567 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6571 m_bufferSize = context.getBufferSize();
6572 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6573 m_targetWidth = context.getTargetWidth();
6574 m_targetHeight = context.getTargetHeight();
6577 const vk::VkDescriptorSetLayoutBinding binding =
6580 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6582 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6586 bindings.push_back(binding);
6588 const vk::VkPushConstantRange pushConstantRange =
6590 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6595 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6596 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6599 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
6600 const vk::VkDescriptorPoolSize poolSizes =
6602 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6605 const vk::VkDescriptorPoolCreateInfo createInfo =
6607 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6609 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6616 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6617 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6618 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6621 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6623 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6624 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6625 : m_maxUniformTexelCount * 4) / 4;
6626 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6627 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6629 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6637 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6640 const vk::VkBufferViewCreateInfo createInfo =
6642 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6646 context.getBuffer(),
6647 vk::VK_FORMAT_R32_UINT,
6648 descriptorSetNdx * m_maxUniformTexelCount * 4,
6652 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6656 const vk::VkWriteDescriptorSet write =
6658 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6660 m_descriptorSets[descriptorSetNdx],
6664 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6667 &m_bufferViews[descriptorSetNdx]
6670 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6675 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6677 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6678 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6680 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6682 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6686 const deUint32 callId;
6687 const deUint32 valuesPerPixel;
6688 const deUint32 maxUniformTexelCount;
6691 (deUint32)descriptorSetNdx,
6692 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6693 m_maxUniformTexelCount
6696 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6697 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6698 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6702 void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6704 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);
6706 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6707 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6709 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6711 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6713 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 4;
6714 const deUint32 callId = (deUint32)descriptorSetNdx;
6716 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6717 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6718 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6719 : m_maxUniformTexelCount * 4) / 4;
6721 if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
6725 deUint32 value = id;
6727 for (deUint32 i = 0; i < valuesPerPixel; i++)
6729 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6730 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6731 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6732 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6735 const UVec4 vec ((value >> 0u) & 0xFFu,
6736 (value >> 8u) & 0xFFu,
6737 (value >> 16u) & 0xFFu,
6738 (value >> 24u) & 0xFFu);
6740 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6746 class RenderFragmentStorageTexelBuffer : public RenderPassCommand
6749 RenderFragmentStorageTexelBuffer (void) {}
6750 ~RenderFragmentStorageTexelBuffer (void);
6752 const char* getName (void) const { return "RenderFragmentStorageTexelBuffer"; }
6753 void logPrepare (TestLog&, size_t) const;
6754 void logSubmit (TestLog&, size_t) const;
6755 void prepare (PrepareRenderPassContext&);
6756 void submit (SubmitContext& context);
6757 void verify (VerifyRenderPassContext&, size_t);
6760 PipelineResources m_resources;
6761 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6762 vector<vk::VkDescriptorSet> m_descriptorSets;
6763 vector<vk::VkBufferView> m_bufferViews;
6765 const vk::DeviceInterface* m_vkd;
6766 vk::VkDevice m_device;
6767 vk::VkDeviceSize m_bufferSize;
6768 deUint32 m_maxStorageTexelCount;
6769 size_t m_targetWidth;
6770 size_t m_targetHeight;
6773 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6775 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6777 if (!!m_bufferViews[bufferViewNdx])
6779 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6780 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6785 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6787 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
6790 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6792 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6795 void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
6797 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6798 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6799 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6800 const vk::VkDevice device = context.getContext().getDevice();
6801 const vk::VkRenderPass renderPass = context.getRenderPass();
6802 const deUint32 subpass = 0;
6803 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6804 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
6805 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6809 m_bufferSize = context.getBufferSize();
6810 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6811 m_targetWidth = context.getTargetWidth();
6812 m_targetHeight = context.getTargetHeight();
6815 const vk::VkDescriptorSetLayoutBinding binding =
6818 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6820 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6824 bindings.push_back(binding);
6826 const vk::VkPushConstantRange pushConstantRange =
6828 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6833 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6834 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6837 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
6838 const vk::VkDescriptorPoolSize poolSizes =
6840 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6843 const vk::VkDescriptorPoolCreateInfo createInfo =
6845 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6847 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6854 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6855 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6856 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6859 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6861 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6862 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6863 : m_maxStorageTexelCount * 4) / 4;
6864 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6865 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6867 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6875 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6878 const vk::VkBufferViewCreateInfo createInfo =
6880 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6884 context.getBuffer(),
6885 vk::VK_FORMAT_R32_UINT,
6886 descriptorSetNdx * m_maxStorageTexelCount * 4,
6890 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6894 const vk::VkWriteDescriptorSet write =
6896 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6898 m_descriptorSets[descriptorSetNdx],
6902 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6905 &m_bufferViews[descriptorSetNdx]
6908 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6913 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6915 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6916 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6918 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6920 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6924 const deUint32 callId;
6925 const deUint32 valuesPerPixel;
6926 const deUint32 maxStorageTexelCount;
6927 const deUint32 width;
6930 (deUint32)descriptorSetNdx,
6931 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6932 m_maxStorageTexelCount,
6933 (deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6934 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6935 : m_maxStorageTexelCount * 4u) / 4u
6938 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6939 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6940 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
6944 void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6946 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);
6948 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6949 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6951 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6953 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6955 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
6956 const deUint32 callId = (deUint32)descriptorSetNdx;
6958 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6959 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6960 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6961 : m_maxStorageTexelCount * 4) / 4;
6963 if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
6967 deUint32 value = id;
6969 for (deUint32 i = 0; i < valuesPerPixel; i++)
6971 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6972 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6973 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6974 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6977 const UVec4 vec ((value >> 0u) & 0xFFu,
6978 (value >> 8u) & 0xFFu,
6979 (value >> 16u) & 0xFFu,
6980 (value >> 24u) & 0xFFu);
6982 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
7002 OP_BUFFER_BINDMEMORY,
7004 OP_QUEUE_WAIT_FOR_IDLE,
7005 OP_DEVICE_WAIT_FOR_IDLE,
7007 OP_COMMAND_BUFFER_BEGIN,
7008 OP_COMMAND_BUFFER_END,
7010 // Buffer transfer operations
7014 OP_BUFFER_COPY_TO_BUFFER,
7015 OP_BUFFER_COPY_FROM_BUFFER,
7017 OP_BUFFER_COPY_TO_IMAGE,
7018 OP_BUFFER_COPY_FROM_IMAGE,
7022 OP_IMAGE_BINDMEMORY,
7024 OP_IMAGE_TRANSITION_LAYOUT,
7026 OP_IMAGE_COPY_TO_BUFFER,
7027 OP_IMAGE_COPY_FROM_BUFFER,
7029 OP_IMAGE_COPY_TO_IMAGE,
7030 OP_IMAGE_COPY_FROM_IMAGE,
7032 OP_IMAGE_BLIT_TO_IMAGE,
7033 OP_IMAGE_BLIT_FROM_IMAGE,
7037 OP_PIPELINE_BARRIER_GLOBAL,
7038 OP_PIPELINE_BARRIER_BUFFER,
7039 OP_PIPELINE_BARRIER_IMAGE,
7041 // Renderpass operations
7042 OP_RENDERPASS_BEGIN,
7045 // Commands inside render pass
7046 OP_RENDER_VERTEX_BUFFER,
7047 OP_RENDER_INDEX_BUFFER,
7049 OP_RENDER_VERTEX_UNIFORM_BUFFER,
7050 OP_RENDER_FRAGMENT_UNIFORM_BUFFER,
7052 OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
7053 OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,
7055 OP_RENDER_VERTEX_STORAGE_BUFFER,
7056 OP_RENDER_FRAGMENT_STORAGE_BUFFER,
7058 OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
7059 OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,
7061 OP_RENDER_VERTEX_STORAGE_IMAGE,
7062 OP_RENDER_VERTEX_SAMPLED_IMAGE,
7068 STAGE_COMMAND_BUFFER,
7073 vk::VkAccessFlags getWriteAccessFlags (void)
7075 return vk::VK_ACCESS_SHADER_WRITE_BIT
7076 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7077 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7078 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
7079 | vk::VK_ACCESS_HOST_WRITE_BIT
7080 | vk::VK_ACCESS_MEMORY_WRITE_BIT;
7083 bool isWriteAccess (vk::VkAccessFlagBits access)
7085 return (getWriteAccessFlags() & access) != 0;
7091 CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);
7093 bool isValid (vk::VkPipelineStageFlagBits stage,
7094 vk::VkAccessFlagBits access) const;
7096 void perform (vk::VkPipelineStageFlagBits stage,
7097 vk::VkAccessFlagBits access);
7099 void submitCommandBuffer (void);
7100 void waitForIdle (void);
7102 void getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7103 vk::VkAccessFlags& srcAccesses,
7104 vk::VkPipelineStageFlags& dstStages,
7105 vk::VkAccessFlags& dstAccesses) const;
7107 void barrier (vk::VkPipelineStageFlags srcStages,
7108 vk::VkAccessFlags srcAccesses,
7109 vk::VkPipelineStageFlags dstStages,
7110 vk::VkAccessFlags dstAccesses);
7112 void imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7113 vk::VkAccessFlags srcAccesses,
7114 vk::VkPipelineStageFlags dstStages,
7115 vk::VkAccessFlags dstAccesses);
7117 void checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7118 vk::VkAccessFlags srcAccesses,
7119 vk::VkPipelineStageFlags dstStages,
7120 vk::VkAccessFlags dstAccesses);
7122 // Everything is clean and there is no need for barriers
7123 bool isClean (void) const;
7125 vk::VkPipelineStageFlags getAllowedStages (void) const { return m_allowedStages; }
7126 vk::VkAccessFlags getAllowedAcceses (void) const { return m_allowedAccesses; }
7128 // Limit which stages and accesses are used by the CacheState tracker
7129 const vk::VkPipelineStageFlags m_allowedStages;
7130 const vk::VkAccessFlags m_allowedAccesses;
7132 // [dstStage][srcStage] = srcAccesses
7133 // In stage dstStage write srcAccesses from srcStage are not yet available
7134 vk::VkAccessFlags m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7135 // Latest pipeline transition is not available in stage
7136 bool m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
7137 // [dstStage] = dstAccesses
7138 // In stage dstStage ops with dstAccesses are not yet visible
7139 vk::VkAccessFlags m_invisibleOperations[PIPELINESTAGE_LAST];
7141 // [dstStage] = srcStage
7142 // Memory operation in srcStage have not completed before dstStage
7143 vk::VkPipelineStageFlags m_incompleteOperations[PIPELINESTAGE_LAST];
7146 CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7147 : m_allowedStages (allowedStages)
7148 , m_allowedAccesses (allowedAccesses)
7150 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7152 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7154 if ((dstStage_ & m_allowedStages) == 0)
7157 // All operations are initially visible
7158 m_invisibleOperations[dstStage] = 0;
7160 // There are no incomplete read operations initially
7161 m_incompleteOperations[dstStage] = 0;
7163 // There are no incomplete layout transitions
7164 m_unavailableLayoutTransition[dstStage] = false;
7166 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7168 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7170 if ((srcStage_ & m_allowedStages) == 0)
7173 // There are no write operations that are not yet available
7175 m_unavailableWriteOperations[dstStage][srcStage] = 0;
7180 bool CacheState::isValid (vk::VkPipelineStageFlagBits stage,
7181 vk::VkAccessFlagBits access) const
7183 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7184 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7186 const PipelineStage dstStage = pipelineStageFlagToPipelineStage(stage);
7188 // Previous operations are not visible to access on stage
7189 if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7192 if (isWriteAccess(access))
7194 // Memory operations from other stages have not completed before
7196 if (m_incompleteOperations[dstStage] != 0)
7203 void CacheState::perform (vk::VkPipelineStageFlagBits stage,
7204 vk::VkAccessFlagBits access)
7206 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7207 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7209 const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7211 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7213 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7215 if ((dstStage_ & m_allowedStages) == 0)
7218 // Mark stage as incomplete for all stages
7219 m_incompleteOperations[dstStage] |= stage;
7221 if (isWriteAccess(access))
7223 // Mark all accesses from all stages invisible
7224 m_invisibleOperations[dstStage] |= m_allowedAccesses;
7226 // Mark write access from srcStage unavailable to all stages
7227 m_unavailableWriteOperations[dstStage][srcStage] |= access;
7232 void CacheState::submitCommandBuffer (void)
7234 // Flush all host writes and reads
7235 barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7236 m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
7241 void CacheState::waitForIdle (void)
7243 // Make all writes available
7244 barrier(m_allowedStages,
7245 m_allowedAccesses & getWriteAccessFlags(),
7249 // Make all writes visible on device side
7250 barrier(m_allowedStages,
7252 m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
7256 void CacheState::getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7257 vk::VkAccessFlags& srcAccesses,
7258 vk::VkPipelineStageFlags& dstStages,
7259 vk::VkAccessFlags& dstAccesses) const
7266 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7268 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7270 if ((dstStage_ & m_allowedStages) == 0)
7273 // Make sure all previous operation are complete in all stages
7274 if (m_incompleteOperations[dstStage])
7276 dstStages |= dstStage_;
7277 srcStages |= m_incompleteOperations[dstStage];
7280 // Make sure all read operations are visible in dstStage
7281 if (m_invisibleOperations[dstStage])
7283 dstStages |= dstStage_;
7284 dstAccesses |= m_invisibleOperations[dstStage];
7287 // Make sure all write operations fro mall stages are available
7288 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7290 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7292 if ((srcStage_ & m_allowedStages) == 0)
7295 if (m_unavailableWriteOperations[dstStage][srcStage])
7297 dstStages |= dstStage_;
7298 srcStages |= dstStage_;
7299 srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage];
7302 if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
7304 // Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
7305 // but has completed in srcStage.
7306 dstStages |= dstStage_;
7307 srcStages |= dstStage_;
7312 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7313 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7314 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7315 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7318 void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7319 vk::VkAccessFlags srcAccesses,
7320 vk::VkPipelineStageFlags dstStages,
7321 vk::VkAccessFlags dstAccesses)
7323 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7324 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7325 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7326 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7328 DE_UNREF(srcStages);
7329 DE_UNREF(srcAccesses);
7331 DE_UNREF(dstStages);
7332 DE_UNREF(dstAccesses);
7334 #if defined(DE_DEBUG)
7335 // Check that all stages have completed before srcStages or are in srcStages.
7337 vk::VkPipelineStageFlags completedStages = srcStages;
7339 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7341 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7343 if ((srcStage_ & srcStages) == 0)
7346 completedStages |= (~m_incompleteOperations[srcStage]);
7349 DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
7352 // Check that any write is available at least in one stage. Since all stages are complete even single flush is enough.
7353 if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
7355 bool anyWriteAvailable = false;
7357 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7359 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7361 if ((dstStage_ & m_allowedStages) == 0)
7364 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7366 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7368 if ((srcStage_ & m_allowedStages) == 0)
7371 if (m_unavailableWriteOperations[dstStage][srcStage] != (getWriteAccessFlags() & m_allowedAccesses))
7373 anyWriteAvailable = true;
7379 DE_ASSERT(anyWriteAvailable);
7384 void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7385 vk::VkAccessFlags srcAccesses,
7386 vk::VkPipelineStageFlags dstStages,
7387 vk::VkAccessFlags dstAccesses)
7389 checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7391 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7393 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7395 if ((dstStage_ & m_allowedStages) == 0)
7398 // All stages are incomplete after the barrier except each dstStage in it self.
7399 m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7401 // All memory operations are invisible unless they are listed in dstAccess
7402 m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7404 // Layout transition is unavailable in stage unless it was listed in dstStages
7405 m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7407 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7409 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7411 if ((srcStage_ & m_allowedStages) == 0)
7414 // All write operations are available after layout transition
7415 m_unavailableWriteOperations[dstStage][srcStage] = 0;
7420 void CacheState::barrier (vk::VkPipelineStageFlags srcStages,
7421 vk::VkAccessFlags srcAccesses,
7422 vk::VkPipelineStageFlags dstStages,
7423 vk::VkAccessFlags dstAccesses)
7425 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7426 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7427 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7428 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7432 vk::VkPipelineStageFlags oldIncompleteOperations[PIPELINESTAGE_LAST];
7433 vk::VkAccessFlags oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7434 bool oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];
7436 deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
7437 deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
7438 deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));
7440 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7442 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7444 if ((srcStage_ & srcStages) == 0)
7447 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7449 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7451 if ((dstStage_ & dstStages) == 0)
7454 // Stages that have completed before srcStage have also completed before dstStage
7455 m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
7457 // Image layout transition in srcStage are now available in dstStage
7458 m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];
7460 for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
7462 const PipelineStage sharedStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
7464 if ((sharedStage_ & m_allowedStages) == 0)
7467 // Writes that are available in srcStage are also available in dstStage
7468 m_unavailableWriteOperations[dstStage][sharedStage] &= oldUnavailableWriteOperations[srcStage][sharedStage];
7475 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7477 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7478 bool allWritesAvailable = true;
7480 if ((dstStage_ & dstStages) == 0)
7483 // Operations in srcStages have completed before any stage in dstStages
7484 m_incompleteOperations[dstStage] &= ~srcStages;
7486 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7488 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7490 if ((srcStage_ & m_allowedStages) == 0)
7493 // Make srcAccesses from srcStage available in dstStage
7494 if ((srcStage_ & srcStages) != 0)
7495 m_unavailableWriteOperations[dstStage][srcStage] &= ~srcAccesses;
7497 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
7498 allWritesAvailable = false;
7501 // If all writes are available in dstStage make dstAccesses also visible
7502 if (allWritesAvailable)
7503 m_invisibleOperations[dstStage] &= ~dstAccesses;
7507 bool CacheState::isClean (void) const
7509 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7511 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7513 if ((dstStage_ & m_allowedStages) == 0)
7516 // Some operations are not visible to some stages
7517 if (m_invisibleOperations[dstStage] != 0)
7520 // There are operation that have not completed yet
7521 if (m_incompleteOperations[dstStage] != 0)
7524 // Layout transition has not completed yet
7525 if (m_unavailableLayoutTransition[dstStage])
7528 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7530 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7532 if ((srcStage_ & m_allowedStages) == 0)
7535 // Some write operations are not available yet
7536 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
// Returns whether the given image layout is compatible with the test's
// Usage flag set: GENERAL is always allowed (case falls through to an
// elided 'return true'), while each *_OPTIMAL layout requires the matching
// usage bit to be present.
7544 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7548 case vk::VK_IMAGE_LAYOUT_GENERAL:
7551 case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7552 return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7554 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7555 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
// Read-only depth/stencil is keyed off the same attachment usage bit as
// the writable depth/stencil layout.
7557 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7558 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7560 case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7561 // \todo [2016-03-09 mika] Should include input attachment
7562 return (usage & USAGE_SAMPLED_IMAGE) != 0;
7564 case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7565 return (usage & USAGE_TRANSFER_SRC) != 0;
7567 case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7568 return (usage & USAGE_TRANSFER_DST) != 0;
7570 case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
// Unreachable for any known layout enumerant.
7574 DE_FATAL("Unknown layout");
// Counts how many of the candidate image layouts are usable with the given
// usage flags (per layoutSupportedByUsage). Used by the random layout
// selection to size the choice space and to decide whether a layout
// transition op is even possible.
7579 size_t getNumberOfSupportedLayouts (Usage usage)
// Candidate layouts; must stay in sync with the list in getRandomNextLayout.
7581 const vk::VkImageLayout layouts[] =
7583 vk::VK_IMAGE_LAYOUT_GENERAL,
7584 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7585 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7586 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7587 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7588 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7589 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7591 size_t supportedLayoutCount = 0;
7593 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7595 const vk::VkImageLayout layout = layouts[layoutNdx];
7597 if (layoutSupportedByUsage(usage, layout))
7598 supportedLayoutCount++;
7601 return supportedLayoutCount;
// Picks a random image layout that is supported by 'usage' and differs from
// 'previousLayout'. When the previous layout is UNDEFINED all supported
// layouts are candidates; otherwise the previous layout is excluded, which
// is why the modulus below shrinks by one.
7604 vk::VkImageLayout getRandomNextLayout (de::Random& rng,
7606 vk::VkImageLayout previousLayout)
// Same candidate list as getNumberOfSupportedLayouts - keep in sync.
7608 const vk::VkImageLayout layouts[] =
7610 vk::VK_IMAGE_LAYOUT_GENERAL,
7611 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7612 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7613 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7614 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7615 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7616 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7618 const size_t supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7620 DE_ASSERT(supportedLayoutCount > 0);
// Uniform index into the supported-and-not-previous layouts.
7622 size_t nextLayoutNdx = ((size_t)rng.getUint64()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7623 ? supportedLayoutCount
7624 : supportedLayoutCount - 1);
// Scan candidates, counting down nextLayoutNdx on each eligible layout
// (the decrement/return lines are elided in this view).
7626 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7628 const vk::VkImageLayout layout = layouts[layoutNdx];
7630 if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7632 if (nextLayoutNdx == 0)
// Should never fall out of the loop when the assert above holds.
7639 DE_FATAL("Unreachable");
7640 return vk::VK_IMAGE_LAYOUT_UNDEFINED;
// Shadow state used by the command-sequence generator: tracks what the test
// has done so far (host mapping, flush/invalidate status, object lifetime,
// image layout, cache hazards) so that only valid next operations are
// offered. Starts on the host with a clean, coherent view of memory.
// NOTE(review): several members (mapped, hasBuffer, hasImage, queueIdle,
// deviceIdle, rng, cache, stage, ...) are elided from this view.
7645 State (Usage usage, deUint32 seed)
7646 : stage (STAGE_HOST)
// Cache tracker restricted to the stages/accesses this usage can produce.
7647 , cache (usageToStageFlags(usage), usageToAccessFlags(usage))
7650 , hostInvalidated (true)
7651 , hostFlushed (true)
7652 , memoryDefined (false)
7654 , hasBoundBufferMemory (false)
7656 , hasBoundImageMemory (false)
7657 , imageLayout (vk::VK_IMAGE_LAYOUT_UNDEFINED)
7658 , imageDefined (false)
7661 , commandBufferIsEmpty (true)
7662 , renderPassIsEmpty (true)
// True when no device write has happened since the last host invalidate.
7671 bool hostInvalidated;
7676 bool hasBoundBufferMemory;
7679 bool hasBoundImageMemory;
7680 vk::VkImageLayout imageLayout;
// Used to require at least one op before ending a command buffer/render pass.
7686 bool commandBufferIsEmpty;
7687 bool renderPassIsEmpty;
// Fills 'ops' with every operation that is legal in the current shadow
// state. The generator later picks one at random, so correctness of the
// whole test depends on the preconditions encoded here (e.g. reads are only
// offered when the corresponding cache access isValid()). The function is
// split by execution context: host, command buffer, render pass.
7690 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
// ---- Host-side operations -------------------------------------------------
7692 if (state.stage == STAGE_HOST)
7694 if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
7696 // Host memory operations
7699 ops.push_back(OP_UNMAP);
7701 // Avoid flush and finish if they are not needed
7702 if (!state.hostFlushed)
7703 ops.push_back(OP_MAP_FLUSH);
// Invalidate is only offered when it would not discard host writes that
// the host still needs to read back (hence the isValid checks).
7705 if (!state.hostInvalidated
7707 && ((usage & USAGE_HOST_READ) == 0
7708 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
7709 && ((usage & USAGE_HOST_WRITE) == 0
7710 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
7712 ops.push_back(OP_MAP_INVALIDATE);
// Read-modify-write requires defined content and fully valid host caches.
7715 if (usage & USAGE_HOST_READ
7716 && usage & USAGE_HOST_WRITE
7717 && state.memoryDefined
7718 && state.hostInvalidated
7720 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
7721 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
7723 ops.push_back(OP_MAP_MODIFY);
7726 if (usage & USAGE_HOST_READ
7727 && state.memoryDefined
7728 && state.hostInvalidated
7730 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
7732 ops.push_back(OP_MAP_READ);
7735 if (usage & USAGE_HOST_WRITE
7736 && state.hostInvalidated
7738 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
7740 ops.push_back(OP_MAP_WRITE);
7744 ops.push_back(OP_MAP);
// Object lifetime: destroy only after binding, and only when the queue is
// idle so the GPU cannot still be using the object.
7747 if (state.hasBoundBufferMemory && state.queueIdle)
7749 // \note Destroy only buffers after they have been bound
7750 ops.push_back(OP_BUFFER_DESTROY);
7754 if (state.hasBuffer)
7756 if (!state.hasBoundBufferMemory)
7757 ops.push_back(OP_BUFFER_BINDMEMORY);
7759 else if (!state.hasImage && supportsBuffers) // Avoid creating buffer if there is already image
7760 ops.push_back(OP_BUFFER_CREATE);
7763 if (state.hasBoundImageMemory && state.queueIdle)
7765 // \note Destroy only image after they have been bound
7766 ops.push_back(OP_IMAGE_DESTROY);
7772 if (!state.hasBoundImageMemory)
7773 ops.push_back(OP_IMAGE_BINDMEMORY);
7775 else if (!state.hasBuffer && supportsImages) // Avoid creating image if there is already buffer
7776 ops.push_back(OP_IMAGE_CREATE);
7779 // Host writes must be flushed before GPU commands and there must be
7780 // buffer or image for GPU commands
7781 if (state.hostFlushed
7782 && (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
7783 && (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
7784 && (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start command buffer if there are no ways to use memory from gpu
7786 ops.push_back(OP_COMMAND_BUFFER_BEGIN);
7789 if (!state.deviceIdle)
7790 ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
7792 if (!state.queueIdle)
7793 ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
// ---- Command-buffer-level operations --------------------------------------
7795 else if (state.stage == STAGE_COMMAND_BUFFER)
// Barriers are only useful while some hazard is outstanding.
7797 if (!state.cache.isClean())
7799 ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
7802 ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
7804 if (state.hasBuffer)
7805 ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
7808 if (state.hasBoundBufferMemory)
// Transfer writes to the buffer require a visible TRANSFER_WRITE access.
7810 if (usage & USAGE_TRANSFER_DST
7811 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
7813 ops.push_back(OP_BUFFER_FILL);
7814 ops.push_back(OP_BUFFER_UPDATE);
7815 ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
7816 ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
// Transfer reads additionally require the buffer content to be defined.
7819 if (usage & USAGE_TRANSFER_SRC
7820 && state.memoryDefined
7821 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
7823 ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
7824 ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
// A layout transition is only offered when there is somewhere to go:
// either the image is still UNDEFINED or more than one layout is usable.
7828 if (state.hasBoundImageMemory
7829 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7830 || getNumberOfSupportedLayouts(usage) > 1))
7832 ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
// Image transfer ops require a transfer-compatible current layout.
7835 if (usage & USAGE_TRANSFER_DST
7836 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
7837 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
7838 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
7840 ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
7841 ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
7842 ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
7845 if (usage & USAGE_TRANSFER_SRC
7846 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
7847 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
7848 && state.imageDefined
7849 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
7851 ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
7852 ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
7853 ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
// A render pass may begin only if at least one render-time read of the
// resource would be valid in the current state.
7858 // \todo [2016-03-09 mika] Add other usages?
7859 if ((state.memoryDefined
7860 && state.hasBoundBufferMemory
7861 && (((usage & USAGE_VERTEX_BUFFER)
7862 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
7863 || ((usage & USAGE_INDEX_BUFFER)
7864 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
7865 || ((usage & USAGE_UNIFORM_BUFFER)
7866 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
7867 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
7868 || ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
7869 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
7870 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
7871 || ((usage & USAGE_STORAGE_BUFFER)
7872 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
7873 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
7874 || ((usage & USAGE_STORAGE_TEXEL_BUFFER)
7875 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
7876 || (state.imageDefined
7877 && state.hasBoundImageMemory
7878 && (((usage & USAGE_STORAGE_IMAGE)
7879 && state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
7880 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
7881 || ((usage & USAGE_SAMPLED_IMAGE)
7882 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
7883 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
7884 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))))
7886 ops.push_back(OP_RENDERPASS_BEGIN);
7889 // \note This depends on previous operations and has to be always the
7890 // last command buffer operation check
7891 if (ops.empty() || !state.commandBufferIsEmpty)
7892 ops.push_back(OP_COMMAND_BUFFER_END);
// ---- Render-pass operations -----------------------------------------------
7894 else if (state.stage == STAGE_RENDER_PASS)
7896 if ((usage & USAGE_VERTEX_BUFFER) != 0
7897 && state.memoryDefined
7898 && state.hasBoundBufferMemory
7899 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
7901 ops.push_back(OP_RENDER_VERTEX_BUFFER);
7904 if ((usage & USAGE_INDEX_BUFFER) != 0
7905 && state.memoryDefined
7906 && state.hasBoundBufferMemory
7907 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
7909 ops.push_back(OP_RENDER_INDEX_BUFFER);
// Vertex vs fragment shader reads are gated independently per stage.
7912 if ((usage & USAGE_UNIFORM_BUFFER) != 0
7913 && state.memoryDefined
7914 && state.hasBoundBufferMemory)
7916 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
7917 ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
7919 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
7920 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
7923 if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
7924 && state.memoryDefined
7925 && state.hasBoundBufferMemory)
7927 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
7928 ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
7930 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
7931 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
7934 if ((usage & USAGE_STORAGE_BUFFER) != 0
7935 && state.memoryDefined
7936 && state.hasBoundBufferMemory)
7938 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
7939 ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
7941 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
7942 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
7945 if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
7946 && state.memoryDefined
7947 && state.hasBoundBufferMemory)
7949 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
7950 ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
7952 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
7953 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
// Storage images require GENERAL layout; sampled images also accept
// SHADER_READ_ONLY_OPTIMAL.
7956 if ((usage & USAGE_STORAGE_IMAGE) != 0
7957 && state.imageDefined
7958 && state.hasBoundImageMemory
7959 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL)
7960 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
7962 ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
7965 if ((usage & USAGE_SAMPLED_IMAGE) != 0
7966 && state.imageDefined
7967 && state.hasBoundImageMemory
7968 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
7969 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
7970 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
7972 ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
// Ending an empty render pass is not allowed.
7975 if (!state.renderPassIsEmpty)
7976 ops.push_back(OP_RENDERPASS_END);
7979 DE_FATAL("Unknown stage");
// Applies the side effects of operation 'op' to the shadow state so the
// generator's model stays in sync with what the real command will do:
// asserts the op's preconditions (mirroring getAvailableOps), updates
// flags/layout, advances the cache model, and consumes the exact same
// number of RNG draws as the corresponding command constructor will
// (the RNG streams must stay lock-step - see the DE_ASSERT(state.rng == rng)
// checks in the create* drivers).
// NOTE(review): many 'case'/'break' lines are elided in this view; groups
// of statements below belong to the cases indicated by the nearest label.
7982 void applyOp (State& state, const Memory& memory, Op op, Usage usage)
// OP_MAP (label elided): host maps the memory.
7987 DE_ASSERT(state.stage == STAGE_HOST);
7988 DE_ASSERT(!state.mapped);
7989 state.mapped = true;
// OP_UNMAP (label elided).
7993 DE_ASSERT(state.stage == STAGE_HOST);
7994 DE_ASSERT(state.mapped);
7995 state.mapped = false;
// OP_MAP_FLUSH (label elided): host writes become flushed.
7999 DE_ASSERT(state.stage == STAGE_HOST);
8000 DE_ASSERT(!state.hostFlushed);
8001 state.hostFlushed = true;
8004 case OP_MAP_INVALIDATE:
8005 DE_ASSERT(state.stage == STAGE_HOST);
8006 DE_ASSERT(!state.hostInvalidated);
8007 state.hostInvalidated = true;
// OP_MAP_READ (label elided); RNG draw matches HostMemoryAccess ctor.
8011 DE_ASSERT(state.stage == STAGE_HOST);
8012 DE_ASSERT(state.hostInvalidated);
8013 state.rng.getUint32();
// OP_MAP_WRITE (label elided): non-coherent memory needs a later flush.
8017 DE_ASSERT(state.stage == STAGE_HOST);
8018 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8019 state.hostFlushed = false;
// A host write defines buffer content and invalidates any image content.
8021 state.memoryDefined = true;
8022 state.imageDefined = false;
8023 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8024 state.rng.getUint32();
// OP_MAP_MODIFY (label elided).
8028 DE_ASSERT(state.stage == STAGE_HOST);
8029 DE_ASSERT(state.hostInvalidated);
8031 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8032 state.hostFlushed = false;
8034 state.rng.getUint32();
8037 case OP_BUFFER_CREATE:
8038 DE_ASSERT(state.stage == STAGE_HOST);
8039 DE_ASSERT(!state.hasBuffer);
8041 state.hasBuffer = true;
8044 case OP_BUFFER_DESTROY:
8045 DE_ASSERT(state.stage == STAGE_HOST);
8046 DE_ASSERT(state.hasBuffer);
8047 DE_ASSERT(state.hasBoundBufferMemory);
8049 state.hasBuffer = false;
8050 state.hasBoundBufferMemory = false;
8053 case OP_BUFFER_BINDMEMORY:
8054 DE_ASSERT(state.stage == STAGE_HOST);
8055 DE_ASSERT(state.hasBuffer);
8056 DE_ASSERT(!state.hasBoundBufferMemory);
8058 state.hasBoundBufferMemory = true;
8061 case OP_IMAGE_CREATE:
8062 DE_ASSERT(state.stage == STAGE_HOST);
8063 DE_ASSERT(!state.hasImage);
8064 DE_ASSERT(!state.hasBuffer);
8066 state.hasImage = true;
8069 case OP_IMAGE_DESTROY:
8070 DE_ASSERT(state.stage == STAGE_HOST);
8071 DE_ASSERT(state.hasImage);
8072 DE_ASSERT(state.hasBoundImageMemory);
8074 state.hasImage = false;
8075 state.hasBoundImageMemory = false;
8076 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8077 state.imageDefined = false;
8080 case OP_IMAGE_BINDMEMORY:
8081 DE_ASSERT(state.stage == STAGE_HOST);
8082 DE_ASSERT(state.hasImage);
8083 DE_ASSERT(!state.hasBoundImageMemory);
8085 state.hasBoundImageMemory = true;
8088 case OP_IMAGE_TRANSITION_LAYOUT:
8090 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8091 DE_ASSERT(state.hasImage);
8092 DE_ASSERT(state.hasBoundImageMemory);
8094 // \todo [2016-03-09 mika] Support linear tiling and predefined data
// 10% of transitions discard content by using an UNDEFINED old layout;
// this RNG sequence mirrors createCmdCommand's OP_IMAGE_TRANSITION_LAYOUT.
8095 const vk::VkImageLayout srcLayout = state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8096 const vk::VkImageLayout dstLayout = getRandomNextLayout(state.rng, usage, srcLayout);
8098 vk::VkPipelineStageFlags dirtySrcStages;
8099 vk::VkAccessFlags dirtySrcAccesses;
8100 vk::VkPipelineStageFlags dirtyDstStages;
8101 vk::VkAccessFlags dirtyDstAccesses;
8103 vk::VkPipelineStageFlags srcStages;
8104 vk::VkAccessFlags srcAccesses;
8105 vk::VkPipelineStageFlags dstStages;
8106 vk::VkAccessFlags dstAccesses;
8108 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8110 // Try masking some random bits
8111 srcStages = dirtySrcStages;
8112 srcAccesses = dirtySrcAccesses;
8114 dstStages = state.cache.getAllowedStages() & state.rng.getUint32();
8115 dstAccesses = state.cache.getAllowedAcceses() & state.rng.getUint32();
8117 // If there are no bits in dst stage mask use all stages
8118 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
// An empty src stage mask falls back to the dst mask (guard elided).
8121 srcStages = dstStages;
// Transitioning from UNDEFINED discards any previously defined content.
8123 if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8124 state.imageDefined = false;
8126 state.commandBufferIsEmpty = false;
8127 state.imageLayout = dstLayout;
8128 state.memoryDefined = false;
8129 state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8133 case OP_QUEUE_WAIT_FOR_IDLE:
8134 DE_ASSERT(state.stage == STAGE_HOST);
8135 DE_ASSERT(!state.queueIdle);
8137 state.queueIdle = true;
8139 state.cache.waitForIdle();
8142 case OP_DEVICE_WAIT_FOR_IDLE:
8143 DE_ASSERT(state.stage == STAGE_HOST);
8144 DE_ASSERT(!state.deviceIdle);
// Device idle implies queue idle.
8146 state.queueIdle = true;
8147 state.deviceIdle = true;
8149 state.cache.waitForIdle();
8152 case OP_COMMAND_BUFFER_BEGIN:
8153 DE_ASSERT(state.stage == STAGE_HOST);
8154 state.stage = STAGE_COMMAND_BUFFER;
8155 state.commandBufferIsEmpty = true;
8156 // Makes host writes visible to command buffer
8157 state.cache.submitCommandBuffer();
8160 case OP_COMMAND_BUFFER_END:
8161 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8162 state.stage = STAGE_HOST;
// Submitting work means the queue/device are no longer known to be idle.
8163 state.queueIdle = false;
8164 state.deviceIdle = false;
// Buffer-write transfer ops: content becomes buffer-defined; device write
// means non-coherent memory must be invalidated before the next host read.
8167 case OP_BUFFER_COPY_FROM_BUFFER:
8168 case OP_BUFFER_COPY_FROM_IMAGE:
8169 case OP_BUFFER_UPDATE:
8170 case OP_BUFFER_FILL:
8171 state.rng.getUint32();
8172 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8174 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8175 state.hostInvalidated = false;
8177 state.commandBufferIsEmpty = false;
8178 state.memoryDefined = true;
8179 state.imageDefined = false;
8180 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8181 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8184 case OP_BUFFER_COPY_TO_BUFFER:
8185 case OP_BUFFER_COPY_TO_IMAGE:
8186 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8188 state.commandBufferIsEmpty = false;
8189 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
// Blit consumes an extra RNG draw (scale choice) then falls through to the
// copy-from cases below.
8192 case OP_IMAGE_BLIT_FROM_IMAGE:
8193 state.rng.getBool();
8195 case OP_IMAGE_COPY_FROM_BUFFER:
8196 case OP_IMAGE_COPY_FROM_IMAGE:
8197 state.rng.getUint32();
8198 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8200 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8201 state.hostInvalidated = false;
8203 state.commandBufferIsEmpty = false;
8204 state.memoryDefined = false;
8205 state.imageDefined = true;
8206 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8209 case OP_IMAGE_BLIT_TO_IMAGE:
8210 state.rng.getBool();
8212 case OP_IMAGE_COPY_TO_BUFFER:
8213 case OP_IMAGE_COPY_TO_IMAGE:
8214 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8216 state.commandBufferIsEmpty = false;
8217 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8220 case OP_PIPELINE_BARRIER_GLOBAL:
8221 case OP_PIPELINE_BARRIER_BUFFER:
8222 case OP_PIPELINE_BARRIER_IMAGE:
8224 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8226 vk::VkPipelineStageFlags dirtySrcStages;
8227 vk::VkAccessFlags dirtySrcAccesses;
8228 vk::VkPipelineStageFlags dirtyDstStages;
8229 vk::VkAccessFlags dirtyDstAccesses;
8231 vk::VkPipelineStageFlags srcStages;
8232 vk::VkAccessFlags srcAccesses;
8233 vk::VkPipelineStageFlags dstStages;
8234 vk::VkAccessFlags dstAccesses;
8236 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
// Randomly narrow the full barrier; RNG sequence must match
// createCmdCommand's pipeline-barrier branch exactly.
8238 // Try masking some random bits
8239 srcStages = dirtySrcStages & state.rng.getUint32();
8240 srcAccesses = dirtySrcAccesses & state.rng.getUint32();
8242 dstStages = dirtyDstStages & state.rng.getUint32();
8243 dstAccesses = dirtyDstAccesses & state.rng.getUint32();
8245 // If there are no bits in stage mask use the original dirty stages
8246 srcStages = srcStages ? srcStages : dirtySrcStages;
8247 dstStages = dstStages ? dstStages : dirtyDstStages;
// Empty src stage mask falls back to dst (guard elided).
8250 srcStages = dstStages;
8252 state.commandBufferIsEmpty = false;
8253 state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8257 case OP_RENDERPASS_BEGIN:
8259 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8261 state.renderPassIsEmpty = true;
8262 state.stage = STAGE_RENDER_PASS;
8266 case OP_RENDERPASS_END:
8268 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8270 state.renderPassIsEmpty = true;
8271 state.stage = STAGE_COMMAND_BUFFER;
// Render ops: mark the render pass non-empty and record the read access
// the draw performs, per stage/access pair.
8275 case OP_RENDER_VERTEX_BUFFER:
8277 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8279 state.renderPassIsEmpty = false;
8280 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8284 case OP_RENDER_INDEX_BUFFER:
8286 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8288 state.renderPassIsEmpty = false;
8289 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8293 case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8294 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8296 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8298 state.renderPassIsEmpty = false;
8299 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8303 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8304 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8306 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8308 state.renderPassIsEmpty = false;
8309 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8313 case OP_RENDER_VERTEX_STORAGE_BUFFER:
8314 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8316 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8318 state.renderPassIsEmpty = false;
8319 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8323 case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8324 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8326 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8328 state.renderPassIsEmpty = false;
8329 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8333 case OP_RENDER_VERTEX_STORAGE_IMAGE:
8334 case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8336 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8338 state.renderPassIsEmpty = false;
8339 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8344 DE_FATAL("Unknown op");
// Factory for host-side Command objects (map/unmap, flush/invalidate, host
// memory access, object create/destroy/bind, wait-for-idle). The RNG draws
// here must match the draws applyOp consumes for the same op so the two
// streams stay synchronized.
8348 de::MovePtr<Command> createHostCommand (Op op,
8351 vk::VkSharingMode sharing)
8355 case OP_MAP: return de::MovePtr<Command>(new Map());
8356 case OP_UNMAP: return de::MovePtr<Command>(new UnMap());
8358 case OP_MAP_FLUSH: return de::MovePtr<Command>(new Flush());
8359 case OP_MAP_INVALIDATE: return de::MovePtr<Command>(new Invalidate());
// HostMemoryAccess(read, write, seed): read-only, write-only, read-modify-write.
8361 case OP_MAP_READ: return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8362 case OP_MAP_WRITE: return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8363 case OP_MAP_MODIFY: return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8365 case OP_BUFFER_CREATE: return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8366 case OP_BUFFER_DESTROY: return de::MovePtr<Command>(new DestroyBuffer());
8367 case OP_BUFFER_BINDMEMORY: return de::MovePtr<Command>(new BindBufferMemory());
8369 case OP_IMAGE_CREATE: return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8370 case OP_IMAGE_DESTROY: return de::MovePtr<Command>(new DestroyImage());
8371 case OP_IMAGE_BINDMEMORY: return de::MovePtr<Command>(new BindImageMemory());
8373 case OP_QUEUE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new QueueWaitIdle());
8374 case OP_DEVICE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new DeviceWaitIdle());
8377 DE_FATAL("Unknown op");
8378 return de::MovePtr<Command>(DE_NULL);
// Factory for command-buffer-level CmdCommand objects (transfers, layout
// transitions, pipeline barriers). Reads the pre-op shadow 'state' (e.g.
// current image layout) and must consume exactly the same RNG draws as
// applyOp does for the same op.
8382 de::MovePtr<CmdCommand> createCmdCommand (de::Random& rng,
8389 case OP_BUFFER_FILL: return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8390 case OP_BUFFER_UPDATE: return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8391 case OP_BUFFER_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8392 case OP_BUFFER_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8394 case OP_BUFFER_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8395 case OP_BUFFER_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8397 case OP_IMAGE_TRANSITION_LAYOUT:
8399 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8400 DE_ASSERT(state.hasImage);
8401 DE_ASSERT(state.hasBoundImageMemory);
// 10% chance to discard content via an UNDEFINED old layout - mirrors
// applyOp's OP_IMAGE_TRANSITION_LAYOUT branch exactly.
8403 const vk::VkImageLayout srcLayout = rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8404 const vk::VkImageLayout dstLayout = getRandomNextLayout(rng, usage, srcLayout);
8406 vk::VkPipelineStageFlags dirtySrcStages;
8407 vk::VkAccessFlags dirtySrcAccesses;
8408 vk::VkPipelineStageFlags dirtyDstStages;
8409 vk::VkAccessFlags dirtyDstAccesses;
8411 vk::VkPipelineStageFlags srcStages;
8412 vk::VkAccessFlags srcAccesses;
8413 vk::VkPipelineStageFlags dstStages;
8414 vk::VkAccessFlags dstAccesses;
8416 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8418 // Try masking some random bits
8419 srcStages = dirtySrcStages;
8420 srcAccesses = dirtySrcAccesses;
8422 dstStages = state.cache.getAllowedStages() & rng.getUint32();
8423 dstAccesses = state.cache.getAllowedAcceses() & rng.getUint32();
8425 // If there are no bits in dst stage mask use all stages
8426 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
// Empty src stage mask falls back to dst (guard elided in this view).
8429 srcStages = dstStages;
8431 return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
// Image copy/blit commands need the current layout to build the barrier-
// free transfer with the correct srcImageLayout/dstImageLayout.
8434 case OP_IMAGE_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8435 case OP_IMAGE_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8436 case OP_IMAGE_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8437 case OP_IMAGE_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8438 case OP_IMAGE_BLIT_TO_IMAGE:
// The rng.getBool() here corresponds to applyOp's extra draw for blits.
8440 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8441 return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8444 case OP_IMAGE_BLIT_FROM_IMAGE:
8446 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8447 return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8450 case OP_PIPELINE_BARRIER_GLOBAL:
8451 case OP_PIPELINE_BARRIER_BUFFER:
8452 case OP_PIPELINE_BARRIER_IMAGE:
8454 vk::VkPipelineStageFlags dirtySrcStages;
8455 vk::VkAccessFlags dirtySrcAccesses;
8456 vk::VkPipelineStageFlags dirtyDstStages;
8457 vk::VkAccessFlags dirtyDstAccesses;
8459 vk::VkPipelineStageFlags srcStages;
8460 vk::VkAccessFlags srcAccesses;
8461 vk::VkPipelineStageFlags dstStages;
8462 vk::VkAccessFlags dstAccesses;
8464 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
// Randomly narrow the full barrier; same RNG sequence as applyOp's
// pipeline-barrier branch.
8466 // Try masking some random bits
8467 srcStages = dirtySrcStages & rng.getUint32();
8468 srcAccesses = dirtySrcAccesses & rng.getUint32();
8470 dstStages = dirtyDstStages & rng.getUint32();
8471 dstAccesses = dirtyDstAccesses & rng.getUint32();
8473 // If there are no bits in stage mask use the original dirty stages
8474 srcStages = srcStages ? srcStages : dirtySrcStages;
8475 dstStages = dstStages ? dstStages : dirtyDstStages;
8478 srcStages = dstStages;
8480 PipelineBarrier::Type type;
8482 if (op == OP_PIPELINE_BARRIER_IMAGE)
8483 type = PipelineBarrier::TYPE_IMAGE;
8484 else if (op == OP_PIPELINE_BARRIER_BUFFER)
8485 type = PipelineBarrier::TYPE_BUFFER;
8486 else if (op == OP_PIPELINE_BARRIER_GLOBAL)
8487 type = PipelineBarrier::TYPE_GLOBAL;
8490 type = PipelineBarrier::TYPE_LAST;
8491 DE_FATAL("Unknown op");
// Only image barriers carry a layout (oldLayout == newLayout here: the
// barrier does not transition, it just synchronizes).
8494 if (type == PipelineBarrier::TYPE_IMAGE)
8495 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
8497 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
8501 DE_FATAL("Unknown op");
8502 return de::MovePtr<CmdCommand>(DE_NULL);
// Factory for render-pass-level commands. These draw using the tested
// resource as the given descriptor/binding type; the op itself encodes both
// the resource kind and the consuming shader stage. The unused de::Random&
// parameter keeps the signature parallel with the other create* factories.
8506 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
8512 case OP_RENDER_VERTEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer());
8513 case OP_RENDER_INDEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
8515 case OP_RENDER_VERTEX_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
8516 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
8518 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
8519 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
8521 case OP_RENDER_VERTEX_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
8522 case OP_RENDER_FRAGMENT_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
8524 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
8525 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
8527 case OP_RENDER_VERTEX_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
8528 case OP_RENDER_VERTEX_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
8531 DE_FATAL("Unknown op");
8532 return de::MovePtr<RenderPassCommand>(DE_NULL);
// Generates a random sequence of render-pass commands until either the op
// budget runs out or OP_RENDERPASS_END is randomly chosen, then wraps the
// commands in a SubmitRenderPass. 'nextOpRng' drives only op selection;
// command parameters come from state.rng so the streams stay independent.
// NOTE(review): try/catch and loop braces are elided in this view - the
// trailing delete loop is the exception-cleanup path.
8536 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory& memory,
8537 de::Random& nextOpRng,
8543 vector<RenderPassCommand*> commands;
8547 for (; opNdx < opCount; opNdx++)
8551 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8553 DE_ASSERT(!ops.empty());
8556 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8558 if (op == OP_RENDERPASS_END)
// Fork the parameter RNG so we can verify the command ctor and applyOp
// consumed identical draws (asserted below).
8564 de::Random rng (state.rng);
8566 commands.push_back(createRenderPassCommand(rng, state, op).release());
8567 applyOp(state, memory, op, usage);
8569 DE_ASSERT(state.rng == rng);
8574 applyOp(state, memory, OP_RENDERPASS_END, usage);
8575 return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
// Exception cleanup: the raw pointers are not yet owned by SubmitRenderPass.
8579 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
8580 delete commands[commandNdx];
// Generates a random sequence of command-buffer-level commands (possibly
// including nested render passes) until the op budget runs out or
// OP_COMMAND_BUFFER_END is chosen, wrapped in a SubmitCommandBuffer.
// Mirrors createRenderPassCommands one level up the hierarchy.
8586 de::MovePtr<Command> createCmdCommands (const Memory& memory,
8587 de::Random& nextOpRng,
8593 vector<CmdCommand*> commands;
8597 for (; opNdx < opCount; opNdx++)
8601 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8603 DE_ASSERT(!ops.empty());
8606 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8608 if (op == OP_COMMAND_BUFFER_END)
8614 // \note Command needs to known the state before the operation
// Render passes recurse: apply the begin-op first, then generate the
// pass contents with the shared op-selection RNG.
8615 if (op == OP_RENDERPASS_BEGIN)
8617 applyOp(state, memory, op, usage);
8618 commands.push_back(createRenderPassCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
// Fork parameter RNG to verify ctor and applyOp stay in lock-step.
8622 de::Random rng (state.rng);
8624 commands.push_back(createCmdCommand(rng, state, op, usage).release());
8625 applyOp(state, memory, op, usage);
8627 DE_ASSERT(state.rng == rng);
8634 applyOp(state, memory, OP_COMMAND_BUFFER_END, usage);
8635 return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
// Exception cleanup path for the not-yet-owned raw command pointers.
8639 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
8640 delete commands[commandNdx];
8646 void createCommands (vector<Command*>& commands,
8648 const Memory& memory,
8650 vk::VkSharingMode sharingMode,
8653 State state (usage, seed);
8654 // Used to select next operation only
8655 de::Random nextOpRng (seed ^ 12930809);
8657 commands.reserve(opCount);
8659 for (size_t opNdx = 0; opNdx < opCount; opNdx++)
8663 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8665 DE_ASSERT(!ops.empty());
8668 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8670 if (op == OP_COMMAND_BUFFER_BEGIN)
8672 applyOp(state, memory, op, usage);
8673 commands.push_back(createCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
8677 de::Random rng (state.rng);
8679 commands.push_back(createHostCommand(op, rng, usage, sharingMode).release());
8680 applyOp(state, memory, op, usage);
8682 // Make sure that random generator is in sync
8683 DE_ASSERT(state.rng == rng);
8688 // Clean up resources
8689 if (state.hasBuffer && state.hasImage)
8691 if (!state.queueIdle)
8692 commands.push_back(new QueueWaitIdle());
8694 if (state.hasBuffer)
8695 commands.push_back(new DestroyBuffer());
8698 commands.push_back(new DestroyImage());
// Test instance that runs the randomized pipeline-barrier memory test as a
// small state machine: each iterate() call invokes the current stage via
// m_stage and keeps returning "incomplete" until every memory type has been
// exercised for the configured number of iterations.
8702 class MemoryTestInstance : public TestInstance
// Pointer-to-member type for the per-stage functions below.
8706 typedef bool(MemoryTestInstance::*StageFunc)(void);
8708 MemoryTestInstance (::vkt::Context& context, const TestConfig& config);
8709 ~MemoryTestInstance (void);
8711 tcu::TestStatus iterate (void);
8714 const TestConfig m_config;
8715 const size_t m_iterationCount;
8716 const size_t m_opCount;
8717 const vk::VkPhysicalDeviceMemoryProperties m_memoryProperties;
// Index of the memory type currently under test; advanced by nextMemoryType().
8718 deUint32 m_memoryTypeNdx;
8721 tcu::ResultCollector m_resultCollector;
// Owning pointers; deleted and nulled in resetResources().
8723 vector<Command*> m_commands;
8724 MovePtr<Memory> m_memory;
8725 MovePtr<Context> m_renderContext;
8726 MovePtr<PrepareContext> m_prepareContext;
// Stage functions: each returns true while there is more work to do.
8728 bool nextIteration (void);
8729 bool nextMemoryType (void);
8731 bool createCommandsAndAllocateMemory (void);
8732 bool prepare (void);
8733 bool execute (void);
8735 void resetResources (void);
// Wait for the device to go idle, then delete all generated commands and
// release the prepare context so the next iteration starts from scratch.
8738 void MemoryTestInstance::resetResources (void)
8740 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
8741 const vk::VkDevice device = m_context.getDevice();
// Commands may still be referenced by in-flight GPU work; idle first.
8743 VK_CHECK(vkd.deviceWaitIdle(device));
8745 for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
8747 delete m_commands[commandNdx];
8748 m_commands[commandNdx] = DE_NULL;
8752 m_prepareContext.clear();
// Advance to the next iteration of the current memory type; once all
// iterations are done, fall through to the next memory type.
// Returns true while there is more work to do.
8756 bool MemoryTestInstance::nextIteration (void)
8760 if (m_iteration < m_iterationCount)
// More iterations left: restart the stage machine at command creation.
8763 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
8767 return nextMemoryType();
// Advance to the next memory type. Returns true if another memory type
// remains to be tested, false when the whole test is complete.
8770 bool MemoryTestInstance::nextMemoryType (void)
// Resources must already have been released before advancing.
8774 DE_ASSERT(m_commands.empty());
8778 if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
// Start the stage machine over for the new memory type.
8781 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
// Log the test-case parameters and the device's memory heaps/types, then
// build the rendering Context over the universal queue.
8792 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
8793 : TestInstance (context)
8795 , m_iterationCount (5)
8797 , m_memoryProperties (vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
8798 , m_memoryTypeNdx (0)
// Start the stage machine at command creation for memory type 0.
8800 , m_stage (&MemoryTestInstance::createCommandsAndAllocateMemory)
8801 , m_resultCollector (context.getTestContext().getLog())
8803 , m_memory (DE_NULL)
8805 TestLog& log = context.getTestContext().getLog();
8807 const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
8809 log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
8810 log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
8811 log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
// Dump the full memory topology so failures can be tied to a heap/type.
8815 const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
8817 for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
8819 const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
8821 log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
8822 log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
8825 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
8827 const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
8829 log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
8830 log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
// All generated commands execute against this single universal queue.
8835 const vk::InstanceInterface& vki = context.getInstanceInterface();
8836 const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
8837 const vk::DeviceInterface& vkd = context.getDeviceInterface();
8838 const vk::VkDevice device = context.getDevice();
8839 const vk::VkQueue queue = context.getUniversalQueue();
8840 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
8841 vector<pair<deUint32, vk::VkQueue> > queues;
8843 queues.push_back(std::make_pair(queueFamilyIndex, queue));
8845 m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
// Destructor — presumably releases remaining commands/contexts (body lines
// not visible in this chunk; TODO confirm it calls resetResources()).
8849 MemoryTestInstance::~MemoryTestInstance (void)
// Stage: determine what the current memory type supports, allocate the
// Memory object, and generate the randomized command list for this
// iteration. Skips to the next memory type when the type is unusable.
8854 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
8856 const vk::VkDevice device = m_context.getDevice();
8857 TestLog& log = m_context.getTestContext().getLog();
8858 const vk::InstanceInterface& vki = m_context.getInstanceInterface();
8859 const vk::VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
8860 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
8861 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
8862 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
8863 "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
8864 const vector<deUint32>& queues = m_renderContext->getQueueFamilies();
8866 DE_ASSERT(m_commands.empty());
// Host access requires a host-visible memory type; otherwise skip.
8868 if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
8869 && !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
8871 log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
8873 return nextMemoryType();
// Probe the largest buffer / RGBA8 image this memory type can back for
// the requested usages (0 when the usage needs no buffer/image).
8879 const vk::VkBufferUsageFlags bufferUsage = usageToBufferUsageFlags(m_config.usage);
8880 const vk::VkImageUsageFlags imageUsage = usageToImageUsageFlags(m_config.usage);
8881 const vk::VkDeviceSize maxBufferSize = bufferUsage != 0
8882 ? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
8884 const IVec2 maxImageSize = imageUsage != 0
8885 ? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
8888 log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
8889 log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
8891 // Skip tests if there are no supported operations
8892 if (maxBufferSize == 0
8893 && maxImageSize[0] == 0
8894 && (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
8896 log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
8898 return nextMemoryType();
// Seed depends on both iteration and memory type so each run differs.
8902 const deUint32 seed = 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount + m_memoryTypeNdx);
8904 m_memory = MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
8906 log << TestLog::Message << "Create commands" << TestLog::EndMessage;
8907 createCommands(m_commands, seed, *m_memory, m_config.usage, m_config.sharing, m_opCount);
8909 m_stage = &MemoryTestInstance::prepare;
8913 catch (const tcu::TestError& e)
8915 m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
8916 return nextMemoryType();
// Stage: run each command's prepare() step against a fresh PrepareContext.
// On failure, record the error and skip to the next memory type.
8921 bool MemoryTestInstance::prepare (void)
8923 TestLog& log = m_context.getTestContext().getLog();
8924 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
// NOTE(review): " prepare iteration" lacks the trailing space that the
// execute/verify sections use ("execute iteration ", "verify iteration ").
8925 "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration" + de::toString(m_iteration));
8927 m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
8929 DE_ASSERT(!m_commands.empty());
8931 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
8933 Command& command = *m_commands[cmdNdx];
8937 command.prepare(*m_prepareContext);
8939 catch (const tcu::TestError& e)
8941 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
8942 return nextMemoryType();
8946 m_stage = &MemoryTestInstance::execute;
// Stage: execute every prepared command, then wait for the device to go
// idle. On failure, record the error and move on to the next iteration.
8950 bool MemoryTestInstance::execute (void)
8952 TestLog& log = m_context.getTestContext().getLog();
8953 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
8954 "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
8955 ExecuteContext executeContext (*m_renderContext);
8956 const vk::VkDevice device = m_context.getDevice();
8957 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
8959 DE_ASSERT(!m_commands.empty());
8961 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
8963 Command& command = *m_commands[cmdNdx];
8967 command.execute(executeContext);
8969 catch (const tcu::TestError& e)
8971 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
8972 return nextIteration();
// All commands submitted; verification requires the GPU to be finished.
8976 VK_CHECK(vkd.deviceWaitIdle(device));
8978 m_stage = &MemoryTestInstance::verify;
// Stage: verify the results of every executed command. Failures are
// collected (not thrown) and the test moves on to the next iteration.
8982 bool MemoryTestInstance::verify (void)
8984 DE_ASSERT(!m_commands.empty());
8986 TestLog& log = m_context.getTestContext().getLog();
8987 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
8988 "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
8989 VerifyContext verifyContext (log, m_resultCollector, *m_renderContext, m_config.size);
8991 log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
8993 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
8995 Command& command = *m_commands[cmdNdx];
// cmdNdx is passed so verification messages can identify the command.
8999 command.verify(verifyContext, cmdNdx);
9001 catch (const tcu::TestError& e)
9003 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9004 return nextIteration();
9008 return nextIteration();
// Drive the stage machine: invoke the current stage through the m_stage
// member pointer; a true return means more work remains, so report
// "incomplete"; otherwise report the collected result.
9011 tcu::TestStatus MemoryTestInstance::iterate (void)
9013 if ((this->*m_stage)())
9014 return tcu::TestStatus::incomplete();
9016 return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
// Register all GLSL sources needed by the operations enabled in
// config.usage. Each program name added here ("vertex-buffer.vert", etc.)
// must match the name the corresponding render command looks up at runtime.
9021 void init (vk::SourceCollections& sources, TestConfig config) const
9023 // Vertex buffer rendering
9024 if (config.usage & USAGE_VERTEX_BUFFER)
9026 const char* const vertexShader =
9028 "layout(location = 0) in highp vec2 a_position;\n"
9029 "void main (void) {\n"
9030 "\tgl_PointSize = 1.0;\n"
// 1.998 * p - 0.999 maps [0,1] coordinates to clip space so points land
// on pixel centers of the 256x256 framebuffer.
9031 "\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9034 sources.glslSources.add("vertex-buffer.vert")
9035 << glu::VertexSource(vertexShader);
9038 // Index buffer rendering
9039 if (config.usage & USAGE_INDEX_BUFFER)
9041 const char* const vertexShader =
9044 "void main (void) {\n"
9045 "\tgl_PointSize = 1.0;\n"
// Decode the vertex index itself into a 256x256 grid position.
9046 "\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9047 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9050 sources.glslSources.add("index-buffer.vert")
9051 << glu::VertexSource(vertexShader);
9054 // Uniform buffer rendering
9055 if (config.usage & USAGE_UNIFORM_BUFFER)
9057 std::ostringstream vertexShader;
9062 "layout(set=0, binding=0) uniform Block\n"
9064 "\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9066 "void main (void) {\n"
9067 "\tgl_PointSize = 1.0;\n"
// Each uvec4 packs eight 16-bit point coordinates; select the component
// and half-word belonging to this vertex.
9068 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9069 "\thighp uint val;\n"
9070 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9071 "\t\tval = vecVal.x;\n"
9072 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9073 "\t\tval = vecVal.y;\n"
9074 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9075 "\t\tval = vecVal.z;\n"
9076 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9077 "\t\tval = vecVal.w;\n"
9078 "\tif ((gl_VertexIndex % 2) == 0)\n"
9079 "\t\tval = val & 0xFFFFu;\n"
9081 "\t\tval = val >> 16u;\n"
9082 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9083 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9086 sources.glslSources.add("uniform-buffer.vert")
9087 << glu::VertexSource(vertexShader.str());
9090 // Fragment uniform buffer rendering
9091 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9092 const size_t arrayIntSize = arraySize * 4;
9093 std::ostringstream fragmentShader;
9098 "layout(location = 0) out highp vec4 o_color;\n"
9099 "layout(set=0, binding=0) uniform Block\n"
9101 "\thighp uvec4 values[" << arraySize << "];\n"
9103 "layout(push_constant) uniform PushC\n"
9106 "\tuint valuesPerPixel;\n"
9108 "void main (void) {\n"
// Each draw call (callId) covers a different slice of pixels; pixels
// already covered by earlier calls are skipped.
9109 "\thighp uint id = pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel) + uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9110 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel))\n"
9112 "\thighp uint value = id;\n"
// Chase values through the buffer valuesPerPixel times so the output
// depends on many buffer locations.
9113 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9115 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % " << arraySize << "u];\n"
9116 "\t\tif ((value % 4u) == 0u)\n"
9117 "\t\t\tvalue = vecVal.x;\n"
9118 "\t\telse if ((value % 4u) == 1u)\n"
9119 "\t\t\tvalue = vecVal.y;\n"
9120 "\t\telse if ((value % 4u) == 2u)\n"
9121 "\t\t\tvalue = vecVal.z;\n"
9122 "\t\telse if ((value % 4u) == 3u)\n"
9123 "\t\t\tvalue = vecVal.w;\n"
9125 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9126 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9129 sources.glslSources.add("uniform-buffer.frag")
9130 << glu::FragmentSource(fragmentShader.str());
9134 if (config.usage & USAGE_STORAGE_BUFFER)
9137 // Vertex storage buffer rendering
9138 const char* const vertexShader =
9141 "layout(set=0, binding=0) buffer Block\n"
9143 "\thighp uvec4 values[];\n"
9145 "void main (void) {\n"
9146 "\tgl_PointSize = 1.0;\n"
9147 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9148 "\thighp uint val;\n"
9149 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9150 "\t\tval = vecVal.x;\n"
9151 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9152 "\t\tval = vecVal.y;\n"
9153 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9154 "\t\tval = vecVal.z;\n"
9155 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9156 "\t\tval = vecVal.w;\n"
9157 "\tif ((gl_VertexIndex % 2) == 0)\n"
9158 "\t\tval = val & 0xFFFFu;\n"
9160 "\t\tval = val >> 16u;\n"
9161 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9162 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9165 sources.glslSources.add("storage-buffer.vert")
9166 << glu::VertexSource(vertexShader);
9169 // Fragment storage buffer rendering
9170 std::ostringstream fragmentShader;
9175 "layout(location = 0) out highp vec4 o_color;\n"
9176 "layout(set=0, binding=0) buffer Block\n"
9178 "\thighp uvec4 values[];\n"
9180 "layout(push_constant) uniform PushC\n"
9182 "\tuint valuesPerPixel;\n"
// Runtime-sized block: the buffer size comes in via push constant.
9183 "\tuint bufferSize;\n"
9185 "void main (void) {\n"
9186 "\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
9187 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9188 "\thighp uint value = id;\n"
9189 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9191 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
9192 "\t\tif ((value % 4u) == 0u)\n"
9193 "\t\t\tvalue = vecVal.x;\n"
9194 "\t\telse if ((value % 4u) == 1u)\n"
9195 "\t\t\tvalue = vecVal.y;\n"
9196 "\t\telse if ((value % 4u) == 2u)\n"
9197 "\t\t\tvalue = vecVal.z;\n"
9198 "\t\telse if ((value % 4u) == 3u)\n"
9199 "\t\t\tvalue = vecVal.w;\n"
9201 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9202 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9205 sources.glslSources.add("storage-buffer.frag")
9206 << glu::FragmentSource(fragmentShader.str());
9210 if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
9213 // Vertex uniform texel buffer rendering
9214 const char* const vertexShader =
9216 "#extension GL_EXT_texture_buffer : require\n"
9218 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9219 "void main (void) {\n"
9220 "\tgl_PointSize = 1.0;\n"
9221 "\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
9222 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9223 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9226 sources.glslSources.add("uniform-texel-buffer.vert")
9227 << glu::VertexSource(vertexShader);
9231 // Fragment uniform texel buffer rendering
9232 const char* const fragmentShader =
9234 "#extension GL_EXT_texture_buffer : require\n"
9236 "layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
9237 "layout(location = 0) out highp vec4 o_color;\n"
9238 "layout(push_constant) uniform PushC\n"
9241 "\tuint valuesPerPixel;\n"
9242 "\tuint maxTexelCount;\n"
9244 "void main (void) {\n"
9245 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9246 "\thighp uint value = id;\n"
9247 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9249 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
// textureSize() gives the texel count, keeping the fetch index in range.
9251 "\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
9253 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9254 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9257 sources.glslSources.add("uniform-texel-buffer.frag")
9258 << glu::FragmentSource(fragmentShader);
9262 if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
9265 // Vertex storage texel buffer rendering
9266 const char* const vertexShader =
9268 "#extension GL_EXT_texture_buffer : require\n"
9270 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9271 "out gl_PerVertex {\n"
9272 "\tvec4 gl_Position;\n"
9273 "\tfloat gl_PointSize;\n"
9275 "void main (void) {\n"
9276 "\tgl_PointSize = 1.0;\n"
// Each 32-bit texel packs two 16-bit point coordinates.
9277 "\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
9278 "\tif (gl_VertexIndex % 2 == 0)\n"
9279 "\t\tval = val & 0xFFFFu;\n"
9281 "\t\tval = val >> 16;\n"
9282 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9283 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9286 sources.glslSources.add("storage-texel-buffer.vert")
9287 << glu::VertexSource(vertexShader);
9290 // Fragment storage texel buffer rendering
9291 const char* const fragmentShader =
9293 "#extension GL_EXT_texture_buffer : require\n"
9295 "layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
9296 "layout(location = 0) out highp vec4 o_color;\n"
9297 "layout(push_constant) uniform PushC\n"
9300 "\tuint valuesPerPixel;\n"
9301 "\tuint maxTexelCount;\n"
9304 "void main (void) {\n"
9305 "\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9306 "\thighp uint value = id;\n"
9307 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
9309 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9311 "\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
9313 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9314 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9317 sources.glslSources.add("storage-texel-buffer.frag")
9318 << glu::FragmentSource(fragmentShader);
9322 if (config.usage & USAGE_STORAGE_IMAGE)
9324 // Vertex storage image
9325 const char* const vertexShader =
9328 "layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
9329 "out gl_PerVertex {\n"
9330 "\tvec4 gl_Position;\n"
9331 "\tfloat gl_PointSize;\n"
9333 "void main (void) {\n"
9334 "\tgl_PointSize = 1.0;\n"
// Each RGBA8 texel supplies positions for two points (xy and zw).
9335 "\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
9336 "\thighp vec2 pos;\n"
9337 "\tif (gl_VertexIndex % 2 == 0)\n"
9338 "\t\tpos = val.xy;\n"
9340 "\t\tpos = val.zw;\n"
9341 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9344 sources.glslSources.add("storage-image.vert")
9345 << glu::VertexSource(vertexShader);
9348 if (config.usage & USAGE_SAMPLED_IMAGE)
9350 // Vertex sampled image
9351 const char* const vertexShader =
9354 "layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
9355 "out gl_PerVertex {\n"
9356 "\tvec4 gl_Position;\n"
9357 "\tfloat gl_PointSize;\n"
9359 "void main (void) {\n"
9360 "\tgl_PointSize = 1.0;\n"
9361 "\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
9362 "\thighp vec2 pos;\n"
9363 "\tif (gl_VertexIndex % 2 == 0)\n"
9364 "\t\tpos = val.xy;\n"
9366 "\t\tpos = val.zw;\n"
9367 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9370 sources.glslSources.add("sampled-image.vert")
9371 << glu::VertexSource(vertexShader);
9374 // Full-screen quad vertex shader shared by the fragment-shader tests.
9375 const char* const vertexShader =
9377 "out gl_PerVertex {\n"
9378 "\tvec4 gl_Position;\n"
9381 "void main (void) {\n"
9382 "\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
9383 "\t ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
9386 sources.glslSources.add("render-quad.vert")
9387 << glu::VertexSource(vertexShader);
9390 // Constant-white fragment shader used by vertex-input tests.
9391 const char* const fragmentShader =
9393 "layout(location = 0) out highp vec4 o_color;\n"
9394 "void main (void) {\n"
9395 "\to_color = vec4(1.0);\n"
9398 sources.glslSources.add("render-white.frag")
9399 << glu::FragmentSource(fragmentShader);
// Build the pipeline_barrier test hierarchy: one group per
// write-usage/read-usage pair, plus combined "all" (every usage) and
// "all_device" (every usage except host access) groups, each with one test
// case per buffer size.
9406 tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
9408 de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
9409 const vk::VkDeviceSize sizes[] =
9416 const Usage usages[] =
9422 USAGE_VERTEX_BUFFER,
9424 USAGE_UNIFORM_BUFFER,
9425 USAGE_UNIFORM_TEXEL_BUFFER,
9426 USAGE_STORAGE_BUFFER,
9427 USAGE_STORAGE_TEXEL_BUFFER,
9428 USAGE_STORAGE_IMAGE,
9431 const Usage readUsages[] =
9435 USAGE_VERTEX_BUFFER,
9437 USAGE_UNIFORM_BUFFER,
9438 USAGE_UNIFORM_TEXEL_BUFFER,
9439 USAGE_STORAGE_BUFFER,
9440 USAGE_STORAGE_TEXEL_BUFFER,
9441 USAGE_STORAGE_IMAGE,
9445 const Usage writeUsages[] =
// One group per (write usage, read usage) combination.
9451 for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
9453 const Usage writeUsage = writeUsages[writeUsageNdx];
9455 for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
9457 const Usage readUsage = readUsages[readUsageNdx];
9458 const Usage usage = writeUsage | readUsage;
9459 const string usageGroupName (usageToName(usage));
9460 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9462 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9464 const vk::VkDeviceSize size = sizes[sizeNdx];
9465 const string testName (de::toString((deUint64)(size)));
9466 const TestConfig config =
9470 vk::VK_SHARING_MODE_EXCLUSIVE
9473 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
9476 group->addChild(usageGroup.get());
9477 usageGroup.release();
// Fold every usage flag together for the combined groups below.
9482 Usage all = (Usage)0;
9484 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
9485 all = all | usages[usageNdx];
9488 const string usageGroupName ("all");
9489 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9491 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9493 const vk::VkDeviceSize size = sizes[sizeNdx];
9494 const string testName (de::toString((deUint64)(size)));
9495 const TestConfig config =
9499 vk::VK_SHARING_MODE_EXCLUSIVE
9502 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
9505 group->addChild(usageGroup.get());
9506 usageGroup.release();
// "all_device": every usage except host read/write, so the memory never
// needs to be host-visible.
9510 const string usageGroupName ("all_device");
9511 de::MovePtr<tcu::TestCaseGroup> usageGroup (new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));
9513 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
9515 const vk::VkDeviceSize size = sizes[sizeNdx];
9516 const string testName (de::toString((deUint64)(size)));
9517 const TestConfig config =
9519 (Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
9521 vk::VK_SHARING_MODE_EXCLUSIVE
9524 usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx,tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
9527 group->addChild(usageGroup.get());
9528 usageGroup.release();
9532 return group.release();