1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Pipeline barrier tests
22 *//*--------------------------------------------------------------------*/
24 #include "vktMemoryPipelineBarrierTests.hpp"
26 #include "vktTestCaseUtil.hpp"
29 #include "vkPlatform.hpp"
30 #include "vkRefUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkMemUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkPrograms.hpp"
36 #include "tcuMaybe.hpp"
37 #include "tcuTextureUtil.hpp"
38 #include "tcuTestLog.hpp"
39 #include "tcuResultCollector.hpp"
40 #include "tcuTexture.hpp"
41 #include "tcuImageCompare.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
72 using tcu::ConstPixelBufferAccess;
73 using tcu::PixelBufferAccess;
74 using tcu::TextureFormat;
75 using tcu::TextureLevel;
85 MAX_UNIFORM_BUFFER_SIZE = 1024,
86 MAX_STORAGE_BUFFER_SIZE = (1<<28)
89 // \todo [mika] Add to utilities
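// Integer division rounding up; for non-negative values this is ceil(a / b).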
91 T divRoundUp (const T& a, const T& b)
93 return (a / b) + (a % b == 0 ? 0 : 1);
98 ALL_PIPELINE_STAGES = vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
99 | vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
100 | vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT
101 | vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
102 | vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
103 | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
104 | vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
105 | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
106 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
107 | vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
108 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
109 | vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
110 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
111 | vk::VK_PIPELINE_STAGE_TRANSFER_BIT
112 | vk::VK_PIPELINE_STAGE_HOST_BIT
117 ALL_ACCESSES = vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT
118 | vk::VK_ACCESS_INDEX_READ_BIT
119 | vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
120 | vk::VK_ACCESS_UNIFORM_READ_BIT
121 | vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
122 | vk::VK_ACCESS_SHADER_READ_BIT
123 | vk::VK_ACCESS_SHADER_WRITE_BIT
124 | vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
125 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
126 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
127 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
128 | vk::VK_ACCESS_TRANSFER_READ_BIT
129 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
130 | vk::VK_ACCESS_HOST_READ_BIT
131 | vk::VK_ACCESS_HOST_WRITE_BIT
132 | vk::VK_ACCESS_MEMORY_READ_BIT
133 | vk::VK_ACCESS_MEMORY_WRITE_BIT
138 // Mapped host read and write
139 USAGE_HOST_READ = (0x1u<<0),
140 USAGE_HOST_WRITE = (0x1u<<1),
142 // Copy and other transfer operations
143 USAGE_TRANSFER_SRC = (0x1u<<2),
144 USAGE_TRANSFER_DST = (0x1u<<3),
146 // Buffer usage flags
147 USAGE_INDEX_BUFFER = (0x1u<<4),
148 USAGE_VERTEX_BUFFER = (0x1u<<5),
150 USAGE_UNIFORM_BUFFER = (0x1u<<6),
151 USAGE_STORAGE_BUFFER = (0x1u<<7),
153 USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
154 USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),
156 // \todo [2016-03-09 mika] This is probably almost impossible to do
157 USAGE_INDIRECT_BUFFER = (0x1u<<10),
159 // Texture usage flags
160 USAGE_SAMPLED_IMAGE = (0x1u<<11),
161 USAGE_STORAGE_IMAGE = (0x1u<<12),
162 USAGE_COLOR_ATTACHMENT = (0x1u<<13),
163 USAGE_INPUT_ATTACHMENT = (0x1u<<14),
164 USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
167 bool supportsDeviceBufferWrites (Usage usage)
169 if (usage & USAGE_TRANSFER_DST)
172 if (usage & USAGE_STORAGE_BUFFER)
175 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
181 bool supportsDeviceImageWrites (Usage usage)
183 if (usage & USAGE_TRANSFER_DST)
186 if (usage & USAGE_STORAGE_IMAGE)
189 if (usage & USAGE_COLOR_ATTACHMENT)
195 // Sequential access enums
198 ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
199 ACCESS_INDEX_READ_BIT,
200 ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
201 ACCESS_UNIFORM_READ_BIT,
202 ACCESS_INPUT_ATTACHMENT_READ_BIT,
203 ACCESS_SHADER_READ_BIT,
204 ACCESS_SHADER_WRITE_BIT,
205 ACCESS_COLOR_ATTACHMENT_READ_BIT,
206 ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
207 ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
208 ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
209 ACCESS_TRANSFER_READ_BIT,
210 ACCESS_TRANSFER_WRITE_BIT,
211 ACCESS_HOST_READ_BIT,
212 ACCESS_HOST_WRITE_BIT,
213 ACCESS_MEMORY_READ_BIT,
214 ACCESS_MEMORY_WRITE_BIT,
219 // Sequential stage enums
222 PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
223 PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
224 PIPELINESTAGE_DRAW_INDIRECT_BIT,
225 PIPELINESTAGE_VERTEX_INPUT_BIT,
226 PIPELINESTAGE_VERTEX_SHADER_BIT,
227 PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
228 PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
229 PIPELINESTAGE_GEOMETRY_SHADER_BIT,
230 PIPELINESTAGE_FRAGMENT_SHADER_BIT,
231 PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
232 PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
233 PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
234 PIPELINESTAGE_COMPUTE_SHADER_BIT,
235 PIPELINESTAGE_TRANSFER_BIT,
236 PIPELINESTAGE_HOST_BIT,
241 PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flags)
245 case vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT: return PIPELINESTAGE_TOP_OF_PIPE_BIT;
246 case vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT: return PIPELINESTAGE_BOTTOM_OF_PIPE_BIT;
247 case vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT: return PIPELINESTAGE_DRAW_INDIRECT_BIT;
248 case vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT: return PIPELINESTAGE_VERTEX_INPUT_BIT;
249 case vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT: return PIPELINESTAGE_VERTEX_SHADER_BIT;
250 case vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT;
251 case vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT: return PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT;
252 case vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT: return PIPELINESTAGE_GEOMETRY_SHADER_BIT;
253 case vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT: return PIPELINESTAGE_FRAGMENT_SHADER_BIT;
254 case vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT;
255 case vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT: return PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT;
256 case vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT: return PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
257 case vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT: return PIPELINESTAGE_COMPUTE_SHADER_BIT;
258 case vk::VK_PIPELINE_STAGE_TRANSFER_BIT: return PIPELINESTAGE_TRANSFER_BIT;
259 case vk::VK_PIPELINE_STAGE_HOST_BIT: return PIPELINESTAGE_HOST_BIT;
262 DE_FATAL("Unknown pipeline stage flags");
263 return PIPELINESTAGE_LAST;
267 Usage operator| (Usage a, Usage b)
269 return (Usage)((deUint32)a | (deUint32)b);
272 Usage operator& (Usage a, Usage b)
274 return (Usage)((deUint32)a & (deUint32)b);
277 string usageToName (Usage usage)
282 const char* const name;
285 { USAGE_HOST_READ, "host_read" },
286 { USAGE_HOST_WRITE, "host_write" },
288 { USAGE_TRANSFER_SRC, "transfer_src" },
289 { USAGE_TRANSFER_DST, "transfer_dst" },
291 { USAGE_INDEX_BUFFER, "index_buffer" },
292 { USAGE_VERTEX_BUFFER, "vertex_buffer" },
293 { USAGE_UNIFORM_BUFFER, "uniform_buffer" },
294 { USAGE_STORAGE_BUFFER, "storage_buffer" },
295 { USAGE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer" },
296 { USAGE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer" },
297 { USAGE_INDIRECT_BUFFER, "indirect_buffer" },
298 { USAGE_SAMPLED_IMAGE, "image_sampled" },
299 { USAGE_STORAGE_IMAGE, "storage_image" },
300 { USAGE_COLOR_ATTACHMENT, "color_attachment" },
301 { USAGE_INPUT_ATTACHMENT, "input_attachment" },
302 { USAGE_DEPTH_STENCIL_ATTACHMENT, "depth_stencil_attachment" },
305 std::ostringstream stream;
308 for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usageNames); usageNdx++)
310 if (usage & usageNames[usageNdx].usage)
317 stream << usageNames[usageNdx].name;
324 vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
326 vk::VkBufferUsageFlags flags = 0;
328 if (usage & USAGE_TRANSFER_SRC)
329 flags |= vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
331 if (usage & USAGE_TRANSFER_DST)
332 flags |= vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT;
334 if (usage & USAGE_INDEX_BUFFER)
335 flags |= vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
337 if (usage & USAGE_VERTEX_BUFFER)
338 flags |= vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
340 if (usage & USAGE_INDIRECT_BUFFER)
341 flags |= vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
343 if (usage & USAGE_UNIFORM_BUFFER)
344 flags |= vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
346 if (usage & USAGE_STORAGE_BUFFER)
347 flags |= vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
349 if (usage & USAGE_UNIFORM_TEXEL_BUFFER)
350 flags |= vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
352 if (usage & USAGE_STORAGE_TEXEL_BUFFER)
353 flags |= vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
358 vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
360 vk::VkImageUsageFlags flags = 0;
362 if (usage & USAGE_TRANSFER_SRC)
363 flags |= vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
365 if (usage & USAGE_TRANSFER_DST)
366 flags |= vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT;
368 if (usage & USAGE_SAMPLED_IMAGE)
369 flags |= vk::VK_IMAGE_USAGE_SAMPLED_BIT;
371 if (usage & USAGE_STORAGE_IMAGE)
372 flags |= vk::VK_IMAGE_USAGE_STORAGE_BIT;
374 if (usage & USAGE_COLOR_ATTACHMENT)
375 flags |= vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
377 if (usage & USAGE_INPUT_ATTACHMENT)
378 flags |= vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
380 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
381 flags |= vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
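// Map test Usage flags to the pipeline stages that may touch the resource.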
386 vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
388 vk::VkPipelineStageFlags flags = 0;
390 if (usage & (USAGE_HOST_READ|USAGE_HOST_WRITE))
391 flags |= vk::VK_PIPELINE_STAGE_HOST_BIT;
393 if (usage & (USAGE_TRANSFER_SRC|USAGE_TRANSFER_DST))
394 flags |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;
396 if (usage & (USAGE_VERTEX_BUFFER|USAGE_INDEX_BUFFER))
397 flags |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
399 if (usage & USAGE_INDIRECT_BUFFER)
400 flags |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
403 (USAGE_UNIFORM_BUFFER
404 | USAGE_STORAGE_BUFFER
405 | USAGE_UNIFORM_TEXEL_BUFFER
406 | USAGE_STORAGE_TEXEL_BUFFER
407 | USAGE_SAMPLED_IMAGE
408 | USAGE_STORAGE_IMAGE))
410 flags |= (vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
411 | vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
412 | vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
413 | vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
414 | vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
415 | vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
418 if (usage & USAGE_INPUT_ATTACHMENT)
419 flags |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
421 if (usage & USAGE_COLOR_ATTACHMENT)
422 flags |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
424 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
426 flags |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
427 | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
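// Map test Usage flags to the access types used with the resource.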
433 vk::VkAccessFlags usageToAccessFlags (Usage usage)
435 vk::VkAccessFlags flags = 0;
437 if (usage & USAGE_HOST_READ)
438 flags |= vk::VK_ACCESS_HOST_READ_BIT;
440 if (usage & USAGE_HOST_WRITE)
441 flags |= vk::VK_ACCESS_HOST_WRITE_BIT;
443 if (usage & USAGE_TRANSFER_SRC)
444 flags |= vk::VK_ACCESS_TRANSFER_READ_BIT;
446 if (usage & USAGE_TRANSFER_DST)
447 flags |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;
449 if (usage & USAGE_INDEX_BUFFER)
450 flags |= vk::VK_ACCESS_INDEX_READ_BIT;
452 if (usage & USAGE_VERTEX_BUFFER)
453 flags |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
455 if (usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER))
456 flags |= vk::VK_ACCESS_UNIFORM_READ_BIT;
458 if (usage & USAGE_SAMPLED_IMAGE)
459 flags |= vk::VK_ACCESS_SHADER_READ_BIT;
461 if (usage & (USAGE_STORAGE_BUFFER
462 | USAGE_STORAGE_TEXEL_BUFFER
463 | USAGE_STORAGE_IMAGE))
464 flags |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;
466 if (usage & USAGE_INDIRECT_BUFFER)
467 flags |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;
469 if (usage & USAGE_COLOR_ATTACHMENT)
470 flags |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
472 if (usage & USAGE_INPUT_ATTACHMENT)
473 flags |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
475 if (usage & USAGE_DEPTH_STENCIL_ATTACHMENT)
476 flags |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
477 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
485 vk::VkDeviceSize size;
486 vk::VkSharingMode sharing;
489 vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface& vkd,
491 vk::VkCommandPool pool,
492 vk::VkCommandBufferLevel level)
494 const vk::VkCommandBufferInheritanceInfo inheritInfo =
496 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
505 const vk::VkCommandBufferBeginInfo beginInfo =
507 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
510 (level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
513 vk::Move<vk::VkCommandBuffer> commandBuffer (allocateCommandBuffer(vkd, device, pool, level));
515 vkd.beginCommandBuffer(*commandBuffer, &beginInfo);
517 return commandBuffer;
520 vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface& vkd,
522 vk::VkDeviceSize size,
523 vk::VkBufferUsageFlags usage,
524 vk::VkSharingMode sharingMode,
525 const vector<deUint32>& queueFamilies)
527 const vk::VkBufferCreateInfo createInfo =
529 vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
536 (deUint32)queueFamilies.size(),
540 return vk::createBuffer(vkd, device, &createInfo);
543 vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface& vkd,
545 vk::VkDeviceSize size,
546 deUint32 memoryTypeIndex)
548 const vk::VkMemoryAllocateInfo alloc =
550 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
557 return vk::allocateMemory(vkd, device, &alloc);
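// Pick a memory type that satisfies the buffer's requirements and the requested properties,
// allocate it and bind it to the buffer; on out-of-memory the next compatible memory type is tried.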
560 vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface& vki,
561 const vk::DeviceInterface& vkd,
562 vk::VkPhysicalDevice physicalDevice,
565 vk::VkMemoryPropertyFlags properties)
567 const vk::VkMemoryRequirements memoryRequirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
568 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
569 deUint32 memoryTypeIndex;
571 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
573 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
574 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
578 const vk::VkMemoryAllocateInfo allocationInfo =
580 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
582 memoryRequirements.size,
585 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
587 VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));
591 catch (const vk::Error& error)
593 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
594 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
596 // Try next memory type/heap if out of memory
600 // Throw all other errors forward
607 TCU_FAIL("Failed to allocate memory for buffer");
610 vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface& vki,
611 const vk::DeviceInterface& vkd,
612 vk::VkPhysicalDevice physicalDevice,
615 vk::VkMemoryPropertyFlags properties)
617 const vk::VkMemoryRequirements memoryRequirements = vk::getImageMemoryRequirements(vkd, device, image);
618 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
619 deUint32 memoryTypeIndex;
621 for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
623 if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
624 && (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
628 const vk::VkMemoryAllocateInfo allocationInfo =
630 vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
632 memoryRequirements.size,
635 vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));
637 VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));
641 catch (const vk::Error& error)
643 if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
644 || error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
646 // Try next memory type/heap if out of memory
650 // Throw all other errors forward
657 TCU_FAIL("Failed to allocate memory for image");
660 void queueRun (const vk::DeviceInterface& vkd,
662 vk::VkCommandBuffer commandBuffer)
664 const vk::VkSubmitInfo submitInfo =
666 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
671 (const vk::VkPipelineStageFlags*)DE_NULL,
680 VK_CHECK(vkd.queueSubmit(queue, 1, &submitInfo, 0));
681 VK_CHECK(vkd.queueWaitIdle(queue));
684 void* mapMemory (const vk::DeviceInterface& vkd,
686 vk::VkDeviceMemory memory,
687 vk::VkDeviceSize size)
691 VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &ptr));
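// Byte-accurate CPU-side reference of the memory contents; tracks which bytes currently hold a defined value.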
696 class ReferenceMemory
699 ReferenceMemory (size_t size);
701 void set (size_t pos, deUint8 val);
702 deUint8 get (size_t pos) const;
703 bool isDefined (size_t pos) const;
705 void setDefined (size_t offset, size_t size, const void* data);
706 void setUndefined (size_t offset, size_t size);
707 void setData (size_t offset, size_t size, const void* data);
709 size_t getSize (void) const { return m_data.size(); }
712 vector<deUint8> m_data;
713 vector<deUint64> m_defined;
716 ReferenceMemory::ReferenceMemory (size_t size)
718 , m_defined (size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
722 void ReferenceMemory::set (size_t pos, deUint8 val)
724 DE_ASSERT(pos < m_data.size());
727 m_defined[pos / 64] |= 0x1ull << (pos % 64);
730 void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
732 const deUint8* data = (const deUint8*)data_;
734 DE_ASSERT(offset < m_data.size());
735 DE_ASSERT(offset + size <= m_data.size());
737 // \todo [2016-03-09 mika] Optimize
738 for (size_t pos = 0; pos < size; pos++)
740 m_data[offset + pos] = data[pos];
741 m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
745 void ReferenceMemory::setUndefined (size_t offset, size_t size)
747 // \todo [2016-03-09 mika] Optimize
748 for (size_t pos = 0; pos < size; pos++)
749 m_defined[(offset + pos) / 64] &= ~(0x1ull << ((offset + pos) % 64));
752 deUint8 ReferenceMemory::get (size_t pos) const
754 DE_ASSERT(pos < m_data.size());
755 DE_ASSERT(isDefined(pos));
759 bool ReferenceMemory::isDefined (size_t pos) const
761 DE_ASSERT(pos < m_data.size());
763 return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
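// Owns the VkDeviceMemory allocation under test together with the largest buffer size and image dimensions that fit into it.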
769 Memory (const vk::InstanceInterface& vki,
770 const vk::DeviceInterface& vkd,
771 vk::VkPhysicalDevice physicalDevice,
773 vk::VkDeviceSize size,
774 deUint32 memoryTypeIndex,
775 vk::VkDeviceSize maxBufferSize,
776 deInt32 maxImageWidth,
777 deInt32 maxImageHeight);
779 vk::VkDeviceSize getSize (void) const { return m_size; }
780 vk::VkDeviceSize getMaxBufferSize (void) const { return m_maxBufferSize; }
781 bool getSupportBuffers (void) const { return m_maxBufferSize > 0; }
783 deInt32 getMaxImageWidth (void) const { return m_maxImageWidth; }
784 deInt32 getMaxImageHeight (void) const { return m_maxImageHeight; }
785 bool getSupportImages (void) const { return m_maxImageWidth > 0; }
787 const vk::VkMemoryType& getMemoryType (void) const { return m_memoryType; }
788 deUint32 getMemoryTypeIndex (void) const { return m_memoryTypeIndex; }
789 vk::VkDeviceMemory getMemory (void) const { return *m_memory; }
792 const vk::VkDeviceSize m_size;
793 const deUint32 m_memoryTypeIndex;
794 const vk::VkMemoryType m_memoryType;
795 const vk::Unique<vk::VkDeviceMemory> m_memory;
796 const vk::VkDeviceSize m_maxBufferSize;
797 const deInt32 m_maxImageWidth;
798 const deInt32 m_maxImageHeight;
801 vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface& vki,
802 vk::VkPhysicalDevice device,
803 deUint32 memoryTypeIndex)
805 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, device);
807 DE_ASSERT(memoryTypeIndex < memoryProperties.memoryTypeCount);
809 return memoryProperties.memoryTypes[memoryTypeIndex];
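// Search for the largest buffer size whose memory requirements still fit the given memory size and memory type.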
812 vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface& vkd,
815 vk::VkBufferUsageFlags usage,
816 vk::VkSharingMode sharingMode,
817 const vector<deUint32>& queueFamilies,
819 vk::VkDeviceSize memorySize,
820 deUint32 memoryTypeIndex)
822 vk::VkDeviceSize lastSuccess = 0;
823 vk::VkDeviceSize currentSize = memorySize / 2;
826 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
827 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
829 if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
833 for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
835 const vk::Unique<vk::VkBuffer> buffer (createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
836 const vk::VkMemoryRequirements requirements (vk::getBufferMemoryRequirements(vkd, device, *buffer));
838 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
840 lastSuccess = currentSize;
841 currentSize += stepSize;
844 currentSize -= stepSize;
853 // Round size down to the largest W * H * 4 where W and H < 4096
854 vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
856 const vk::VkDeviceSize maxTextureSize = 4096;
857 vk::VkDeviceSize maxTexelCount = size / 4;
858 vk::VkDeviceSize bestW = de::max(maxTexelCount, maxTextureSize);
859 vk::VkDeviceSize bestH = maxTexelCount / bestW;
861 // \todo [2016-03-09 mika] Could probably be faster?
862 for (vk::VkDeviceSize w = 1; w * w < maxTexelCount && w < maxTextureSize && bestW * bestH * 4 < size; w++)
864 const vk::VkDeviceSize h = maxTexelCount / w;
866 if (bestW * bestH < w * h)
873 return bestW * bestH * 4;
876 // Find an RGBA8 image size that uses exactly "size" bytes.
877 // "size" must be W * H * 4 where W and H < 4096
878 IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
880 const vk::VkDeviceSize maxTextureSize = 4096;
881 vk::VkDeviceSize texelCount = size / 4;
883 DE_ASSERT((size % 4) == 0);
885 // \todo [2016-03-09 mika] Could probably be faster?
886 for (vk::VkDeviceSize w = 1; w < maxTextureSize && w < texelCount; w++)
888 const vk::VkDeviceSize h = texelCount / w;
890 if ((texelCount % w) == 0 && h < maxTextureSize)
891 return IVec2((int)w, (int)h);
894 DE_FATAL("Invalid size");
895 return IVec2(-1, -1);
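// Search for the largest RGBA8 2D image whose memory requirements still fit the given memory size and memory type.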
898 IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface& vkd,
901 vk::VkImageUsageFlags usage,
902 vk::VkSharingMode sharingMode,
903 const vector<deUint32>& queueFamilies,
905 vk::VkDeviceSize memorySize,
906 deUint32 memoryTypeIndex)
908 IVec2 lastSuccess (0);
912 const deUint32 texelCount = (deUint32)(memorySize / 4);
913 const deUint32 width = (deUint32)deFloatSqrt((float)texelCount);
914 const deUint32 height = texelCount / width;
916 currentSize[0] = deMaxu32(width, height);
917 currentSize[1] = deMinu32(width, height);
920 for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
922 const vk::VkImageCreateInfo createInfo =
924 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
928 vk::VK_IMAGE_TYPE_2D,
929 vk::VK_FORMAT_R8G8B8A8_UNORM,
931 (deUint32)currentSize[0],
932 (deUint32)currentSize[1],
936 vk::VK_SAMPLE_COUNT_1_BIT,
937 vk::VK_IMAGE_TILING_OPTIMAL,
940 (deUint32)queueFamilies.size(),
942 vk::VK_IMAGE_LAYOUT_UNDEFINED
944 const vk::Unique<vk::VkImage> image (vk::createImage(vkd, device, &createInfo));
945 const vk::VkMemoryRequirements requirements (vk::getImageMemoryRequirements(vkd, device, *image));
947 if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
949 lastSuccess = currentSize;
950 currentSize[0] += stepSize;
951 currentSize[1] += stepSize;
955 currentSize[0] -= stepSize;
956 currentSize[1] -= stepSize;
966 Memory::Memory (const vk::InstanceInterface& vki,
967 const vk::DeviceInterface& vkd,
968 vk::VkPhysicalDevice physicalDevice,
970 vk::VkDeviceSize size,
971 deUint32 memoryTypeIndex,
972 vk::VkDeviceSize maxBufferSize,
973 deInt32 maxImageWidth,
974 deInt32 maxImageHeight)
976 , m_memoryTypeIndex (memoryTypeIndex)
977 , m_memoryType (getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
978 , m_memory (allocMemory(vkd, device, size, memoryTypeIndex))
979 , m_maxBufferSize (maxBufferSize)
980 , m_maxImageWidth (maxImageWidth)
981 , m_maxImageHeight (maxImageHeight)
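// Immutable per-test context: device handles, queues, command pool and compiled program binaries shared by all commands.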
988 Context (const vk::InstanceInterface& vki,
989 const vk::DeviceInterface& vkd,
990 vk::VkPhysicalDevice physicalDevice,
993 deUint32 queueFamilyIndex,
994 const vector<pair<deUint32, vk::VkQueue> >& queues,
995 const vk::ProgramCollection<vk::ProgramBinary>& binaryCollection)
998 , m_physicalDevice (physicalDevice)
1001 , m_queueFamilyIndex (queueFamilyIndex)
1003 , m_commandPool (createCommandPool(vkd, device, vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex))
1004 , m_binaryCollection (binaryCollection)
1006 for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
1007 m_queueFamilies.push_back(m_queues[queueNdx].first);
1010 const vk::InstanceInterface& getInstanceInterface (void) const { return m_vki; }
1011 vk::VkPhysicalDevice getPhysicalDevice (void) const { return m_physicalDevice; }
1012 vk::VkDevice getDevice (void) const { return m_device; }
1013 const vk::DeviceInterface& getDeviceInterface (void) const { return m_vkd; }
1014 vk::VkQueue getQueue (void) const { return m_queue; }
1015 deUint32 getQueueFamily (void) const { return m_queueFamilyIndex; }
1016 const vector<pair<deUint32, vk::VkQueue> >& getQueues (void) const { return m_queues; }
1017 const vector<deUint32> getQueueFamilies (void) const { return m_queueFamilies; }
1018 vk::VkCommandPool getCommandPool (void) const { return *m_commandPool; }
1019 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_binaryCollection; }
1022 const vk::InstanceInterface& m_vki;
1023 const vk::DeviceInterface& m_vkd;
1024 const vk::VkPhysicalDevice m_physicalDevice;
1025 const vk::VkDevice m_device;
1026 const vk::VkQueue m_queue;
1027 const deUint32 m_queueFamilyIndex;
1028 const vector<pair<deUint32, vk::VkQueue> > m_queues;
1029 const vk::Unique<vk::VkCommandPool> m_commandPool;
1030 const vk::ProgramCollection<vk::ProgramBinary>& m_binaryCollection;
1031 vector<deUint32> m_queueFamilies;
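// Mutable state used during the prepare phase: the buffer or image currently bound to the memory under test.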
1034 class PrepareContext
1037 PrepareContext (const Context& context,
1038 const Memory& memory)
1039 : m_context (context)
1044 const Memory& getMemory (void) const { return m_memory; }
1045 const Context& getContext (void) const { return m_context; }
1046 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
1048 void setBuffer (vk::Move<vk::VkBuffer> buffer,
1049 vk::VkDeviceSize size)
1051 DE_ASSERT(!m_currentImage);
1052 DE_ASSERT(!m_currentBuffer);
1054 m_currentBuffer = buffer;
1055 m_currentBufferSize = size;
1058 vk::VkBuffer getBuffer (void) const { return *m_currentBuffer; }
1059 vk::VkDeviceSize getBufferSize (void) const
1061 DE_ASSERT(m_currentBuffer);
1062 return m_currentBufferSize;
1065 void releaseBuffer (void) { m_currentBuffer.disown(); }
1067 void setImage (vk::Move<vk::VkImage> image,
1068 vk::VkImageLayout layout,
1069 vk::VkDeviceSize memorySize,
1073 DE_ASSERT(!m_currentImage);
1074 DE_ASSERT(!m_currentBuffer);
1076 m_currentImage = image;
1077 m_currentImageMemorySize = memorySize;
1078 m_currentImageLayout = layout;
1079 m_currentImageWidth = width;
1080 m_currentImageHeight = height;
1083 void setImageLayout (vk::VkImageLayout layout)
1085 DE_ASSERT(m_currentImage);
1086 m_currentImageLayout = layout;
1089 vk::VkImage getImage (void) const { return *m_currentImage; }
1090 deInt32 getImageWidth (void) const
1092 DE_ASSERT(m_currentImage);
1093 return m_currentImageWidth;
1095 deInt32 getImageHeight (void) const
1097 DE_ASSERT(m_currentImage);
1098 return m_currentImageHeight;
1100 vk::VkDeviceSize getImageMemorySize (void) const
1102 DE_ASSERT(m_currentImage);
1103 return m_currentImageMemorySize;
1106 void releaseImage (void) { m_currentImage.disown(); }
1108 vk::VkImageLayout getImageLayout (void) const
1110 DE_ASSERT(m_currentImage);
1111 return m_currentImageLayout;
1115 const Context& m_context;
1116 const Memory& m_memory;
1118 vk::Move<vk::VkBuffer> m_currentBuffer;
1119 vk::VkDeviceSize m_currentBufferSize;
1121 vk::Move<vk::VkImage> m_currentImage;
1122 vk::VkDeviceSize m_currentImageMemorySize;
1123 vk::VkImageLayout m_currentImageLayout;
1124 deInt32 m_currentImageWidth;
1125 deInt32 m_currentImageHeight;
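// State used during the execute phase; holds the host mapping of the memory under test.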
1128 class ExecuteContext
1131 ExecuteContext (const Context& context)
1132 : m_context (context)
1136 const Context& getContext (void) const { return m_context; }
1137 void setMapping (void* ptr) { m_mapping = ptr; }
1138 void* getMapping (void) const { return m_mapping; }
1141 const Context& m_context;
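// State used during the verify phase: test log, result collector and the reference memory and image contents.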
1148 VerifyContext (TestLog& log,
1149 tcu::ResultCollector& resultCollector,
1150 const Context& context,
1151 vk::VkDeviceSize size)
1153 , m_resultCollector (resultCollector)
1154 , m_context (context)
1155 , m_reference ((size_t)size)
1159 const Context& getContext (void) const { return m_context; }
1160 TestLog& getLog (void) const { return m_log; }
1161 tcu::ResultCollector& getResultCollector (void) const { return m_resultCollector; }
1163 ReferenceMemory& getReference (void) { return m_reference; }
1164 TextureLevel& getReferenceImage (void) { return m_referenceImage;}
1168 tcu::ResultCollector& m_resultCollector;
1169 const Context& m_context;
1170 ReferenceMemory m_reference;
1171 TextureLevel m_referenceImage;
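// Base class for commands executed directly on the host, outside a command buffer (map, flush, submit, wait, ...).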
1177 // Constructor should allocate all non-Vulkan resources.
1178 virtual ~Command (void) {}
1180 // Get name of the command
1181 virtual const char* getName (void) const = 0;
1183 // Log prepare operations
1184 virtual void logPrepare (TestLog&, size_t) const {}
1185 // Log executed operations
1186 virtual void logExecute (TestLog&, size_t) const {}
1188 // Prepare should allocate all Vulkan resources and any resources that
1189 // require the buffer or memory to have been allocated already. It should
1190 // also build all command buffers etc.
1191 virtual void prepare (PrepareContext&) {}
1193 // Execute the command: write or read mapped memory, submit commands to a queue, etc.
1195 virtual void execute (ExecuteContext&) {}
1197 // Verify that results are correct.
1198 virtual void verify (VerifyContext&, size_t) {}
1201 // Allow only inheritance
1206 Command (const Command&);
1207 Command& operator= (const Command&);
1210 class Map : public Command
1215 const char* getName (void) const { return "Map"; }
1218 void logExecute (TestLog& log, size_t commandIndex) const
1220 log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
1223 void prepare (PrepareContext& context)
1225 m_memory = context.getMemory().getMemory();
1226 m_size = context.getMemory().getSize();
1229 void execute (ExecuteContext& context)
1231 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1232 const vk::VkDevice device = context.getContext().getDevice();
1234 context.setMapping(mapMemory(vkd, device, m_memory, m_size));
1238 vk::VkDeviceMemory m_memory;
1239 vk::VkDeviceSize m_size;
1242 class UnMap : public Command
1247 const char* getName (void) const { return "UnMap"; }
1249 void logExecute (TestLog& log, size_t commandIndex) const
1251 log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
1254 void prepare (PrepareContext& context)
1256 m_memory = context.getMemory().getMemory();
1259 void execute (ExecuteContext& context)
1261 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1262 const vk::VkDevice device = context.getContext().getDevice();
1264 vkd.unmapMemory(device, m_memory);
1265 context.setMapping(DE_NULL);
1269 vk::VkDeviceMemory m_memory;
1272 class Invalidate : public Command
1275 Invalidate (void) {}
1276 ~Invalidate (void) {}
1277 const char* getName (void) const { return "Invalidate"; }
1279 void logExecute (TestLog& log, size_t commandIndex) const
1281 log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
1284 void prepare (PrepareContext& context)
1286 m_memory = context.getMemory().getMemory();
1287 m_size = context.getMemory().getSize();
1290 void execute (ExecuteContext& context)
1292 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1293 const vk::VkDevice device = context.getContext().getDevice();
1295 vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1299 vk::VkDeviceMemory m_memory;
1300 vk::VkDeviceSize m_size;
1303 class Flush : public Command
1308 const char* getName (void) const { return "Flush"; }
1310 void logExecute (TestLog& log, size_t commandIndex) const
1312 log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
1315 void prepare (PrepareContext& context)
1317 m_memory = context.getMemory().getMemory();
1318 m_size = context.getMemory().getSize();
1321 void execute (ExecuteContext& context)
1323 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1324 const vk::VkDevice device = context.getContext().getDevice();
1326 vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
1330 vk::VkDeviceMemory m_memory;
1331 vk::VkDeviceSize m_size;
1334 // Host memory reads and writes
1335 class HostMemoryAccess : public Command
1338 HostMemoryAccess (bool read, bool write, deUint32 seed);
1339 ~HostMemoryAccess (void) {}
1340 const char* getName (void) const { return "HostMemoryAccess"; }
1342 void logExecute (TestLog& log, size_t commandIndex) const;
1343 void prepare (PrepareContext& context);
1344 void execute (ExecuteContext& context);
1345 void verify (VerifyContext& context, size_t commandIndex);
1350 const deUint32 m_seed;
1353 vector<deUint8> m_readData;
1356 HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
1363 void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
1365 log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "") << ", seed: " << m_seed << TestLog::EndMessage;
1368 void HostMemoryAccess::prepare (PrepareContext& context)
1370 m_size = (size_t)context.getMemory().getSize();
1373 m_readData.resize(m_size, 0);
1376 void HostMemoryAccess::execute (ExecuteContext& context)
1378 de::Random rng (m_seed);
1379 deUint8* const ptr = (deUint8*)context.getMapping();
1381 if (m_read && m_write)
1383 for (size_t pos = 0; pos < m_size; pos++)
1385 const deUint8 mask = rng.getUint8();
1386 const deUint8 value = ptr[pos];
1388 m_readData[pos] = value;
1389 ptr[pos] = value ^ mask;
1394 for (size_t pos = 0; pos < m_size; pos++)
1396 const deUint8 value = ptr[pos];
1398 m_readData[pos] = value;
1403 for (size_t pos = 0; pos < m_size; pos++)
1405 const deUint8 value = rng.getUint8();
1411 DE_FATAL("Host memory access without read or write.");
1414 void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
1416 tcu::ResultCollector& resultCollector = context.getResultCollector();
1417 ReferenceMemory& reference = context.getReference();
1418 de::Random rng (m_seed);
1420 if (m_read && m_write)
1422 for (size_t pos = 0; pos < m_size; pos++)
1424 const deUint8 mask = rng.getUint8();
1425 const deUint8 value = m_readData[pos];
1427 if (reference.isDefined(pos))
1429 if (value != reference.get(pos))
1431 resultCollector.fail(
1432 de::toString(commandIndex) + ":" + getName()
1433 + " Result differs from reference, Expected: "
1434 + de::toString(tcu::toHex<8>(reference.get(pos)))
1436 + de::toString(tcu::toHex<8>(value))
1438 + de::toString(pos));
1442 reference.set(pos, reference.get(pos) ^ mask);
1448 for (size_t pos = 0; pos < m_size; pos++)
1450 const deUint8 value = m_readData[pos];
1452 if (reference.isDefined(pos))
1454 if (value != reference.get(pos))
1456 resultCollector.fail(
1457 de::toString(commandIndex) + ":" + getName()
1458 + " Result differs from reference, Expected: "
1459 + de::toString(tcu::toHex<8>(reference.get(pos)))
1461 + de::toString(tcu::toHex<8>(value))
1463 + de::toString(pos));
1471 for (size_t pos = 0; pos < m_size; pos++)
1473 const deUint8 value = rng.getUint8();
1475 reference.set(pos, value);
1479 DE_FATAL("Host memory access without read or write.");
1482 class CreateBuffer : public Command
1485 CreateBuffer (vk::VkBufferUsageFlags usage,
1486 vk::VkSharingMode sharing);
1487 ~CreateBuffer (void) {}
1488 const char* getName (void) const { return "CreateBuffer"; }
1490 void logPrepare (TestLog& log, size_t commandIndex) const;
1491 void prepare (PrepareContext& context);
1494 const vk::VkBufferUsageFlags m_usage;
1495 const vk::VkSharingMode m_sharing;
1498 CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags usage,
1499 vk::VkSharingMode sharing)
1501 , m_sharing (sharing)
1505 void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1507 log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
1510 void CreateBuffer::prepare (PrepareContext& context)
1512 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1513 const vk::VkDevice device = context.getContext().getDevice();
1514 const vk::VkDeviceSize bufferSize = context.getMemory().getMaxBufferSize();
1515 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1517 context.setBuffer(createBuffer(vkd, device, bufferSize, m_usage, m_sharing, queueFamilies), bufferSize);
1520 class DestroyBuffer : public Command
1523 DestroyBuffer (void);
1524 ~DestroyBuffer (void) {}
1525 const char* getName (void) const { return "DestroyBuffer"; }
1527 void logExecute (TestLog& log, size_t commandIndex) const;
1528 void prepare (PrepareContext& context);
1529 void execute (ExecuteContext& context);
1532 vk::Move<vk::VkBuffer> m_buffer;
1535 DestroyBuffer::DestroyBuffer (void)
1539 void DestroyBuffer::prepare (PrepareContext& context)
1541 m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1542 context.releaseBuffer();
1545 void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
1547 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
1550 void DestroyBuffer::execute (ExecuteContext& context)
1552 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1553 const vk::VkDevice device = context.getContext().getDevice();
1555 vkd.destroyBuffer(device, m_buffer.disown(), DE_NULL);
1558 class BindBufferMemory : public Command
1561 BindBufferMemory (void) {}
1562 ~BindBufferMemory (void) {}
1563 const char* getName (void) const { return "BindBufferMemory"; }
1565 void logPrepare (TestLog& log, size_t commandIndex) const;
1566 void prepare (PrepareContext& context);
1569 void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
1571 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
1574 void BindBufferMemory::prepare (PrepareContext& context)
1576 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1577 const vk::VkDevice device = context.getContext().getDevice();
1579 VK_CHECK(vkd.bindBufferMemory(device, context.getBuffer(), context.getMemory().getMemory(), 0));
1582 class CreateImage : public Command
1585 CreateImage (vk::VkImageUsageFlags usage,
1586 vk::VkSharingMode sharing);
1587 ~CreateImage (void) {}
1588 const char* getName (void) const { return "CreateImage"; }
1590 void logPrepare (TestLog& log, size_t commandIndex) const;
1591 void prepare (PrepareContext& context);
1592 void verify (VerifyContext& context, size_t commandIndex);
1595 const vk::VkImageUsageFlags m_usage;
1596 const vk::VkSharingMode m_sharing;
1597 deInt32 m_imageWidth;
1598 deInt32 m_imageHeight;
1601 CreateImage::CreateImage (vk::VkImageUsageFlags usage,
1602 vk::VkSharingMode sharing)
1604 , m_sharing (sharing)
1608 void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
1610 log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage) << TestLog::EndMessage;
1613 void CreateImage::prepare (PrepareContext& context)
1615 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1616 const vk::VkDevice device = context.getContext().getDevice();
1617 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
1619 m_imageWidth = context.getMemory().getMaxImageWidth();
1620 m_imageHeight = context.getMemory().getMaxImageHeight();
1623 const vk::VkImageCreateInfo createInfo =
1625 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
1629 vk::VK_IMAGE_TYPE_2D,
1630 vk::VK_FORMAT_R8G8B8A8_UNORM,
1632 (deUint32)m_imageWidth,
1633 (deUint32)m_imageHeight,
1637 vk::VK_SAMPLE_COUNT_1_BIT,
1638 vk::VK_IMAGE_TILING_OPTIMAL,
1641 (deUint32)queueFamilies.size(),
1643 vk::VK_IMAGE_LAYOUT_UNDEFINED
1645 vk::Move<vk::VkImage> image (createImage(vkd, device, &createInfo));
1646 const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, *image);
1648 context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
1652 void CreateImage::verify (VerifyContext& context, size_t)
1654 context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
1657 class DestroyImage : public Command
1660 DestroyImage (void);
1661 ~DestroyImage (void) {}
1662 const char* getName (void) const { return "DestroyImage"; }
1664 void logExecute (TestLog& log, size_t commandIndex) const;
1665 void prepare (PrepareContext& context);
1666 void execute (ExecuteContext& context);
1669 vk::Move<vk::VkImage> m_image;
1672 DestroyImage::DestroyImage (void)
1676 void DestroyImage::prepare (PrepareContext& context)
1678 m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(context.getContext().getDeviceInterface(), context.getContext().getDevice(), DE_NULL));
1679 context.releaseImage();
1683 void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
1685 log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
1688 void DestroyImage::execute (ExecuteContext& context)
1690 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1691 const vk::VkDevice device = context.getContext().getDevice();
1693 vkd.destroyImage(device, m_image.disown(), DE_NULL);
1696 class BindImageMemory : public Command
1699 BindImageMemory (void) {}
1700 ~BindImageMemory (void) {}
1701 const char* getName (void) const { return "BindImageMemory"; }
1703 void logPrepare (TestLog& log, size_t commandIndex) const;
1704 void prepare (PrepareContext& context);
1707 void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
1709 log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
1712 void BindImageMemory::prepare (PrepareContext& context)
1714 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1715 const vk::VkDevice device = context.getContext().getDevice();
1717 VK_CHECK(vkd.bindImageMemory(device, context.getImage(), context.getMemory().getMemory(), 0));
1720 class QueueWaitIdle : public Command
1723 QueueWaitIdle (void) {}
1724 ~QueueWaitIdle (void) {}
1725 const char* getName (void) const { return "QueueWaitIdle"; }
1727 void logExecute (TestLog& log, size_t commandIndex) const;
1728 void execute (ExecuteContext& context);
1731 void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1733 log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
1736 void QueueWaitIdle::execute (ExecuteContext& context)
1738 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1739 const vk::VkQueue queue = context.getContext().getQueue();
1741 VK_CHECK(vkd.queueWaitIdle(queue));
1744 class DeviceWaitIdle : public Command
1747 DeviceWaitIdle (void) {}
1748 ~DeviceWaitIdle (void) {}
1749 const char* getName (void) const { return "DeviceWaitIdle"; }
1751 void logExecute (TestLog& log, size_t commandIndex) const;
1752 void execute (ExecuteContext& context);
1755 void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
1757 log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
1760 void DeviceWaitIdle::execute (ExecuteContext& context)
1762 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1763 const vk::VkDevice device = context.getContext().getDevice();
1765 VK_CHECK(vkd.deviceWaitIdle(device));
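// Read-only view of the prepared resources and the command buffer being recorded, passed to CmdCommand::submit().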
1771 SubmitContext (const PrepareContext& context,
1772 const vk::VkCommandBuffer commandBuffer)
1773 : m_context (context)
1774 , m_commandBuffer (commandBuffer)
1778 const Memory& getMemory (void) const { return m_context.getMemory(); }
1779 const Context& getContext (void) const { return m_context.getContext(); }
1780 vk::VkCommandBuffer getCommandBuffer (void) const { return m_commandBuffer; }
1782 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
1783 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
1785 vk::VkImage getImage (void) const { return m_context.getImage(); }
1786 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
1787 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
1790 const PrepareContext& m_context;
1791 const vk::VkCommandBuffer m_commandBuffer;
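// Base class for commands that are recorded into a command buffer by SubmitCommandBuffer.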
1797 virtual ~CmdCommand (void) {}
1798 virtual const char* getName (void) const = 0;
1800 // Log things that are done during prepare
1801 virtual void logPrepare (TestLog&, size_t) const {}
1802 // Log submitted calls etc.
1803 virtual void logSubmit (TestLog&, size_t) const {}
1805 // Allocate Vulkan resources and prepare for submit.
1806 virtual void prepare (PrepareContext&) {}
1808 // Submit commands to command buffer.
1809 virtual void submit (SubmitContext&) {}
1812 virtual void verify (VerifyContext&, size_t) {}
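// Host command that records its child CmdCommands into a primary command buffer and submits it to the queue.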
1815 class SubmitCommandBuffer : public Command
1818 SubmitCommandBuffer (const vector<CmdCommand*>& commands);
1819 ~SubmitCommandBuffer (void);
1821 const char* getName (void) const { return "SubmitCommandBuffer"; }
1822 void logExecute (TestLog& log, size_t commandIndex) const;
1823 void logPrepare (TestLog& log, size_t commandIndex) const;
1825 // Allocate the command buffer and record the child commands into it
1826 void prepare (PrepareContext& context);
1827 void execute (ExecuteContext& context);
1829 // Verify that results are correct.
1830 void verify (VerifyContext& context, size_t commandIndex);
1833 vector<CmdCommand*> m_commands;
1834 vk::Move<vk::VkCommandBuffer> m_commandBuffer;
1837 SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
1838 : m_commands (commands)
1842 SubmitCommandBuffer::~SubmitCommandBuffer (void)
1844 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1845 delete m_commands[cmdNdx];
1848 void SubmitCommandBuffer::prepare (PrepareContext& context)
1850 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1851 const vk::VkDevice device = context.getContext().getDevice();
1852 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
1854 m_commandBuffer = createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1856 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1858 CmdCommand& command = *m_commands[cmdNdx];
1860 command.prepare(context);
1864 SubmitContext submitContext (context, *m_commandBuffer);
1866 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1868 CmdCommand& command = *m_commands[cmdNdx];
1870 command.submit(submitContext);
1873 VK_CHECK(vkd.endCommandBuffer(*m_commandBuffer));
1877 void SubmitCommandBuffer::execute (ExecuteContext& context)
1879 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1880 const vk::VkCommandBuffer cmd = *m_commandBuffer;
1881 const vk::VkQueue queue = context.getContext().getQueue();
1882 const vk::VkSubmitInfo submit =
1884 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1889 (const vk::VkPipelineStageFlags*)DE_NULL,
1898 VK_CHECK(vkd.queueSubmit(queue, 1, &submit, 0));
1901 void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
1903 const string sectionName (de::toString(commandIndex) + ":" + getName());
1904 const tcu::ScopedLogSection section (context.getLog(), sectionName, sectionName);
1906 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1907 m_commands[cmdNdx]->verify(context, cmdNdx);
1910 void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
1912 const string sectionName (de::toString(commandIndex) + ":" + getName());
1913 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1915 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1916 m_commands[cmdNdx]->logPrepare(log, cmdNdx);
1919 void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
1921 const string sectionName (de::toString(commandIndex) + ":" + getName());
1922 const tcu::ScopedLogSection section (log, sectionName, sectionName);
1924 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
1925 m_commands[cmdNdx]->logSubmit(log, cmdNdx);
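// Records vkCmdPipelineBarrier with either a global memory barrier, a buffer memory barrier or an image memory barrier.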
1928 class PipelineBarrier : public CmdCommand
1938 PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1939 const vk::VkAccessFlags srcAccesses,
1940 const vk::VkPipelineStageFlags dstStages,
1941 const vk::VkAccessFlags dstAccesses,
1943 const tcu::Maybe<vk::VkImageLayout> imageLayout);
1944 ~PipelineBarrier (void) {}
1945 const char* getName (void) const { return "PipelineBarrier"; }
1947 void logSubmit (TestLog& log, size_t commandIndex) const;
1948 void submit (SubmitContext& context);
1951 const vk::VkPipelineStageFlags m_srcStages;
1952 const vk::VkAccessFlags m_srcAccesses;
1953 const vk::VkPipelineStageFlags m_dstStages;
1954 const vk::VkAccessFlags m_dstAccesses;
1956 const tcu::Maybe<vk::VkImageLayout> m_imageLayout;
1959 PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
1960 const vk::VkAccessFlags srcAccesses,
1961 const vk::VkPipelineStageFlags dstStages,
1962 const vk::VkAccessFlags dstAccesses,
1964 const tcu::Maybe<vk::VkImageLayout> imageLayout)
1965 : m_srcStages (srcStages)
1966 , m_srcAccesses (srcAccesses)
1967 , m_dstStages (dstStages)
1968 , m_dstAccesses (dstAccesses)
1970 , m_imageLayout (imageLayout)
1974 void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
1976 log << TestLog::Message << commandIndex << ":" << getName()
1977 << " " << (m_type == TYPE_GLOBAL ? "Global pipeline barrier"
1978 : m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
1979 : "Image pipeline barrier")
1980 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
1981 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
1984 void PipelineBarrier::submit (SubmitContext& context)
1986 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
1987 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
1993 const vk::VkMemoryBarrier barrier =
1995 vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
2002 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2008 const vk::VkBufferMemoryBarrier barrier =
2010 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2016 VK_QUEUE_FAMILY_IGNORED,
2017 VK_QUEUE_FAMILY_IGNORED,
2019 context.getBuffer(),
2024 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2030 const vk::VkImageMemoryBarrier barrier =
2032 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2041 VK_QUEUE_FAMILY_IGNORED,
2042 VK_QUEUE_FAMILY_IGNORED,
2046 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2052 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2057 DE_FATAL("Unknown pipeline barrier type");
2061 class ImageTransition : public CmdCommand
2064 ImageTransition (vk::VkPipelineStageFlags srcStages,
2065 vk::VkAccessFlags srcAccesses,
2067 vk::VkPipelineStageFlags dstStages,
2068 vk::VkAccessFlags dstAccesses,
2070 vk::VkImageLayout srcLayout,
2071 vk::VkImageLayout dstLayout);
2073 ~ImageTransition (void) {}
2074 const char* getName (void) const { return "ImageTransition"; }
2076 void prepare (PrepareContext& context);
2077 void logSubmit (TestLog& log, size_t commandIndex) const;
2078 void submit (SubmitContext& context);
2079 void verify (VerifyContext& context, size_t);
2082 const vk::VkPipelineStageFlags m_srcStages;
2083 const vk::VkAccessFlags m_srcAccesses;
2084 const vk::VkPipelineStageFlags m_dstStages;
2085 const vk::VkAccessFlags m_dstAccesses;
2086 const vk::VkImageLayout m_srcLayout;
2087 const vk::VkImageLayout m_dstLayout;
2089 vk::VkDeviceSize m_imageMemorySize;
2092 ImageTransition::ImageTransition (vk::VkPipelineStageFlags srcStages,
2093 vk::VkAccessFlags srcAccesses,
2095 vk::VkPipelineStageFlags dstStages,
2096 vk::VkAccessFlags dstAccesses,
2098 vk::VkImageLayout srcLayout,
2099 vk::VkImageLayout dstLayout)
2100 : m_srcStages (srcStages)
2101 , m_srcAccesses (srcAccesses)
2102 , m_dstStages (dstStages)
2103 , m_dstAccesses (dstAccesses)
2104 , m_srcLayout (srcLayout)
2105 , m_dstLayout (dstLayout)
2109 void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
2111 log << TestLog::Message << commandIndex << ":" << getName()
2112 << " Image transition pipeline barrier"
2113 << ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
2114 << ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
2115 << ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
2118 void ImageTransition::prepare (PrepareContext& context)
2120 DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);
2122 context.setImageLayout(m_dstLayout);
2123 m_imageMemorySize = context.getImageMemorySize();
2126 void ImageTransition::submit (SubmitContext& context)
2128 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2129 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2130 const vk::VkImageMemoryBarrier barrier =
2132 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2141 VK_QUEUE_FAMILY_IGNORED,
2142 VK_QUEUE_FAMILY_IGNORED,
2146 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2152 vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2155 void ImageTransition::verify (VerifyContext& context, size_t)
2157 context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
2160 class FillBuffer : public CmdCommand
2163 FillBuffer (deUint32 value) : m_value(value) {}
2164 ~FillBuffer (void) {}
2165 const char* getName (void) const { return "FillBuffer"; }
2167 void logSubmit (TestLog& log, size_t commandIndex) const;
2168 void submit (SubmitContext& context);
2169 void verify (VerifyContext& context, size_t commandIndex);
2172 const deUint32 m_value;
2173 vk::VkDeviceSize m_bufferSize;
2176 void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2178 log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
2181 void FillBuffer::submit (SubmitContext& context)
2183 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2184 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2185 const vk::VkBuffer buffer = context.getBuffer();
2186 const vk::VkDeviceSize sizeMask = ~(0x3ull); // \note Round down to multiple of 4
2188 m_bufferSize = sizeMask & context.getBufferSize();
2189 vkd.cmdFillBuffer(cmd, buffer, 0, m_bufferSize, m_value);
2192 void FillBuffer::verify (VerifyContext& context, size_t)
2194 ReferenceMemory& reference = context.getReference();
2196 for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
2198 #if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
2199 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(ndx % 4)))));
2200 #else
2201 reference.set(ndx, (deUint8)(0xffu & (m_value >> (8*(3 - (ndx % 4))))));
2202 #endif
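// Overwrites the test buffer with seeded pseudo-random data via vkCmdUpdateBuffer in
// 65536-byte chunks (the per-call limit of vkCmdUpdateBuffer). verify() re-seeds the
// same RNG and writes the identical byte stream into the reference.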
2206 class UpdateBuffer : public CmdCommand
2209 UpdateBuffer (deUint32 seed) : m_seed(seed) {}
2210 ~UpdateBuffer (void) {}
2211 const char* getName (void) const { return "UpdateBuffer"; }
2213 void logSubmit (TestLog& log, size_t commandIndex) const;
2214 void submit (SubmitContext& context);
2215 void verify (VerifyContext& context, size_t commandIndex);
2218 const deUint32 m_seed;
2219 vk::VkDeviceSize m_bufferSize;
2222 void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2224 log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
2227 void UpdateBuffer::submit (SubmitContext& context)
2229 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2230 const vk::VkCommandBuffer cmd = context.getCommandBuffer();
2231 const vk::VkBuffer buffer = context.getBuffer();
2232 const size_t blockSize = 65536;
2233 std::vector<deUint8> data (blockSize, 0);
2234 de::Random rng (m_seed);
2236 m_bufferSize = context.getBufferSize();
2238 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2240 for (size_t ndx = 0; ndx < data.size(); ndx++)
2241 data[ndx] = rng.getUint8();
2243 if (m_bufferSize - updated > blockSize)
2244 vkd.cmdUpdateBuffer(cmd, buffer, updated, blockSize, (const deUint32*)(&data[0]));
2245 else
2246 vkd.cmdUpdateBuffer(cmd, buffer, updated, m_bufferSize - updated, (const deUint32*)(&data[0]));
2250 void UpdateBuffer::verify (VerifyContext& context, size_t)
2252 ReferenceMemory& reference = context.getReference();
2253 const size_t blockSize = 65536;
2254 vector<deUint8> data (blockSize, 0);
2255 de::Random rng (m_seed);
2257 for (size_t updated = 0; updated < m_bufferSize; updated += blockSize)
2259 for (size_t ndx = 0; ndx < data.size(); ndx++)
2260 data[ndx] = rng.getUint8();
2262 if (m_bufferSize - updated > blockSize)
2263 reference.setData(updated, blockSize, &data[0]);
2264 else
2265 reference.setData(updated, (size_t)(m_bufferSize - updated), &data[0]);
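// Copies the whole test buffer into a freshly allocated host-visible destination
// buffer. verify() issues a TRANSFER -> HOST barrier, maps the destination and
// compares every byte that is defined in the reference against the copied data.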
2269 class BufferCopyToBuffer : public CmdCommand
2272 BufferCopyToBuffer (void) {}
2273 ~BufferCopyToBuffer (void) {}
2274 const char* getName (void) const { return "BufferCopyToBuffer"; }
2276 void logPrepare (TestLog& log, size_t commandIndex) const;
2277 void prepare (PrepareContext& context);
2278 void logSubmit (TestLog& log, size_t commandIndex) const;
2279 void submit (SubmitContext& context);
2280 void verify (VerifyContext& context, size_t commandIndex);
2283 vk::VkDeviceSize m_bufferSize;
2284 vk::Move<vk::VkBuffer> m_dstBuffer;
2285 vk::Move<vk::VkDeviceMemory> m_memory;
2288 void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2290 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
2293 void BufferCopyToBuffer::prepare (PrepareContext& context)
2295 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2296 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2297 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2298 const vk::VkDevice device = context.getContext().getDevice();
2299 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2301 m_bufferSize = context.getBufferSize();
2303 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2304 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2307 void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2309 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
2312 void BufferCopyToBuffer::submit (SubmitContext& context)
2314 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2315 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2316 const vk::VkBufferCopy range =
2322 vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
2325 void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2327 tcu::ResultCollector& resultCollector (context.getResultCollector());
2328 ReferenceMemory& reference (context.getReference());
2329 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2330 const vk::VkDevice device = context.getContext().getDevice();
2331 const vk::VkQueue queue = context.getContext().getQueue();
2332 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2333 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2334 const vk::VkBufferMemoryBarrier barrier =
2336 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2339 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2340 vk::VK_ACCESS_HOST_READ_BIT,
2342 VK_QUEUE_FAMILY_IGNORED,
2343 VK_QUEUE_FAMILY_IGNORED,
2349 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2351 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2352 queueRun(vkd, queue, *commandBuffer);
2355 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2358 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2361 const deUint8* const data = (const deUint8*)ptr;
2363 for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
2365 if (reference.isDefined(pos))
2367 if (data[pos] != reference.get(pos))
2369 resultCollector.fail(
2370 de::toString(commandIndex) + ":" + getName()
2371 + " Result differs from reference, Expected: "
2372 + de::toString(tcu::toHex<8>(reference.get(pos)))
2373 + ", Got: "
2374 + de::toString(tcu::toHex<8>(data[pos]))
2375 + ", At offset: "
2376 + de::toString(pos));
2383 vkd.unmapMemory(device, *m_memory);
2386 context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
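// Copies data into the test buffer from a host-visible staging buffer that prepare()
// filled with seeded random bytes; verify() regenerates the same byte stream into
// the reference.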
2390 class BufferCopyFromBuffer : public CmdCommand
2393 BufferCopyFromBuffer (deUint32 seed) : m_seed(seed) {}
2394 ~BufferCopyFromBuffer (void) {}
2395 const char* getName (void) const { return "BufferCopyFromBuffer"; }
2397 void logPrepare (TestLog& log, size_t commandIndex) const;
2398 void prepare (PrepareContext& context);
2399 void logSubmit (TestLog& log, size_t commandIndex) const;
2400 void submit (SubmitContext& context);
2401 void verify (VerifyContext& context, size_t commandIndex);
2404 const deUint32 m_seed;
2405 vk::VkDeviceSize m_bufferSize;
2406 vk::Move<vk::VkBuffer> m_srcBuffer;
2407 vk::Move<vk::VkDeviceMemory> m_memory;
2410 void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2412 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to buffer copy. Seed: " << m_seed << TestLog::EndMessage;
2415 void BufferCopyFromBuffer::prepare (PrepareContext& context)
2417 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2418 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2419 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2420 const vk::VkDevice device = context.getContext().getDevice();
2421 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2423 m_bufferSize = context.getBufferSize();
2424 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2425 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2428 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
2429 de::Random rng (m_seed);
2432 deUint8* const data = (deUint8*)ptr;
2434 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2435 data[ndx] = rng.getUint8();
2438 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
2439 vkd.unmapMemory(device, *m_memory);
2443 void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2445 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from another buffer" << TestLog::EndMessage;
2448 void BufferCopyFromBuffer::submit (SubmitContext& context)
2450 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2451 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2452 const vk::VkBufferCopy range =
2458 vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &range);
2461 void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
2463 ReferenceMemory& reference (context.getReference());
2464 de::Random rng (m_seed);
2466 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
2467 reference.set(ndx, rng.getUint8());
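// Copies the test buffer into a transient RGBA8 image whose dimensions are chosen so
// that width * height * 4 matches the buffer size. verify() copies the image back
// through a host-visible buffer and compares it byte by byte with the reference.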
2470 class BufferCopyToImage : public CmdCommand
2473 BufferCopyToImage (void) {}
2474 ~BufferCopyToImage (void) {}
2475 const char* getName (void) const { return "BufferCopyToImage"; }
2477 void logPrepare (TestLog& log, size_t commandIndex) const;
2478 void prepare (PrepareContext& context);
2479 void logSubmit (TestLog& log, size_t commandIndex) const;
2480 void submit (SubmitContext& context);
2481 void verify (VerifyContext& context, size_t commandIndex);
2484 deInt32 m_imageWidth;
2485 deInt32 m_imageHeight;
2486 vk::Move<vk::VkImage> m_dstImage;
2487 vk::Move<vk::VkDeviceMemory> m_memory;
2490 void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
2492 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for buffer to image copy." << TestLog::EndMessage;
2495 void BufferCopyToImage::prepare (PrepareContext& context)
2497 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2498 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2499 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2500 const vk::VkDevice device = context.getContext().getDevice();
2501 const vk::VkQueue queue = context.getContext().getQueue();
2502 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2503 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2504 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2506 m_imageWidth = imageSize[0];
2507 m_imageHeight = imageSize[1];
2510 const vk::VkImageCreateInfo createInfo =
2512 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2516 vk::VK_IMAGE_TYPE_2D,
2517 vk::VK_FORMAT_R8G8B8A8_UNORM,
2519 (deUint32)m_imageWidth,
2520 (deUint32)m_imageHeight,
2523 1, 1, // mipLevels, arrayLayers
2524 vk::VK_SAMPLE_COUNT_1_BIT,
2526 vk::VK_IMAGE_TILING_OPTIMAL,
2527 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2528 vk::VK_SHARING_MODE_EXCLUSIVE,
2530 (deUint32)queueFamilies.size(),
2532 vk::VK_IMAGE_LAYOUT_UNDEFINED
2535 m_dstImage = vk::createImage(vkd, device, &createInfo);
2538 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
2541 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2542 const vk::VkImageMemoryBarrier barrier =
2544 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2548 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2550 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2551 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2553 VK_QUEUE_FAMILY_IGNORED,
2554 VK_QUEUE_FAMILY_IGNORED,
2558 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2560 1, // Mip level count
2566 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
2568 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2569 queueRun(vkd, queue, *commandBuffer);
2573 void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
2575 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to image" << TestLog::EndMessage;
2578 void BufferCopyToImage::submit (SubmitContext& context)
2580 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2581 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2582 const vk::VkBufferImageCopy region =
2587 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2594 (deUint32)m_imageWidth,
2595 (deUint32)m_imageHeight,
2600 vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2603 void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
2605 tcu::ResultCollector& resultCollector (context.getResultCollector());
2606 ReferenceMemory& reference (context.getReference());
2607 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2608 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2609 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2610 const vk::VkDevice device = context.getContext().getDevice();
2611 const vk::VkQueue queue = context.getContext().getQueue();
2612 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2613 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2614 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2615 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2616 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2618 const vk::VkImageMemoryBarrier imageBarrier =
2620 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2623 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2624 vk::VK_ACCESS_TRANSFER_READ_BIT,
2626 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2627 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2629 VK_QUEUE_FAMILY_IGNORED,
2630 VK_QUEUE_FAMILY_IGNORED,
2634 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2636 1, // Mip level count
2641 const vk::VkBufferMemoryBarrier bufferBarrier =
2643 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
2646 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2647 vk::VK_ACCESS_HOST_READ_BIT,
2649 VK_QUEUE_FAMILY_IGNORED,
2650 VK_QUEUE_FAMILY_IGNORED,
2656 const vk::VkBufferImageCopy region =
2661 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2668 (deUint32)m_imageWidth,
2669 (deUint32)m_imageHeight,
2674 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
2675 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
2676 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
2679 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2680 queueRun(vkd, queue, *commandBuffer);
2683 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2685 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2688 const deUint8* const data = (const deUint8*)ptr;
2690 for (size_t pos = 0; pos < (size_t)(4 * m_imageWidth * m_imageHeight); pos++)
2692 if (reference.isDefined(pos))
2694 if (data[pos] != reference.get(pos))
2696 resultCollector.fail(
2697 de::toString(commandIndex) + ":" + getName()
2698 + " Result differs from reference, Expected: "
2699 + de::toString(tcu::toHex<8>(reference.get(pos)))
2700 + ", Got: "
2701 + de::toString(tcu::toHex<8>(data[pos]))
2702 + ", At offset: "
2703 + de::toString(pos));
2710 vkd.unmapMemory(device, *memory);
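// Copies image data into the test buffer from a source image that prepare() filled
// with seeded random texels (uploaded through a staging buffer and transitioned to
// TRANSFER_SRC_OPTIMAL). verify() re-seeds the RNG and writes the same bytes into
// the reference.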
2714 class BufferCopyFromImage : public CmdCommand
2717 BufferCopyFromImage (deUint32 seed) : m_seed(seed) {}
2718 ~BufferCopyFromImage (void) {}
2719 const char* getName (void) const { return "BufferCopyFromImage"; }
2721 void logPrepare (TestLog& log, size_t commandIndex) const;
2722 void prepare (PrepareContext& context);
2723 void logSubmit (TestLog& log, size_t commandIndex) const;
2724 void submit (SubmitContext& context);
2725 void verify (VerifyContext& context, size_t commandIndex);
2728 const deUint32 m_seed;
2729 deInt32 m_imageWidth;
2730 deInt32 m_imageHeight;
2731 vk::Move<vk::VkImage> m_srcImage;
2732 vk::Move<vk::VkDeviceMemory> m_memory;
2735 void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
2737 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to buffer copy." << TestLog::EndMessage;
2740 void BufferCopyFromImage::prepare (PrepareContext& context)
2742 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2743 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2744 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2745 const vk::VkDevice device = context.getContext().getDevice();
2746 const vk::VkQueue queue = context.getContext().getQueue();
2747 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2748 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2749 const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
2751 m_imageWidth = imageSize[0];
2752 m_imageHeight = imageSize[1];
2755 const vk::VkImageCreateInfo createInfo =
2757 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
2761 vk::VK_IMAGE_TYPE_2D,
2762 vk::VK_FORMAT_R8G8B8A8_UNORM,
2764 (deUint32)m_imageWidth,
2765 (deUint32)m_imageHeight,
2768 1, 1, // mipLevels, arrayLayers
2769 vk::VK_SAMPLE_COUNT_1_BIT,
2771 vk::VK_IMAGE_TILING_OPTIMAL,
2772 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
2773 vk::VK_SHARING_MODE_EXCLUSIVE,
2775 (deUint32)queueFamilies.size(),
2777 vk::VK_IMAGE_LAYOUT_UNDEFINED
2780 m_srcImage = vk::createImage(vkd, device, &createInfo);
2783 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
2786 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
2787 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
2788 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2789 const vk::VkImageMemoryBarrier preImageBarrier =
2791 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2795 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2797 vk::VK_IMAGE_LAYOUT_UNDEFINED,
2798 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2800 VK_QUEUE_FAMILY_IGNORED,
2801 VK_QUEUE_FAMILY_IGNORED,
2805 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2807 1, // Mip level count
2812 const vk::VkImageMemoryBarrier postImageBarrier =
2814 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
2817 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
2820 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2821 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
2823 VK_QUEUE_FAMILY_IGNORED,
2824 VK_QUEUE_FAMILY_IGNORED,
2828 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2830 1, // Mip level count
2835 const vk::VkBufferImageCopy region =
2840 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2847 (deUint32)m_imageWidth,
2848 (deUint32)m_imageHeight,
2854 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
2855 de::Random rng (m_seed);
2858 deUint8* const data = (deUint8*)ptr;
2860 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2861 data[ndx] = rng.getUint8();
2864 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
2865 vkd.unmapMemory(device, *memory);
2868 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2869 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
2870 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2872 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
2873 queueRun(vkd, queue, *commandBuffer);
2877 void BufferCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
2879 log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer data from image" << TestLog::EndMessage;
2882 void BufferCopyFromImage::submit (SubmitContext& context)
2884 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2885 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2886 const vk::VkBufferImageCopy region =
2891 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2898 (deUint32)m_imageWidth,
2899 (deUint32)m_imageHeight,
2904 vkd.cmdCopyImageToBuffer(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getBuffer(), 1, &region);
2907 void BufferCopyFromImage::verify (VerifyContext& context, size_t)
2909 ReferenceMemory& reference (context.getReference());
2910 de::Random rng (m_seed);
2912 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
2913 reference.set(ndx, rng.getUint8());
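// Copies the test image (in m_imageLayout) into a host-visible buffer and compares
// the result against the reference image with an exact, zero-threshold comparison.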
2916 class ImageCopyToBuffer : public CmdCommand
2919 ImageCopyToBuffer (vk::VkImageLayout imageLayout) : m_imageLayout (imageLayout) {}
2920 ~ImageCopyToBuffer (void) {}
2921 const char* getName (void) const { return "ImageCopyToBuffer"; }
2923 void logPrepare (TestLog& log, size_t commandIndex) const;
2924 void prepare (PrepareContext& context);
2925 void logSubmit (TestLog& log, size_t commandIndex) const;
2926 void submit (SubmitContext& context);
2927 void verify (VerifyContext& context, size_t commandIndex);
2930 vk::VkImageLayout m_imageLayout;
2931 vk::VkDeviceSize m_bufferSize;
2932 vk::Move<vk::VkBuffer> m_dstBuffer;
2933 vk::Move<vk::VkDeviceMemory> m_memory;
2934 vk::VkDeviceSize m_imageMemorySize;
2935 deInt32 m_imageWidth;
2936 deInt32 m_imageHeight;
2939 void ImageCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
2941 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for image to buffer copy." << TestLog::EndMessage;
2944 void ImageCopyToBuffer::prepare (PrepareContext& context)
2946 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
2947 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2948 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
2949 const vk::VkDevice device = context.getContext().getDevice();
2950 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
2952 m_imageWidth = context.getImageWidth();
2953 m_imageHeight = context.getImageHeight();
2954 m_bufferSize = 4 * m_imageWidth * m_imageHeight;
2955 m_imageMemorySize = context.getImageMemorySize();
2956 m_dstBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
2957 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
2960 void ImageCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
2962 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to buffer" << TestLog::EndMessage;
2965 void ImageCopyToBuffer::submit (SubmitContext& context)
2967 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2968 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
2969 const vk::VkBufferImageCopy region =
2974 vk::VK_IMAGE_ASPECT_COLOR_BIT,
2981 (deUint32)m_imageWidth,
2982 (deUint32)m_imageHeight,
2987 vkd.cmdCopyImageToBuffer(commandBuffer, context.getImage(), m_imageLayout, *m_dstBuffer, 1, &region);
2990 void ImageCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
2992 tcu::ResultCollector& resultCollector (context.getResultCollector());
2993 ReferenceMemory& reference (context.getReference());
2994 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
2995 const vk::VkDevice device = context.getContext().getDevice();
2996 const vk::VkQueue queue = context.getContext().getQueue();
2997 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
2998 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
2999 const vk::VkBufferMemoryBarrier barrier =
3001 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3004 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3005 vk::VK_ACCESS_HOST_READ_BIT,
3007 VK_QUEUE_FAMILY_IGNORED,
3008 VK_QUEUE_FAMILY_IGNORED,
3014 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3016 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3017 queueRun(vkd, queue, *commandBuffer);
3019 reference.setUndefined(0, (size_t)m_imageMemorySize);
3021 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3022 const ConstPixelBufferAccess referenceImage (context.getReferenceImage().getAccess());
3023 const ConstPixelBufferAccess resultImage (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, ptr);
3025 vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3027 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), referenceImage, resultImage, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3028 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3030 vkd.unmapMemory(device, *m_memory);
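// Copies seeded random texel data from a host-visible staging buffer into the test
// image. verify() marks the raw reference memory undefined and regenerates the same
// texels into the reference image.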
3034 class ImageCopyFromBuffer : public CmdCommand
3037 ImageCopyFromBuffer (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3038 ~ImageCopyFromBuffer (void) {}
3039 const char* getName (void) const { return "ImageCopyFromBuffer"; }
3041 void logPrepare (TestLog& log, size_t commandIndex) const;
3042 void prepare (PrepareContext& context);
3043 void logSubmit (TestLog& log, size_t commandIndex) const;
3044 void submit (SubmitContext& context);
3045 void verify (VerifyContext& context, size_t commandIndex);
3048 const deUint32 m_seed;
3049 const vk::VkImageLayout m_imageLayout;
3050 deInt32 m_imageWidth;
3051 deInt32 m_imageHeight;
3052 vk::VkDeviceSize m_imageMemorySize;
3053 vk::VkDeviceSize m_bufferSize;
3054 vk::Move<vk::VkBuffer> m_srcBuffer;
3055 vk::Move<vk::VkDeviceMemory> m_memory;
3058 void ImageCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
3060 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source buffer for buffer to image copy. Seed: " << m_seed << TestLog::EndMessage;
3063 void ImageCopyFromBuffer::prepare (PrepareContext& context)
3065 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3066 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3067 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3068 const vk::VkDevice device = context.getContext().getDevice();
3069 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3071 m_imageWidth = context.getImageWidth();
3072 m_imageHeight = context.getImageHeight();
3073 m_imageMemorySize = context.getImageMemorySize();
3074 m_bufferSize = m_imageWidth * m_imageHeight * 4;
3075 m_srcBuffer = createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
3076 m_memory = bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3079 void* const ptr = mapMemory(vkd, device, *m_memory, m_bufferSize);
3080 de::Random rng (m_seed);
3083 deUint8* const data = (deUint8*)ptr;
3085 for (size_t ndx = 0; ndx < (size_t)m_bufferSize; ndx++)
3086 data[ndx] = rng.getUint8();
3089 vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
3090 vkd.unmapMemory(device, *m_memory);
3094 void ImageCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
3096 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from buffer" << TestLog::EndMessage;
3099 void ImageCopyFromBuffer::submit (SubmitContext& context)
3101 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3102 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3103 const vk::VkBufferImageCopy region =
3108 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3115 (deUint32)m_imageWidth,
3116 (deUint32)m_imageHeight,
3121 vkd.cmdCopyBufferToImage(commandBuffer, *m_srcBuffer, context.getImage(), m_imageLayout, 1, &region);
3124 void ImageCopyFromBuffer::verify (VerifyContext& context, size_t)
3126 ReferenceMemory& reference (context.getReference());
3127 de::Random rng (m_seed);
3129 reference.setUndefined(0, (size_t)m_imageMemorySize);
3132 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3134 for (deInt32 y = 0; y < m_imageHeight; y++)
3135 for (deInt32 x = 0; x < m_imageWidth; x++)
3137 const deUint8 r8 = rng.getUint8();
3138 const deUint8 g8 = rng.getUint8();
3139 const deUint8 b8 = rng.getUint8();
3140 const deUint8 a8 = rng.getUint8();
3142 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
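// Copies into the test image from a source image that prepare() initialised with
// seeded random content (uploaded via a staging buffer with UNDEFINED -> DST and
// DST -> SRC barriers). verify() rewrites the reference image with the same texels.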
3147 class ImageCopyFromImage : public CmdCommand
3150 ImageCopyFromImage (deUint32 seed, vk::VkImageLayout imageLayout) : m_seed(seed), m_imageLayout(imageLayout) {}
3151 ~ImageCopyFromImage (void) {}
3152 const char* getName (void) const { return "ImageCopyFromImage"; }
3154 void logPrepare (TestLog& log, size_t commandIndex) const;
3155 void prepare (PrepareContext& context);
3156 void logSubmit (TestLog& log, size_t commandIndex) const;
3157 void submit (SubmitContext& context);
3158 void verify (VerifyContext& context, size_t commandIndex);
3161 const deUint32 m_seed;
3162 const vk::VkImageLayout m_imageLayout;
3163 deInt32 m_imageWidth;
3164 deInt32 m_imageHeight;
3165 vk::VkDeviceSize m_imageMemorySize;
3166 vk::Move<vk::VkImage> m_srcImage;
3167 vk::Move<vk::VkDeviceMemory> m_memory;
3170 void ImageCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3172 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image copy." << TestLog::EndMessage;
3175 void ImageCopyFromImage::prepare (PrepareContext& context)
3177 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3178 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3179 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3180 const vk::VkDevice device = context.getContext().getDevice();
3181 const vk::VkQueue queue = context.getContext().getQueue();
3182 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3183 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3185 m_imageWidth = context.getImageWidth();
3186 m_imageHeight = context.getImageHeight();
3187 m_imageMemorySize = context.getImageMemorySize();
3190 const vk::VkImageCreateInfo createInfo =
3192 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3196 vk::VK_IMAGE_TYPE_2D,
3197 vk::VK_FORMAT_R8G8B8A8_UNORM,
3199 (deUint32)m_imageWidth,
3200 (deUint32)m_imageHeight,
3203 1, 1, // mipLevels, arrayLayers
3204 vk::VK_SAMPLE_COUNT_1_BIT,
3206 vk::VK_IMAGE_TILING_OPTIMAL,
3207 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3208 vk::VK_SHARING_MODE_EXCLUSIVE,
3210 (deUint32)queueFamilies.size(),
3212 vk::VK_IMAGE_LAYOUT_UNDEFINED
3215 m_srcImage = vk::createImage(vkd, device, &createInfo);
3218 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3221 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3222 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3223 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3224 const vk::VkImageMemoryBarrier preImageBarrier =
3226 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3230 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3232 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3233 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3235 VK_QUEUE_FAMILY_IGNORED,
3236 VK_QUEUE_FAMILY_IGNORED,
3240 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3242 1, // Mip level count
3247 const vk::VkImageMemoryBarrier postImageBarrier =
3249 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3252 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3255 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3256 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3258 VK_QUEUE_FAMILY_IGNORED,
3259 VK_QUEUE_FAMILY_IGNORED,
3263 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3265 1, // Mip level count
3270 const vk::VkBufferImageCopy region =
3275 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3282 (deUint32)m_imageWidth,
3283 (deUint32)m_imageHeight,
3289 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3290 de::Random rng (m_seed);
3293 deUint8* const data = (deUint8*)ptr;
3295 for (size_t ndx = 0; ndx < (size_t)(4 * m_imageWidth * m_imageHeight); ndx++)
3296 data[ndx] = rng.getUint8();
3299 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3300 vkd.unmapMemory(device, *memory);
3303 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3304 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3305 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3307 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3308 queueRun(vkd, queue, *commandBuffer);
3312 void ImageCopyFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3314 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image data from another image" << TestLog::EndMessage;
3317 void ImageCopyFromImage::submit (SubmitContext& context)
3319 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3320 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3321 const vk::VkImageCopy region =
3324 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3332 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3339 (deUint32)m_imageWidth,
3340 (deUint32)m_imageHeight,
3345 vkd.cmdCopyImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region);
3348 void ImageCopyFromImage::verify (VerifyContext& context, size_t)
3350 ReferenceMemory& reference (context.getReference());
3351 de::Random rng (m_seed);
3353 reference.setUndefined(0, (size_t)m_imageMemorySize);
3356 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3358 for (deInt32 y = 0; y < m_imageHeight; y++)
3359 for (deInt32 x = 0; x < m_imageWidth; x++)
3361 const deUint8 r8 = rng.getUint8();
3362 const deUint8 g8 = rng.getUint8();
3363 const deUint8 b8 = rng.getUint8();
3364 const deUint8 a8 = rng.getUint8();
3366 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
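// Copies the test image into a transient destination image, then reads that image
// back into a host-visible buffer at verification time and compares it against the
// reference image with an exact comparison.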
3371 class ImageCopyToImage : public CmdCommand
3374 ImageCopyToImage (vk::VkImageLayout imageLayout) : m_imageLayout(imageLayout) {}
3375 ~ImageCopyToImage (void) {}
3376 const char* getName (void) const { return "ImageCopyToImage"; }
3378 void logPrepare (TestLog& log, size_t commandIndex) const;
3379 void prepare (PrepareContext& context);
3380 void logSubmit (TestLog& log, size_t commandIndex) const;
3381 void submit (SubmitContext& context);
3382 void verify (VerifyContext& context, size_t commandIndex);
3385 const vk::VkImageLayout m_imageLayout;
3386 deInt32 m_imageWidth;
3387 deInt32 m_imageHeight;
3388 vk::VkDeviceSize m_imageMemorySize;
3389 vk::Move<vk::VkImage> m_dstImage;
3390 vk::Move<vk::VkDeviceMemory> m_memory;
3393 void ImageCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
3395 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image copy." << TestLog::EndMessage;
3398 void ImageCopyToImage::prepare (PrepareContext& context)
3400 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3401 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3402 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3403 const vk::VkDevice device = context.getContext().getDevice();
3404 const vk::VkQueue queue = context.getContext().getQueue();
3405 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3406 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3408 m_imageWidth = context.getImageWidth();
3409 m_imageHeight = context.getImageHeight();
3410 m_imageMemorySize = context.getImageMemorySize();
3413 const vk::VkImageCreateInfo createInfo =
3415 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3419 vk::VK_IMAGE_TYPE_2D,
3420 vk::VK_FORMAT_R8G8B8A8_UNORM,
3422 (deUint32)m_imageWidth,
3423 (deUint32)m_imageHeight,
3426 1, 1, // mipLevels, arrayLayers
3427 vk::VK_SAMPLE_COUNT_1_BIT,
3429 vk::VK_IMAGE_TILING_OPTIMAL,
3430 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3431 vk::VK_SHARING_MODE_EXCLUSIVE,
3433 (deUint32)queueFamilies.size(),
3435 vk::VK_IMAGE_LAYOUT_UNDEFINED
3438 m_dstImage = vk::createImage(vkd, device, &createInfo);
3441 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3444 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3445 const vk::VkImageMemoryBarrier barrier =
3447 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3451 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3453 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3454 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3456 VK_QUEUE_FAMILY_IGNORED,
3457 VK_QUEUE_FAMILY_IGNORED,
3461 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3463 1, // Mip level count
3469 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
3471 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3472 queueRun(vkd, queue, *commandBuffer);
3476 void ImageCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
3478 log << TestLog::Message << commandIndex << ":" << getName() << " Copy image to another image" << TestLog::EndMessage;
3481 void ImageCopyToImage::submit (SubmitContext& context)
3483 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3484 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3485 const vk::VkImageCopy region =
3488 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3496 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3503 (deUint32)m_imageWidth,
3504 (deUint32)m_imageHeight,
3509 vkd.cmdCopyImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3512 void ImageCopyToImage::verify (VerifyContext& context, size_t commandIndex)
3514 tcu::ResultCollector& resultCollector (context.getResultCollector());
3515 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3516 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3517 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3518 const vk::VkDevice device = context.getContext().getDevice();
3519 const vk::VkQueue queue = context.getContext().getQueue();
3520 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3521 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3522 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3523 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3524 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3526 const vk::VkImageMemoryBarrier imageBarrier =
3528 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3531 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3532 vk::VK_ACCESS_TRANSFER_READ_BIT,
3534 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3535 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3537 VK_QUEUE_FAMILY_IGNORED,
3538 VK_QUEUE_FAMILY_IGNORED,
3542 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3544 1, // Mip level count
3549 const vk::VkBufferMemoryBarrier bufferBarrier =
3551 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
3554 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3555 vk::VK_ACCESS_HOST_READ_BIT,
3557 VK_QUEUE_FAMILY_IGNORED,
3558 VK_QUEUE_FAMILY_IGNORED,
3563 const vk::VkBufferImageCopy region =
3568 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3575 (deUint32)m_imageWidth,
3576 (deUint32)m_imageHeight,
3581 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
3582 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
3583 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
3586 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3587 queueRun(vkd, queue, *commandBuffer);
3590 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);
3592 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);
3595 const deUint8* const data = (const deUint8*)ptr;
3596 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight, 1, data);
3597 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3599 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
3600 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
3603 vkd.unmapMemory(device, *memory);
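enum BlitScale
{
	BLIT_SCALE_10,
	BLIT_SCALE_20
};

// Blits a seeded source image onto the test image with VK_FILTER_NEAREST. With
// BLIT_SCALE_10 the source matches the test image size; with BLIT_SCALE_20 it is
// half-sized and the blit upscales 2x. verify() reproduces the 1:1 copy or the
// nearest-neighbour upscale on the reference image.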
3613 class ImageBlitFromImage : public CmdCommand
3616 ImageBlitFromImage (deUint32 seed, BlitScale scale, vk::VkImageLayout imageLayout) : m_seed(seed), m_scale(scale), m_imageLayout(imageLayout) {}
3617 ~ImageBlitFromImage (void) {}
3618 const char* getName (void) const { return "ImageBlitFromImage"; }
3620 void logPrepare (TestLog& log, size_t commandIndex) const;
3621 void prepare (PrepareContext& context);
3622 void logSubmit (TestLog& log, size_t commandIndex) const;
3623 void submit (SubmitContext& context);
3624 void verify (VerifyContext& context, size_t commandIndex);
3627 const deUint32 m_seed;
3628 const BlitScale m_scale;
3629 const vk::VkImageLayout m_imageLayout;
3630 deInt32 m_imageWidth;
3631 deInt32 m_imageHeight;
3632 vk::VkDeviceSize m_imageMemorySize;
3633 deInt32 m_srcImageWidth;
3634 deInt32 m_srcImageHeight;
3635 vk::Move<vk::VkImage> m_srcImage;
3636 vk::Move<vk::VkDeviceMemory> m_memory;
3639 void ImageBlitFromImage::logPrepare (TestLog& log, size_t commandIndex) const
3641 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate source image for image to image blit." << TestLog::EndMessage;
3644 void ImageBlitFromImage::prepare (PrepareContext& context)
3646 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3647 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3648 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3649 const vk::VkDevice device = context.getContext().getDevice();
3650 const vk::VkQueue queue = context.getContext().getQueue();
3651 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3652 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3654 m_imageWidth = context.getImageWidth();
3655 m_imageHeight = context.getImageHeight();
3656 m_imageMemorySize = context.getImageMemorySize();
3658 if (m_scale == BLIT_SCALE_10)
3660 m_srcImageWidth = m_imageWidth;
3661 m_srcImageHeight = m_imageHeight;
3663 else if (m_scale == BLIT_SCALE_20)
3665 m_srcImageWidth = m_imageWidth / 2;
3666 m_srcImageHeight = m_imageHeight / 2;
3669 DE_FATAL("Unsupported scale");
3672 const vk::VkImageCreateInfo createInfo =
3674 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3678 vk::VK_IMAGE_TYPE_2D,
3679 vk::VK_FORMAT_R8G8B8A8_UNORM,
3681 (deUint32)m_srcImageWidth,
3682 (deUint32)m_srcImageHeight,
3685 1, 1, // mipLevels, arrayLayers
3686 vk::VK_SAMPLE_COUNT_1_BIT,
3688 vk::VK_IMAGE_TILING_OPTIMAL,
3689 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3690 vk::VK_SHARING_MODE_EXCLUSIVE,
3692 (deUint32)queueFamilies.size(),
3694 vk::VK_IMAGE_LAYOUT_UNDEFINED
3697 m_srcImage = vk::createImage(vkd, device, &createInfo);
3700 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
3703 const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_srcImageWidth * m_srcImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
3704 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3705 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3706 const vk::VkImageMemoryBarrier preImageBarrier =
3708 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3712 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3714 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3715 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3717 VK_QUEUE_FAMILY_IGNORED,
3718 VK_QUEUE_FAMILY_IGNORED,
3722 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3724 1, // Mip level count
3729 const vk::VkImageMemoryBarrier postImageBarrier =
3731 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3734 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3737 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3738 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
3740 VK_QUEUE_FAMILY_IGNORED,
3741 VK_QUEUE_FAMILY_IGNORED,
3745 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3747 1, // Mip level count
3752 const vk::VkBufferImageCopy region =
3757 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3764 (deUint32)m_srcImageWidth,
3765 (deUint32)m_srcImageHeight,
3771 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_srcImageWidth * m_srcImageHeight);
3772 de::Random rng (m_seed);
3775 deUint8* const data = (deUint8*)ptr;
3777 for (size_t ndx = 0; ndx < (size_t)(4 * m_srcImageWidth * m_srcImageHeight); ndx++)
3778 data[ndx] = rng.getUint8();
3781 vk::flushMappedMemoryRange(vkd, device, *memory, 0, 4 * m_srcImageWidth * m_srcImageHeight);
3782 vkd.unmapMemory(device, *memory);
3785 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
3786 vkd.cmdCopyBufferToImage(*commandBuffer, *srcBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
3787 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
3789 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
3790 queueRun(vkd, queue, *commandBuffer);
3794 void ImageBlitFromImage::logSubmit (TestLog& log, size_t commandIndex) const
3796 log << TestLog::Message << commandIndex << ":" << getName() << " Blit from another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
3799 void ImageBlitFromImage::submit (SubmitContext& context)
3801 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3802 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
3803 const vk::VkImageBlit region =
3807 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3823 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3837 vkd.cmdBlitImage(commandBuffer, *m_srcImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, context.getImage(), m_imageLayout, 1, &region, vk::VK_FILTER_NEAREST);
3840 void ImageBlitFromImage::verify (VerifyContext& context, size_t)
3842 ReferenceMemory& reference (context.getReference());
3843 de::Random rng (m_seed);
3845 reference.setUndefined(0, (size_t)m_imageMemorySize);
3848 const PixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
3850 if (m_scale == BLIT_SCALE_10)
3852 for (deInt32 y = 0; y < m_imageHeight; y++)
3853 for (deInt32 x = 0; x < m_imageWidth; x++)
3855 const deUint8 r8 = rng.getUint8();
3856 const deUint8 g8 = rng.getUint8();
3857 const deUint8 b8 = rng.getUint8();
3858 const deUint8 a8 = rng.getUint8();
3860 refAccess.setPixel(UVec4(r8, g8, b8, a8), x, y);
3863 else if (m_scale == BLIT_SCALE_20)
3865 tcu::TextureLevel source (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_srcImageWidth, m_srcImageHeight);
3866 const float xscale = ((float)m_srcImageWidth) / (float)m_imageWidth;
3867 const float yscale = ((float)m_srcImageHeight) / (float)m_imageHeight;
3869 for (deInt32 y = 0; y < m_srcImageHeight; y++)
3870 for (deInt32 x = 0; x < m_srcImageWidth; x++)
3872 const deUint8 r8 = rng.getUint8();
3873 const deUint8 g8 = rng.getUint8();
3874 const deUint8 b8 = rng.getUint8();
3875 const deUint8 a8 = rng.getUint8();
3877 source.getAccess().setPixel(UVec4(r8, g8, b8, a8), x, y);
3880 for (deInt32 y = 0; y < m_imageHeight; y++)
3881 for (deInt32 x = 0; x < m_imageWidth; x++)
3882 refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
3885 DE_FATAL("Unsupported scale");
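// Blits the test image into a transient destination image with VK_FILTER_NEAREST,
// either 1:1 (BLIT_SCALE_10) or upscaled 2x (BLIT_SCALE_20), then reads the result
// back to a host-visible buffer; the 1:1 case uses an exact comparison against the
// reference image.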
3889 class ImageBlitToImage : public CmdCommand
3892 ImageBlitToImage (BlitScale scale, vk::VkImageLayout imageLayout) : m_scale(scale), m_imageLayout(imageLayout) {}
3893 ~ImageBlitToImage (void) {}
3894 const char* getName (void) const { return "ImageBlitToImage"; }
3896 void logPrepare (TestLog& log, size_t commandIndex) const;
3897 void prepare (PrepareContext& context);
3898 void logSubmit (TestLog& log, size_t commandIndex) const;
3899 void submit (SubmitContext& context);
3900 void verify (VerifyContext& context, size_t commandIndex);
3903 const BlitScale m_scale;
3904 const vk::VkImageLayout m_imageLayout;
3905 deInt32 m_imageWidth;
3906 deInt32 m_imageHeight;
3907 vk::VkDeviceSize m_imageMemorySize;
3908 deInt32 m_dstImageWidth;
3909 deInt32 m_dstImageHeight;
3910 vk::Move<vk::VkImage> m_dstImage;
3911 vk::Move<vk::VkDeviceMemory> m_memory;
3914 void ImageBlitToImage::logPrepare (TestLog& log, size_t commandIndex) const
3916 log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination image for image to image blit." << TestLog::EndMessage;
3919 void ImageBlitToImage::prepare (PrepareContext& context)
3921 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
3922 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
3923 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
3924 const vk::VkDevice device = context.getContext().getDevice();
3925 const vk::VkQueue queue = context.getContext().getQueue();
3926 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
3927 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
3929 m_imageWidth = context.getImageWidth();
3930 m_imageHeight = context.getImageHeight();
3931 m_imageMemorySize = context.getImageMemorySize();
3933 if (m_scale == BLIT_SCALE_10)
3935 m_dstImageWidth = context.getImageWidth();
3936 m_dstImageHeight = context.getImageHeight();
3938 else if (m_scale == BLIT_SCALE_20)
3940 m_dstImageWidth = context.getImageWidth() * 2;
3941 m_dstImageHeight = context.getImageHeight() * 2;
3944 DE_FATAL("Unsupported blit scale");
3947 const vk::VkImageCreateInfo createInfo =
3949 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
3953 vk::VK_IMAGE_TYPE_2D,
3954 vk::VK_FORMAT_R8G8B8A8_UNORM,
3956 (deUint32)m_dstImageWidth,
3957 (deUint32)m_dstImageHeight,
3960 1, 1, // mipLevels, arrayLayers
3961 vk::VK_SAMPLE_COUNT_1_BIT,
3963 vk::VK_IMAGE_TILING_OPTIMAL,
3964 vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
3965 vk::VK_SHARING_MODE_EXCLUSIVE,
3967 (deUint32)queueFamilies.size(),
3969 vk::VK_IMAGE_LAYOUT_UNDEFINED
3972 m_dstImage = vk::createImage(vkd, device, &createInfo);
3975 m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);
3978 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
3979 const vk::VkImageMemoryBarrier barrier =
3981 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
3985 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
3987 vk::VK_IMAGE_LAYOUT_UNDEFINED,
3988 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
3990 VK_QUEUE_FAMILY_IGNORED,
3991 VK_QUEUE_FAMILY_IGNORED,
3995 vk::VK_IMAGE_ASPECT_COLOR_BIT,
3997 1, // Mip level count
4003 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
4005 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4006 queueRun(vkd, queue, *commandBuffer);
4010 void ImageBlitToImage::logSubmit (TestLog& log, size_t commandIndex) const
4012 log << TestLog::Message << commandIndex << ":" << getName() << " Blit image to another image" << (m_scale == BLIT_SCALE_20 ? " scale 2x" : "") << TestLog::EndMessage;
4015 void ImageBlitToImage::submit (SubmitContext& context)
4017 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4018 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4019 const vk::VkImageBlit region =
4023 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4039 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4053 vkd.cmdBlitImage(commandBuffer, context.getImage(), m_imageLayout, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, vk::VK_FILTER_NEAREST);
4056 void ImageBlitToImage::verify (VerifyContext& context, size_t commandIndex)
4058 tcu::ResultCollector& resultCollector (context.getResultCollector());
4059 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4060 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4061 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4062 const vk::VkDevice device = context.getContext().getDevice();
4063 const vk::VkQueue queue = context.getContext().getQueue();
4064 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4065 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4066 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4067 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_dstImageWidth * m_dstImageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4068 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4070 const vk::VkImageMemoryBarrier imageBarrier =
4072 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4075 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4076 vk::VK_ACCESS_TRANSFER_READ_BIT,
4078 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
4079 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4081 VK_QUEUE_FAMILY_IGNORED,
4082 VK_QUEUE_FAMILY_IGNORED,
4086 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4088 1, // Mip level count
4093 const vk::VkBufferMemoryBarrier bufferBarrier =
4095 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4098 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4099 vk::VK_ACCESS_HOST_READ_BIT,
4101 VK_QUEUE_FAMILY_IGNORED,
4102 VK_QUEUE_FAMILY_IGNORED,
4107 const vk::VkBufferImageCopy region =
4112 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4119 (deUint32)m_dstImageWidth,
4120 (deUint32)m_dstImageHeight,
4125 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4126 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4127 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4130 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4131 queueRun(vkd, queue, *commandBuffer);
4134 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_dstImageWidth * m_dstImageHeight);
4136 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_dstImageWidth * m_dstImageHeight);
4138 if (m_scale == BLIT_SCALE_10)
4140 const deUint8* const data = (const deUint8*)ptr;
4141 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4142 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4144 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4145 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4147 else if (m_scale == BLIT_SCALE_20)
4149 const deUint8* const data = (const deUint8*)ptr;
4150 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1, data);
4151 tcu::TextureLevel reference (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_dstImageWidth, m_dstImageHeight, 1);
4154 const ConstPixelBufferAccess& refAccess (context.getReferenceImage().getAccess());
4156 for (deInt32 y = 0; y < m_dstImageHeight; y++)
4157 for (deInt32 x = 0; x < m_dstImageWidth; x++)
4159 reference.getAccess().setPixel(refAccess.getPixel(x/2, y/2), x, y);
4163 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), reference.getAccess(), resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4164 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4167 DE_FATAL("Unknown scale");
4169 vkd.unmapMemory(device, *memory);
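// PrepareRenderPassContext wraps a PrepareContext and additionally exposes the render pass,
// framebuffer and render target dimensions to the commands recorded inside a render pass.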
4173 class PrepareRenderPassContext
4176 PrepareRenderPassContext (PrepareContext& context,
4177 vk::VkRenderPass renderPass,
4178 vk::VkFramebuffer framebuffer,
4179 deInt32 targetWidth,
4180 deInt32 targetHeight)
4181 : m_context (context)
4182 , m_renderPass (renderPass)
4183 , m_framebuffer (framebuffer)
4184 , m_targetWidth (targetWidth)
4185 , m_targetHeight (targetHeight)
4189 const Memory& getMemory (void) const { return m_context.getMemory(); }
4190 const Context& getContext (void) const { return m_context.getContext(); }
4191 const vk::ProgramCollection<vk::ProgramBinary>& getBinaryCollection (void) const { return m_context.getBinaryCollection(); }
4193 vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
4194 vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }
4196 vk::VkImage getImage (void) const { return m_context.getImage(); }
4197 deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
4198 deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }
4199 vk::VkImageLayout getImageLayout (void) const { return m_context.getImageLayout(); }
4201 deInt32 getTargetWidth (void) const { return m_targetWidth; }
4202 deInt32 getTargetHeight (void) const { return m_targetHeight; }
4204 vk::VkRenderPass getRenderPass (void) const { return m_renderPass; }
4207 PrepareContext& m_context;
4208 const vk::VkRenderPass m_renderPass;
4209 const vk::VkFramebuffer m_framebuffer;
4210 const deInt32 m_targetWidth;
4211 const deInt32 m_targetHeight;
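// VerifyRenderPassContext adds a CPU-side reference render target that render pass commands
// update in their verify() step; SubmitRenderPass later compares it against the contents of
// the actual color attachment.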
4214 class VerifyRenderPassContext
4217 VerifyRenderPassContext (VerifyContext& context,
4218 deInt32 targetWidth,
4219 deInt32 targetHeight)
4220 : m_context (context)
4221 , m_referenceTarget (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), targetWidth, targetHeight)
4225 const Context& getContext (void) const { return m_context.getContext(); }
4226 TestLog& getLog (void) const { return m_context.getLog(); }
4227 tcu::ResultCollector& getResultCollector (void) const { return m_context.getResultCollector(); }
4229 TextureLevel& getReferenceTarget (void) { return m_referenceTarget; }
4231 ReferenceMemory& getReference (void) { return m_context.getReference(); }
4232 TextureLevel& getReferenceImage (void) { return m_context.getReferenceImage(); }
4235 VerifyContext& m_context;
4236 TextureLevel m_referenceTarget;
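// Base class for commands recorded inside a render pass instance. The interface parallels the
// top-level command interface: logPrepare()/logSubmit() for logging, prepare() for resource
// creation, submit() for command buffer recording and verify() for updating and checking the
// reference state.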
4239 class RenderPassCommand
4242 virtual ~RenderPassCommand (void) {}
4243 virtual const char* getName (void) const = 0;
4245 // Log things that are done during prepare
4246 virtual void logPrepare (TestLog&, size_t) const {}
4247 // Log submitted calls etc.
4248 virtual void logSubmit (TestLog&, size_t) const {}
4250 // Allocate vulkan resources and prepare for submit.
4251 virtual void prepare (PrepareRenderPassContext&) {}
4253 // Submit commands to command buffer.
4254 virtual void submit (SubmitContext&) {}
4257 virtual void verify (VerifyRenderPassContext&, size_t) {}
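// SubmitRenderPass is a top-level command that creates a 256x256 RGBA8 color target, begins a
// render pass on it, executes the nested RenderPassCommands inline and finally verifies the
// rendered result against the accumulated reference target.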
4260 class SubmitRenderPass : public CmdCommand
4263 SubmitRenderPass (const vector<RenderPassCommand*>& commands);
4264 ~SubmitRenderPass (void);
4265 const char* getName (void) const { return "SubmitRenderPass"; }
4267 void logPrepare (TestLog&, size_t) const;
4268 void logSubmit (TestLog&, size_t) const;
4270 void prepare (PrepareContext&);
4271 void submit (SubmitContext&);
4273 void verify (VerifyContext&, size_t);
4276 const deInt32 m_targetWidth;
4277 const deInt32 m_targetHeight;
4278 vk::Move<vk::VkRenderPass> m_renderPass;
4279 vk::Move<vk::VkDeviceMemory> m_colorTargetMemory;
4280 de::MovePtr<vk::Allocation> m_colorTargetMemory2;
4281 vk::Move<vk::VkImage> m_colorTarget;
4282 vk::Move<vk::VkImageView> m_colorTargetView;
4283 vk::Move<vk::VkFramebuffer> m_framebuffer;
4284 vector<RenderPassCommand*> m_commands;
4287 SubmitRenderPass::SubmitRenderPass (const vector<RenderPassCommand*>& commands)
4288 : m_targetWidth (256)
4289 , m_targetHeight (256)
4290 , m_commands (commands)
4294 SubmitRenderPass::~SubmitRenderPass()
4296 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4297 delete m_commands[cmdNdx];
4300 void SubmitRenderPass::logPrepare (TestLog& log, size_t commandIndex) const
4302 const string sectionName (de::toString(commandIndex) + ":" + getName());
4303 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4305 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4307 RenderPassCommand& command = *m_commands[cmdNdx];
4308 command.logPrepare(log, cmdNdx);
4312 void SubmitRenderPass::logSubmit (TestLog& log, size_t commandIndex) const
4314 const string sectionName (de::toString(commandIndex) + ":" + getName());
4315 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4317 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4319 RenderPassCommand& command = *m_commands[cmdNdx];
4320 command.logSubmit(log, cmdNdx);
4324 void SubmitRenderPass::prepare (PrepareContext& context)
4326 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4327 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4328 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4329 const vk::VkDevice device = context.getContext().getDevice();
4330 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4332 const vk::VkAttachmentReference colorAttachments[] =
4334 { 0, vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL }
4336 const vk::VkSubpassDescription subpass =
4339 vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
4344 DE_LENGTH_OF_ARRAY(colorAttachments),
4351 const vk::VkAttachmentDescription attachment =
4354 vk::VK_FORMAT_R8G8B8A8_UNORM,
4355 vk::VK_SAMPLE_COUNT_1_BIT,
4357 vk::VK_ATTACHMENT_LOAD_OP_CLEAR,
4358 vk::VK_ATTACHMENT_STORE_OP_STORE,
4360 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
4361 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,
4363 vk::VK_IMAGE_LAYOUT_UNDEFINED,
4364 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
4367 const vk::VkImageCreateInfo createInfo =
4369 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
4373 vk::VK_IMAGE_TYPE_2D,
4374 vk::VK_FORMAT_R8G8B8A8_UNORM,
4375 { (deUint32)m_targetWidth, (deUint32)m_targetHeight, 1u },
4378 vk::VK_SAMPLE_COUNT_1_BIT,
4379 vk::VK_IMAGE_TILING_OPTIMAL,
4380 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
4381 vk::VK_SHARING_MODE_EXCLUSIVE,
4382 (deUint32)queueFamilies.size(),
4384 vk::VK_IMAGE_LAYOUT_UNDEFINED
4387 m_colorTarget = vk::createImage(vkd, device, &createInfo);
4390 m_colorTargetMemory = bindImageMemory(vki, vkd, physicalDevice, device, *m_colorTarget, 0);
4393 const vk::VkImageViewCreateInfo createInfo =
4395 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
4400 vk::VK_IMAGE_VIEW_TYPE_2D,
4401 vk::VK_FORMAT_R8G8B8A8_UNORM,
4403 vk::VK_COMPONENT_SWIZZLE_R,
4404 vk::VK_COMPONENT_SWIZZLE_G,
4405 vk::VK_COMPONENT_SWIZZLE_B,
4406 vk::VK_COMPONENT_SWIZZLE_A
4409 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4417 m_colorTargetView = vk::createImageView(vkd, device, &createInfo);
4420 const vk::VkRenderPassCreateInfo createInfo =
4422 vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
4436 m_renderPass = vk::createRenderPass(vkd, device, &createInfo);
4440 const vk::VkImageView imageViews[] =
4444 const vk::VkFramebufferCreateInfo createInfo =
4446 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
4451 DE_LENGTH_OF_ARRAY(imageViews),
4453 (deUint32)m_targetWidth,
4454 (deUint32)m_targetHeight,
4458 m_framebuffer = vk::createFramebuffer(vkd, device, &createInfo);
4462 PrepareRenderPassContext renderpassContext (context, *m_renderPass, *m_framebuffer, m_targetWidth, m_targetHeight);
4464 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4466 RenderPassCommand& command = *m_commands[cmdNdx];
4467 command.prepare(renderpassContext);
4472 void SubmitRenderPass::submit (SubmitContext& context)
4474 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4475 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4476 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.0f, 0.0f, 0.0f, 1.0f);
4478 const vk::VkRenderPassBeginInfo beginInfo =
4480 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
4486 { { 0, 0 }, { (deUint32)m_targetWidth, (deUint32)m_targetHeight } },
4491 vkd.cmdBeginRenderPass(commandBuffer, &beginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
4493 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4495 RenderPassCommand& command = *m_commands[cmdNdx];
4497 command.submit(context);
4500 vkd.cmdEndRenderPass(commandBuffer);
4503 void SubmitRenderPass::verify (VerifyContext& context, size_t commandIndex)
4505 TestLog& log (context.getLog());
4506 tcu::ResultCollector& resultCollector (context.getResultCollector());
4507 const string sectionName (de::toString(commandIndex) + ":" + getName());
4508 const tcu::ScopedLogSection section (log, sectionName, sectionName);
4509 VerifyRenderPassContext verifyContext (context, m_targetWidth, m_targetHeight);
4511 tcu::clear(verifyContext.getReferenceTarget().getAccess(), Vec4(0.0f, 0.0f, 0.0f, 1.0f));
4513 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
4515 RenderPassCommand& command = *m_commands[cmdNdx];
4516 command.verify(verifyContext, cmdNdx);
4520 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
4521 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4522 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
4523 const vk::VkDevice device = context.getContext().getDevice();
4524 const vk::VkQueue queue = context.getContext().getQueue();
4525 const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
4526 const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
4527 const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
4528 const vk::Unique<vk::VkBuffer> dstBuffer (createBuffer(vkd, device, 4 * m_targetWidth * m_targetHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
4529 const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
4531 const vk::VkImageMemoryBarrier imageBarrier =
4533 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
4536 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
4537 vk::VK_ACCESS_TRANSFER_READ_BIT,
4539 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4540 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
4542 VK_QUEUE_FAMILY_IGNORED,
4543 VK_QUEUE_FAMILY_IGNORED,
4547 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4549 1, // Mip level count
4554 const vk::VkBufferMemoryBarrier bufferBarrier =
4556 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
4559 vk::VK_ACCESS_TRANSFER_WRITE_BIT,
4560 vk::VK_ACCESS_HOST_READ_BIT,
4562 VK_QUEUE_FAMILY_IGNORED,
4563 VK_QUEUE_FAMILY_IGNORED,
4568 const vk::VkBufferImageCopy region =
4573 vk::VK_IMAGE_ASPECT_COLOR_BIT,
4580 (deUint32)m_targetWidth,
4581 (deUint32)m_targetHeight,
4586 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
4587 vkd.cmdCopyImageToBuffer(*commandBuffer, *m_colorTarget, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
4588 vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
4591 VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
4592 queueRun(vkd, queue, *commandBuffer);
4595 void* const ptr = mapMemory(vkd, device, *memory, 4 * m_targetWidth * m_targetHeight);
4597 vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_targetWidth * m_targetHeight);
4600 const deUint8* const data = (const deUint8*)ptr;
4601 const ConstPixelBufferAccess resAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_targetWidth, m_targetHeight, 1, data);
4602 const ConstPixelBufferAccess& refAccess (verifyContext.getReferenceTarget().getAccess());
4604 if (!tcu::intThresholdCompare(context.getLog(), (de::toString(commandIndex) + ":" + getName()).c_str(), (de::toString(commandIndex) + ":" + getName()).c_str(), refAccess, resAccess, UVec4(0), tcu::COMPARE_LOG_ON_ERROR))
4605 resultCollector.fail(de::toString(commandIndex) + ":" + getName() + " Image comparison failed");
4608 vkd.unmapMemory(device, *memory);
4613 struct PipelineResources
4615 vk::Move<vk::VkPipeline> pipeline;
4616 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
4617 vk::Move<vk::VkPipelineLayout> pipelineLayout;
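// Builds a minimal graphics pipeline for the render pass commands: the supplied vertex and
// fragment shaders, a fixed viewport/scissor covering the whole target, single-sample
// rasterization, always-passing depth/stencil compare ops and a simple replace-style color
// blend attachment. The descriptor set layout and pipeline layout are created from the given
// bindings and push constant ranges and returned in PipelineResources.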
4620 void createPipelineWithResources (const vk::DeviceInterface& vkd,
4621 const vk::VkDevice device,
4622 const vk::VkRenderPass renderPass,
4623 const deUint32 subpass,
4624 const vk::VkShaderModule& vertexShaderModule,
4625 const vk::VkShaderModule& fragmentShaderModule,
4626 const deUint32 viewPortWidth,
4627 const deUint32 viewPortHeight,
4628 const vector<vk::VkVertexInputBindingDescription>& vertexBindingDescriptions,
4629 const vector<vk::VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
4630 const vector<vk::VkDescriptorSetLayoutBinding>& bindings,
4631 const vk::VkPrimitiveTopology topology,
4632 deUint32 pushConstantRangeCount,
4633 const vk::VkPushConstantRange* pushConstantRanges,
4634 PipelineResources& resources)
4636 if (!bindings.empty())
4638 const vk::VkDescriptorSetLayoutCreateInfo createInfo =
4640 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
4644 (deUint32)bindings.size(),
4645 bindings.empty() ? DE_NULL : &bindings[0]
4648 resources.descriptorSetLayout = vk::createDescriptorSetLayout(vkd, device, &createInfo);
4652 const vk::VkDescriptorSetLayout descriptorSetLayout_ = *resources.descriptorSetLayout;
4653 const vk::VkPipelineLayoutCreateInfo createInfo =
4655 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
4659 resources.descriptorSetLayout ? 1u : 0u,
4660 resources.descriptorSetLayout ? &descriptorSetLayout_ : DE_NULL,
4662 pushConstantRangeCount,
4666 resources.pipelineLayout = vk::createPipelineLayout(vkd, device, &createInfo);
4670 const vk::VkPipelineShaderStageCreateInfo shaderStages[] =
4673 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4676 vk::VK_SHADER_STAGE_VERTEX_BIT,
4682 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
4685 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
4686 fragmentShaderModule,
4691 const vk::VkPipelineDepthStencilStateCreateInfo depthStencilState =
4693 vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
4698 vk::VK_COMPARE_OP_ALWAYS,
4702 vk::VK_STENCIL_OP_KEEP,
4703 vk::VK_STENCIL_OP_KEEP,
4704 vk::VK_STENCIL_OP_KEEP,
4705 vk::VK_COMPARE_OP_ALWAYS,
4711 vk::VK_STENCIL_OP_KEEP,
4712 vk::VK_STENCIL_OP_KEEP,
4713 vk::VK_STENCIL_OP_KEEP,
4714 vk::VK_COMPARE_OP_ALWAYS,
4722 const vk::VkPipelineVertexInputStateCreateInfo vertexInputState =
4724 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
4728 (deUint32)vertexBindingDescriptions.size(),
4729 vertexBindingDescriptions.empty() ? DE_NULL : &vertexBindingDescriptions[0],
4731 (deUint32)vertexAttributeDescriptions.size(),
4732 vertexAttributeDescriptions.empty() ? DE_NULL : &vertexAttributeDescriptions[0]
4734 const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyState =
4736 vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
4742 const vk::VkViewport viewports[] =
4744 { 0.0f, 0.0f, (float)viewPortWidth, (float)viewPortHeight, 0.0f, 1.0f }
4746 const vk::VkRect2D scissors[] =
4748 { { 0, 0 }, { (deUint32)viewPortWidth, (deUint32)viewPortHeight } }
4750 const vk::VkPipelineViewportStateCreateInfo viewportState =
4752 vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
4755 DE_LENGTH_OF_ARRAY(viewports),
4757 DE_LENGTH_OF_ARRAY(scissors),
4760 const vk::VkPipelineRasterizationStateCreateInfo rasterState =
4762 vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
4768 vk::VK_POLYGON_MODE_FILL,
4769 vk::VK_CULL_MODE_NONE,
4770 vk::VK_FRONT_FACE_COUNTER_CLOCKWISE,
4777 const vk::VkSampleMask sampleMask = ~0u;
4778 const vk::VkPipelineMultisampleStateCreateInfo multisampleState =
4780 vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
4784 vk::VK_SAMPLE_COUNT_1_BIT,
4791 const vk::VkPipelineColorBlendAttachmentState attachments[] =
4795 vk::VK_BLEND_FACTOR_ONE,
4796 vk::VK_BLEND_FACTOR_ZERO,
4797 vk::VK_BLEND_OP_ADD,
4798 vk::VK_BLEND_FACTOR_ONE,
4799 vk::VK_BLEND_FACTOR_ZERO,
4800 vk::VK_BLEND_OP_ADD,
4801 (vk::VK_COLOR_COMPONENT_R_BIT|
4802 vk::VK_COLOR_COMPONENT_G_BIT|
4803 vk::VK_COLOR_COMPONENT_B_BIT|
4804 vk::VK_COLOR_COMPONENT_A_BIT)
4807 const vk::VkPipelineColorBlendStateCreateInfo colorBlendState =
4809 vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
4814 vk::VK_LOGIC_OP_COPY,
4815 DE_LENGTH_OF_ARRAY(attachments),
4817 { 0.0f, 0.0f, 0.0f, 0.0f }
4819 const vk::VkGraphicsPipelineCreateInfo createInfo =
4821 vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
4825 DE_LENGTH_OF_ARRAY(shaderStages),
4829 &inputAssemblyState,
4837 *resources.pipelineLayout,
4844 resources.pipeline = vk::createGraphicsPipeline(vkd, device, 0, &createInfo);
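// The Render* commands below all follow the same pattern: bind the externally prepared buffer
// or image through one particular binding point (index buffer, vertex buffer, or a descriptor
// type), draw white points whose coordinates are decoded from the resource contents, and repeat
// the same decoding on the CPU in verify() against the reference memory, so that the resulting
// reference target can be compared with the real rendering.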
4848 class RenderIndexBuffer : public RenderPassCommand
4851 RenderIndexBuffer (void) {}
4852 ~RenderIndexBuffer (void) {}
4854 const char* getName (void) const { return "RenderIndexBuffer"; }
4855 void logPrepare (TestLog&, size_t) const;
4856 void logSubmit (TestLog&, size_t) const;
4857 void prepare (PrepareRenderPassContext&);
4858 void submit (SubmitContext& context);
4859 void verify (VerifyRenderPassContext&, size_t);
4862 PipelineResources m_resources;
4863 vk::VkDeviceSize m_bufferSize;
4866 void RenderIndexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4868 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as index buffer." << TestLog::EndMessage;
4871 void RenderIndexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4873 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as index buffer." << TestLog::EndMessage;
4876 void RenderIndexBuffer::prepare (PrepareRenderPassContext& context)
4878 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4879 const vk::VkDevice device = context.getContext().getDevice();
4880 const vk::VkRenderPass renderPass = context.getRenderPass();
4881 const deUint32 subpass = 0;
4882 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("index-buffer.vert"), 0));
4883 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4885 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4886 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4887 m_bufferSize = context.getBufferSize();
4890 void RenderIndexBuffer::submit (SubmitContext& context)
4892 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4893 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4895 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4896 vkd.cmdBindIndexBuffer(commandBuffer, context.getBuffer(), 0, vk::VK_INDEX_TYPE_UINT16);
4897 vkd.cmdDrawIndexed(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0, 0);
4900 void RenderIndexBuffer::verify (VerifyRenderPassContext& context, size_t)
4902 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4904 const deUint8 x = context.getReference().get(pos * 2);
4905 const deUint8 y = context.getReference().get((pos * 2) + 1);
4907 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
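// RenderVertexBuffer is analogous, but feeds the buffer to the fixed-function vertex input
// stage as VK_FORMAT_R8G8_UNORM attributes instead of binding it as an index buffer.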
4911 class RenderVertexBuffer : public RenderPassCommand
4914 RenderVertexBuffer (void) {}
4915 ~RenderVertexBuffer (void) {}
4917 const char* getName (void) const { return "RenderVertexBuffer"; }
4918 void logPrepare (TestLog&, size_t) const;
4919 void logSubmit (TestLog&, size_t) const;
4920 void prepare (PrepareRenderPassContext&);
4921 void submit (SubmitContext& context);
4922 void verify (VerifyRenderPassContext&, size_t);
4925 PipelineResources m_resources;
4926 vk::VkDeviceSize m_bufferSize;
4929 void RenderVertexBuffer::logPrepare (TestLog& log, size_t commandIndex) const
4931 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as vertex buffer." << TestLog::EndMessage;
4934 void RenderVertexBuffer::logSubmit (TestLog& log, size_t commandIndex) const
4936 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as vertex buffer." << TestLog::EndMessage;
4939 void RenderVertexBuffer::prepare (PrepareRenderPassContext& context)
4941 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4942 const vk::VkDevice device = context.getContext().getDevice();
4943 const vk::VkRenderPass renderPass = context.getRenderPass();
4944 const deUint32 subpass = 0;
4945 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("vertex-buffer.vert"), 0));
4946 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
4948 vector<vk::VkVertexInputAttributeDescription> vertexAttributeDescriptions;
4949 vector<vk::VkVertexInputBindingDescription> vertexBindingDescriptions;
4952 const vk::VkVertexInputBindingDescription vertexBindingDescription =
4956 vk::VK_VERTEX_INPUT_RATE_VERTEX
4959 vertexBindingDescriptions.push_back(vertexBindingDescription);
4962 const vk::VkVertexInputAttributeDescription vertexAttributeDescription =
4966 vk::VK_FORMAT_R8G8_UNORM,
4970 vertexAttributeDescriptions.push_back(vertexAttributeDescription);
4972 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
4973 vertexBindingDescriptions, vertexAttributeDescriptions, vector<vk::VkDescriptorSetLayoutBinding>(), vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
4975 m_bufferSize = context.getBufferSize();
4978 void RenderVertexBuffer::submit (SubmitContext& context)
4980 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
4981 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
4982 const vk::VkDeviceSize offset = 0;
4983 const vk::VkBuffer buffer = context.getBuffer();
4985 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
4986 vkd.cmdBindVertexBuffers(commandBuffer, 0, 1, &buffer, &offset);
4987 vkd.cmdDraw(commandBuffer, (deUint32)(context.getBufferSize() / 2), 1, 0, 0);
4990 void RenderVertexBuffer::verify (VerifyRenderPassContext& context, size_t)
4992 for (size_t pos = 0; pos < (size_t)m_bufferSize / 2; pos++)
4994 const deUint8 x = context.getReference().get(pos * 2);
4995 const deUint8 y = context.getReference().get((pos * 2) + 1);
4997 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
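// RenderVertexUniformBuffer exposes the buffer to the vertex shader as uniform buffer
// descriptors. Because the test caps a single uniform buffer range at MAX_UNIFORM_BUFFER_SIZE
// bytes, the buffer is split into ranges of that size, one descriptor set per range, and a
// separate draw is issued for each range.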
5001 class RenderVertexUniformBuffer : public RenderPassCommand
5004 RenderVertexUniformBuffer (void) {}
5005 ~RenderVertexUniformBuffer (void);
5007 const char* getName (void) const { return "RenderVertexUniformBuffer"; }
5008 void logPrepare (TestLog&, size_t) const;
5009 void logSubmit (TestLog&, size_t) const;
5010 void prepare (PrepareRenderPassContext&);
5011 void submit (SubmitContext& context);
5012 void verify (VerifyRenderPassContext&, size_t);
5015 PipelineResources m_resources;
5016 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5017 vector<vk::VkDescriptorSet> m_descriptorSets;
5019 vk::VkDeviceSize m_bufferSize;
5022 RenderVertexUniformBuffer::~RenderVertexUniformBuffer (void)
5026 void RenderVertexUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5028 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
5031 void RenderVertexUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5033 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
5036 void RenderVertexUniformBuffer::prepare (PrepareRenderPassContext& context)
5038 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5039 const vk::VkDevice device = context.getContext().getDevice();
5040 const vk::VkRenderPass renderPass = context.getRenderPass();
5041 const deUint32 subpass = 0;
5042 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.vert"), 0));
5043 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5044 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5046 m_bufferSize = context.getBufferSize();
5049 const vk::VkDescriptorSetLayoutBinding binding =
5052 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5054 vk::VK_SHADER_STAGE_VERTEX_BIT,
5058 bindings.push_back(binding);
5061 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5062 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5065 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
5066 const vk::VkDescriptorPoolSize poolSizes =
5068 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5071 const vk::VkDescriptorPoolCreateInfo createInfo =
5073 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5075 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5082 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5083 m_descriptorSets.resize(descriptorCount);
5086 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5088 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5089 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5091 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5099 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5102 const vk::VkDescriptorBufferInfo bufferInfo =
5104 context.getBuffer(),
5105 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
5106 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5107 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5108 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5110 const vk::VkWriteDescriptorSet write =
5112 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5114 m_descriptorSets[descriptorSetNdx],
5118 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
5124 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5129 void RenderVertexUniformBuffer::submit (SubmitContext& context)
5131 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5132 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5134 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5136 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5138 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5139 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5140 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5141 const deUint32 count = (deUint32)(size / 2);
5143 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5144 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5148 void RenderVertexUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
5150 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5152 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
5153 const size_t size = (size_t)(m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5154 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
5155 : (size_t)MAX_UNIFORM_BUFFER_SIZE);
5156 const size_t count = size / 2;
5158 for (size_t pos = 0; pos < count; pos++)
5160 const deUint8 x = context.getReference().get(offset + pos * 2);
5161 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5163 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
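// RenderVertexUniformTexelBuffer does the same through uniform texel buffer descriptors: the
// buffer is viewed as VK_FORMAT_R16_UINT texels, chunked by the device's maxTexelBufferElements
// limit, with one buffer view and one descriptor set per chunk.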
5168 class RenderVertexUniformTexelBuffer : public RenderPassCommand
5171 RenderVertexUniformTexelBuffer (void) {}
5172 ~RenderVertexUniformTexelBuffer (void);
5174 const char* getName (void) const { return "RenderVertexUniformTexelBuffer"; }
5175 void logPrepare (TestLog&, size_t) const;
5176 void logSubmit (TestLog&, size_t) const;
5177 void prepare (PrepareRenderPassContext&);
5178 void submit (SubmitContext& context);
5179 void verify (VerifyRenderPassContext&, size_t);
5182 PipelineResources m_resources;
5183 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5184 vector<vk::VkDescriptorSet> m_descriptorSets;
5185 vector<vk::VkBufferView> m_bufferViews;
5187 const vk::DeviceInterface* m_vkd;
5188 vk::VkDevice m_device;
5189 vk::VkDeviceSize m_bufferSize;
5190 deUint32 m_maxUniformTexelCount;
5193 RenderVertexUniformTexelBuffer::~RenderVertexUniformTexelBuffer (void)
5195 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5197 if (!!m_bufferViews[bufferViewNdx])
5199 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5200 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5205 void RenderVertexUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5207 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
5210 void RenderVertexUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5212 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
5215 void RenderVertexUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
5217 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5218 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5219 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5220 const vk::VkDevice device = context.getContext().getDevice();
5221 const vk::VkRenderPass renderPass = context.getRenderPass();
5222 const deUint32 subpass = 0;
5223 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.vert"), 0));
5224 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5225 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5229 m_bufferSize = context.getBufferSize();
5230 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5233 const vk::VkDescriptorSetLayoutBinding binding =
5236 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5238 vk::VK_SHADER_STAGE_VERTEX_BIT,
5242 bindings.push_back(binding);
5245 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5246 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5249 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 2));
5250 const vk::VkDescriptorPoolSize poolSizes =
5252 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5255 const vk::VkDescriptorPoolCreateInfo createInfo =
5257 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5259 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5266 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5267 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5268 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5271 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5273 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5274 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5275 : m_maxUniformTexelCount * 2) / 2;
5276 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5277 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5279 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5287 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5290 const vk::VkBufferViewCreateInfo createInfo =
5292 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5296 context.getBuffer(),
5297 vk::VK_FORMAT_R16_UINT,
5298 descriptorSetNdx * m_maxUniformTexelCount * 2,
5302 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5306 const vk::VkWriteDescriptorSet write =
5308 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5310 m_descriptorSets[descriptorSetNdx],
5314 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
5317 &m_bufferViews[descriptorSetNdx]
5320 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5325 void RenderVertexUniformTexelBuffer::submit (SubmitContext& context)
5327 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5328 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5330 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5332 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5334 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5335 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5336 : m_maxUniformTexelCount * 2) / 2;
5338 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5339 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5343 void RenderVertexUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5345 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5347 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 2;
5348 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 2
5349 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 2
5350 : m_maxUniformTexelCount * 2) / 2;
5352 for (size_t pos = 0; pos < (size_t)count; pos++)
5354 const deUint8 x = context.getReference().get(offset + pos * 2);
5355 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5357 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
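// RenderVertexStorageBuffer uses storage buffer descriptors instead, chunking the buffer by
// MAX_STORAGE_BUFFER_SIZE bytes with one descriptor set and one draw per chunk.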
5362 class RenderVertexStorageBuffer : public RenderPassCommand
5365 RenderVertexStorageBuffer (void) {}
5366 ~RenderVertexStorageBuffer (void);
5368 const char* getName (void) const { return "RenderVertexStorageBuffer"; }
5369 void logPrepare (TestLog&, size_t) const;
5370 void logSubmit (TestLog&, size_t) const;
5371 void prepare (PrepareRenderPassContext&);
5372 void submit (SubmitContext& context);
5373 void verify (VerifyRenderPassContext&, size_t);
5376 PipelineResources m_resources;
5377 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5378 vector<vk::VkDescriptorSet> m_descriptorSets;
5380 vk::VkDeviceSize m_bufferSize;
5383 RenderVertexStorageBuffer::~RenderVertexStorageBuffer (void)
5387 void RenderVertexStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5389 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage buffer." << TestLog::EndMessage;
5392 void RenderVertexStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5394 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
5397 void RenderVertexStorageBuffer::prepare (PrepareRenderPassContext& context)
5399 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5400 const vk::VkDevice device = context.getContext().getDevice();
5401 const vk::VkRenderPass renderPass = context.getRenderPass();
5402 const deUint32 subpass = 0;
5403 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.vert"), 0));
5404 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5405 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5407 m_bufferSize = context.getBufferSize();
5410 const vk::VkDescriptorSetLayoutBinding binding =
5413 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5415 vk::VK_SHADER_STAGE_VERTEX_BIT,
5419 bindings.push_back(binding);
5422 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5423 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5426 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE));
5427 const vk::VkDescriptorPoolSize poolSizes =
5429 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5432 const vk::VkDescriptorPoolCreateInfo createInfo =
5434 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5436 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5443 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5444 m_descriptorSets.resize(descriptorCount);
5447 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5449 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5450 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5452 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5460 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5463 const vk::VkDescriptorBufferInfo bufferInfo =
5465 context.getBuffer(),
5466 descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE,
5467 de::min(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE, (vk::VkDeviceSize)MAX_STORAGE_BUFFER_SIZE)
5469 const vk::VkWriteDescriptorSet write =
5471 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5473 m_descriptorSets[descriptorSetNdx],
5477 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
5483 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5488 void RenderVertexStorageBuffer::submit (SubmitContext& context)
5490 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5491 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5493 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5495 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5497 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5498 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5499 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5501 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5502 vkd.cmdDraw(commandBuffer, (deUint32)(size / 2), 1, 0, 0);
5506 void RenderVertexStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
5508 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5510 const size_t offset = descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE;
5511 const size_t size = m_bufferSize < (descriptorSetNdx + 1) * MAX_STORAGE_BUFFER_SIZE
5512 ? (size_t)(m_bufferSize - descriptorSetNdx * MAX_STORAGE_BUFFER_SIZE)
5513 : (size_t)(MAX_STORAGE_BUFFER_SIZE);
5515 for (size_t pos = 0; pos < size / 2; pos++)
5517 const deUint8 x = context.getReference().get(offset + pos * 2);
5518 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5520 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
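// RenderVertexStorageTexelBuffer views the buffer as VK_FORMAT_R32_UINT texels (again chunked
// by maxTexelBufferElements); each 32-bit texel carries two 8-bit (x, y) coordinate pairs,
// hence the draw count of chunk-size-in-bytes / 2 points.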
5525 class RenderVertexStorageTexelBuffer : public RenderPassCommand
5528 RenderVertexStorageTexelBuffer (void) {}
5529 ~RenderVertexStorageTexelBuffer (void);
5531 const char* getName (void) const { return "RenderVertexStorageTexelBuffer"; }
5532 void logPrepare (TestLog&, size_t) const;
5533 void logSubmit (TestLog&, size_t) const;
5534 void prepare (PrepareRenderPassContext&);
5535 void submit (SubmitContext& context);
5536 void verify (VerifyRenderPassContext&, size_t);
5539 PipelineResources m_resources;
5540 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5541 vector<vk::VkDescriptorSet> m_descriptorSets;
5542 vector<vk::VkBufferView> m_bufferViews;
5544 const vk::DeviceInterface* m_vkd;
5545 vk::VkDevice m_device;
5546 vk::VkDeviceSize m_bufferSize;
5547 deUint32 m_maxStorageTexelCount;
5550 RenderVertexStorageTexelBuffer::~RenderVertexStorageTexelBuffer (void)
5552 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
5554 if (!!m_bufferViews[bufferViewNdx])
5556 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
5557 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
5562 void RenderVertexStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
5564 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
5567 void RenderVertexStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
5569 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
5572 void RenderVertexStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
5574 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
5575 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
5576 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5577 const vk::VkDevice device = context.getContext().getDevice();
5578 const vk::VkRenderPass renderPass = context.getRenderPass();
5579 const deUint32 subpass = 0;
5580 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.vert"), 0));
5581 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5582 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5586 m_bufferSize = context.getBufferSize();
5587 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
5590 const vk::VkDescriptorSetLayoutBinding binding =
5593 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5595 vk::VK_SHADER_STAGE_VERTEX_BIT,
5599 bindings.push_back(binding);
5602 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5603 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5606 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
5607 const vk::VkDescriptorPoolSize poolSizes =
5609 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5612 const vk::VkDescriptorPoolCreateInfo createInfo =
5614 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5616 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5623 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5624 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
5625 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
5628 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5630 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5631 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5633 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5641 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
5644 const vk::VkBufferViewCreateInfo createInfo =
5646 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
5650 context.getBuffer(),
5651 vk::VK_FORMAT_R32_UINT,
5652 descriptorSetNdx * m_maxStorageTexelCount * 4,
5653 (deUint32)de::min<vk::VkDeviceSize>(m_maxStorageTexelCount * 4, m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4)
5656 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
5660 const vk::VkWriteDescriptorSet write =
5662 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5664 m_descriptorSets[descriptorSetNdx],
5668 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
5671 &m_bufferViews[descriptorSetNdx]
5674 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5679 void RenderVertexStorageTexelBuffer::submit (SubmitContext& context)
5681 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5682 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5684 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5686 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5688 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5689 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5690 : m_maxStorageTexelCount * 4) / 2;
5692 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
5693 vkd.cmdDraw(commandBuffer, count, 1, 0, 0);
5697 void RenderVertexStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
5699 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
5701 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
5702 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
5703 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
5704 : m_maxStorageTexelCount * 4) / 2;
5706 DE_ASSERT(context.getReference().getSize() <= 4 * m_maxStorageTexelCount * m_descriptorSets.size());
5707 DE_ASSERT(context.getReference().getSize() > offset);
5708 DE_ASSERT(offset + count * 2 <= context.getReference().getSize());
5710 for (size_t pos = 0; pos < (size_t)count; pos++)
5712 const deUint8 x = context.getReference().get(offset + pos * 2);
5713 const deUint8 y = context.getReference().get(offset + (pos * 2) + 1);
5715 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), x, y);
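// RenderVertexStorageImage binds the test image as a storage image and draws two points per
// texel: verify() reads each RGBA8 texel of the reference image and treats (r, g) and (b, a)
// as two separate (x, y) coordinates in the reference target.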
5720 class RenderVertexStorageImage : public RenderPassCommand
5723 RenderVertexStorageImage (void) {}
5724 ~RenderVertexStorageImage (void);
5726 const char* getName (void) const { return "RenderVertexStorageImage"; }
5727 void logPrepare (TestLog&, size_t) const;
5728 void logSubmit (TestLog&, size_t) const;
5729 void prepare (PrepareRenderPassContext&);
5730 void submit (SubmitContext& context);
5731 void verify (VerifyRenderPassContext&, size_t);
5734 PipelineResources m_resources;
5735 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5736 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5737 vk::Move<vk::VkImageView> m_imageView;
5740 RenderVertexStorageImage::~RenderVertexStorageImage (void)
5744 void RenderVertexStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
5746 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
5749 void RenderVertexStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
5751 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
5754 void RenderVertexStorageImage::prepare (PrepareRenderPassContext& context)
5756 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5757 const vk::VkDevice device = context.getContext().getDevice();
5758 const vk::VkRenderPass renderPass = context.getRenderPass();
5759 const deUint32 subpass = 0;
5760 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.vert"), 0));
5761 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5762 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5765 const vk::VkDescriptorSetLayoutBinding binding =
5768 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5770 vk::VK_SHADER_STAGE_VERTEX_BIT,
5774 bindings.push_back(binding);
5777 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5778 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5781 const vk::VkDescriptorPoolSize poolSizes =
5783 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5786 const vk::VkDescriptorPoolCreateInfo createInfo =
5788 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5790 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5797 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5801 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5802 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5804 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5812 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5815 const vk::VkImageViewCreateInfo createInfo =
5817 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5822 vk::VK_IMAGE_VIEW_TYPE_2D,
5823 vk::VK_FORMAT_R8G8B8A8_UNORM,
5824 vk::makeComponentMappingRGBA(),
5826 vk::VK_IMAGE_ASPECT_COLOR_BIT,
5834 m_imageView = vk::createImageView(vkd, device, &createInfo);
5838 const vk::VkDescriptorImageInfo imageInfo =
5842 context.getImageLayout()
5844 const vk::VkWriteDescriptorSet write =
5846 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
5852 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
5858 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
5863 void RenderVertexStorageImage::submit (SubmitContext& context)
5865 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5866 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
5868 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
5870 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
5871 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
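// Reference computation mirrors what the "storage-image.vert" shader is expected to do:
// each pair of point invocations reads texel (pos / 2) from the storage image and uses its
// xy (even invocation) or zw (odd invocation) components as the framebuffer coordinate,
// so those coordinates are marked white in the reference target below.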
5874 void RenderVertexStorageImage::verify (VerifyRenderPassContext& context, size_t)
5876 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
5878 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
5879 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
5882 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
5884 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
5888 class RenderVertexSampledImage : public RenderPassCommand
5891 RenderVertexSampledImage (void) {}
5892 ~RenderVertexSampledImage (void);
5894 const char* getName (void) const { return "RenderVertexSampledImage"; }
5895 void logPrepare (TestLog&, size_t) const;
5896 void logSubmit (TestLog&, size_t) const;
5897 void prepare (PrepareRenderPassContext&);
5898 void submit (SubmitContext& context);
5899 void verify (VerifyRenderPassContext&, size_t);
5902 PipelineResources m_resources;
5903 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
5904 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
5905 vk::Move<vk::VkImageView> m_imageView;
5906 vk::Move<vk::VkSampler> m_sampler;
5909 RenderVertexSampledImage::~RenderVertexSampledImage (void)
5913 void RenderVertexSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
5915 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
5918 void RenderVertexSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
5920 log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
5923 void RenderVertexSampledImage::prepare (PrepareRenderPassContext& context)
5925 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
5926 const vk::VkDevice device = context.getContext().getDevice();
5927 const vk::VkRenderPass renderPass = context.getRenderPass();
5928 const deUint32 subpass = 0;
5929 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.vert"), 0));
5930 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-white.frag"), 0));
5931 vector<vk::VkDescriptorSetLayoutBinding> bindings;
5934 const vk::VkDescriptorSetLayoutBinding binding =
5937 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5939 vk::VK_SHADER_STAGE_VERTEX_BIT,
5943 bindings.push_back(binding);
5946 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
5947 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, 0u, DE_NULL, m_resources);
5950 const vk::VkDescriptorPoolSize poolSizes =
5952 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
5955 const vk::VkDescriptorPoolCreateInfo createInfo =
5957 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
5959 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
5966 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
5970 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
5971 const vk::VkDescriptorSetAllocateInfo allocateInfo =
5973 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
5981 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
5984 const vk::VkImageViewCreateInfo createInfo =
5986 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
5991 vk::VK_IMAGE_VIEW_TYPE_2D,
5992 vk::VK_FORMAT_R8G8B8A8_UNORM,
5993 vk::makeComponentMappingRGBA(),
5995 vk::VK_IMAGE_ASPECT_COLOR_BIT,
6003 m_imageView = vk::createImageView(vkd, device, &createInfo);
6007 const vk::VkSamplerCreateInfo createInfo =
6009 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
6013 vk::VK_FILTER_NEAREST,
6014 vk::VK_FILTER_NEAREST,
6016 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
6017 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6018 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6019 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
6024 vk::VK_COMPARE_OP_ALWAYS,
6027 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
6031 m_sampler = vk::createSampler(vkd, device, &createInfo);
6035 const vk::VkDescriptorImageInfo imageInfo =
6039 context.getImageLayout()
6041 const vk::VkWriteDescriptorSet write =
6043 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6049 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
6055 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6060 void RenderVertexSampledImage::submit (SubmitContext& context)
6062 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6063 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6065 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6067 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
6068 vkd.cmdDraw(commandBuffer, context.getImageWidth() * context.getImageHeight() * 2, 1, 0, 0);
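// Same reference computation as RenderVertexStorageImage::verify, except that the image
// contents are read through the combined image sampler instead of a storage image binding.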
6071 void RenderVertexSampledImage::verify (VerifyRenderPassContext& context, size_t)
6073 for (int pos = 0; pos < (int)(context.getReferenceImage().getWidth() * context.getReferenceImage().getHeight() * 2); pos++)
6075 const tcu::IVec3 size = context.getReferenceImage().getAccess().getSize();
6076 const tcu::UVec4 pixel = context.getReferenceImage().getAccess().getPixelUint((pos / 2) / size.x(), (pos / 2) % size.x());
6079 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.x(), pixel.y());
6081 context.getReferenceTarget().getAccess().setPixel(Vec4(1.0f, 1.0f, 1.0f, 1.0f), pixel.z(), pixel.w());
6085 class RenderFragmentUniformBuffer : public RenderPassCommand
6088 RenderFragmentUniformBuffer (void) {}
6089 ~RenderFragmentUniformBuffer (void);
6091 const char* getName (void) const { return "RenderFragmentUniformBuffer"; }
6092 void logPrepare (TestLog&, size_t) const;
6093 void logSubmit (TestLog&, size_t) const;
6094 void prepare (PrepareRenderPassContext&);
6095 void submit (SubmitContext& context);
6096 void verify (VerifyRenderPassContext&, size_t);
6099 PipelineResources m_resources;
6100 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6101 vector<vk::VkDescriptorSet> m_descriptorSets;
6103 vk::VkDeviceSize m_bufferSize;
6104 size_t m_targetWidth;
6105 size_t m_targetHeight;
6108 RenderFragmentUniformBuffer::~RenderFragmentUniformBuffer (void)
6112 void RenderFragmentUniformBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6114 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform buffer." << TestLog::EndMessage;
6117 void RenderFragmentUniformBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6119 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform buffer." << TestLog::EndMessage;
6122 void RenderFragmentUniformBuffer::prepare (PrepareRenderPassContext& context)
6124 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6125 const vk::VkDevice device = context.getContext().getDevice();
6126 const vk::VkRenderPass renderPass = context.getRenderPass();
6127 const deUint32 subpass = 0;
6128 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6129 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-buffer.frag"), 0));
6130 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6132 m_bufferSize = context.getBufferSize();
6133 m_targetWidth = context.getTargetWidth();
6134 m_targetHeight = context.getTargetHeight();
6137 const vk::VkDescriptorSetLayoutBinding binding =
6140 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6142 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6146 bindings.push_back(binding);
6148 const vk::VkPushConstantRange pushConstantRange =
6150 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6155 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6156 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
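// The buffer may be larger than a single uniform buffer binding is allowed to be, so it is
// exposed through several descriptor sets, each covering at most MAX_UNIFORM_BUFFER_SIZE bytes.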
6159 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE));
6160 const vk::VkDescriptorPoolSize poolSizes =
6162 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6165 const vk::VkDescriptorPoolCreateInfo createInfo =
6167 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6169 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6176 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6177 m_descriptorSets.resize(descriptorCount);
6180 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6182 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6183 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6185 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6193 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6196 const vk::VkDescriptorBufferInfo bufferInfo =
6198 context.getBuffer(),
6199 (vk::VkDeviceSize)(descriptorSetNdx * (size_t)MAX_UNIFORM_BUFFER_SIZE),
6200 m_bufferSize < (descriptorSetNdx + 1) * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6201 ? m_bufferSize - descriptorSetNdx * (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6202 : (vk::VkDeviceSize)MAX_UNIFORM_BUFFER_SIZE
6204 const vk::VkWriteDescriptorSet write =
6206 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6208 m_descriptorSets[descriptorSetNdx],
6212 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
6218 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6223 void RenderFragmentUniformBuffer::submit (SubmitContext& context)
6225 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6226 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6228 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
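// One draw per descriptor set. The push constants tell the fragment shader which buffer
// chunk it reads (callId) and how many chained lookups it performs per pixel (valuesPerPixel).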
6230 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6234 const deUint32 callId;
6235 const deUint32 valuesPerPixel;
6238 (deUint32)descriptorSetNdx,
6239 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight)
6242 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6243 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6244 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
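// CPU-side replay of the fragment shader's value chain: starting from the pixel index, read a
// 32-bit little-endian value from the reference data at an offset derived from the previous
// value, repeat valuesPerPixel times, and interpret the bytes of the final value as the
// expected RGBA color.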
6248 void RenderFragmentUniformBuffer::verify (VerifyRenderPassContext& context, size_t)
6250 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * (MAX_UNIFORM_BUFFER_SIZE / 4), m_targetWidth * m_targetHeight);
6251 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
6252 const size_t arrayIntSize = arraySize * 4;
6254 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6255 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6257 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (arrayIntSize / valuesPerPixel), m_descriptorSets.size() - 1);
6259 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6261 const size_t offset = descriptorSetNdx * MAX_UNIFORM_BUFFER_SIZE;
6262 const deUint32 callId = (deUint32)descriptorSetNdx;
6264 const deUint32 id = callId * ((deUint32)arrayIntSize / valuesPerPixel) + (deUint32)y * 256u + (deUint32)x;
6266 if (y * 256u + x < callId * (arrayIntSize / valuesPerPixel))
6270 deUint32 value = id;
6272 for (deUint32 i = 0; i < valuesPerPixel; i++)
6274 value = ((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 0))
6275 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 1)) << 8u)
6276 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 2)) << 16u)
6277 | (((deUint32)context.getReference().get(offset + (value % (MAX_UNIFORM_BUFFER_SIZE / sizeof(deUint32))) * 4 + 3)) << 24u);
6280 const UVec4 vec ((value >> 0u) & 0xFFu,
6281 (value >> 8u) & 0xFFu,
6282 (value >> 16u) & 0xFFu,
6283 (value >> 24u) & 0xFFu);
6285 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6291 class RenderFragmentStorageBuffer : public RenderPassCommand
6294 RenderFragmentStorageBuffer (void) {}
6295 ~RenderFragmentStorageBuffer (void);
6297 const char* getName (void) const { return "RenderFragmentStorageBuffer"; }
6298 void logPrepare (TestLog&, size_t) const;
6299 void logSubmit (TestLog&, size_t) const;
6300 void prepare (PrepareRenderPassContext&);
6301 void submit (SubmitContext& context);
6302 void verify (VerifyRenderPassContext&, size_t);
6305 PipelineResources m_resources;
6306 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6307 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
6309 vk::VkDeviceSize m_bufferSize;
6310 size_t m_targetWidth;
6311 size_t m_targetHeight;
6314 RenderFragmentStorageBuffer::~RenderFragmentStorageBuffer (void)
6318 void RenderFragmentStorageBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6320 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline to render buffer as storage buffer." << TestLog::EndMessage;
6323 void RenderFragmentStorageBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6325 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage buffer." << TestLog::EndMessage;
6328 void RenderFragmentStorageBuffer::prepare (PrepareRenderPassContext& context)
6330 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6331 const vk::VkDevice device = context.getContext().getDevice();
6332 const vk::VkRenderPass renderPass = context.getRenderPass();
6333 const deUint32 subpass = 0;
6334 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6335 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-buffer.frag"), 0));
6336 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6338 m_bufferSize = context.getBufferSize();
6339 m_targetWidth = context.getTargetWidth();
6340 m_targetHeight = context.getTargetHeight();
6343 const vk::VkDescriptorSetLayoutBinding binding =
6346 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6348 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6352 bindings.push_back(binding);
6354 const vk::VkPushConstantRange pushConstantRange =
6356 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6361 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6362 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6365 const deUint32 descriptorCount = 1;
6366 const vk::VkDescriptorPoolSize poolSizes =
6368 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6371 const vk::VkDescriptorPoolCreateInfo createInfo =
6373 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6375 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6382 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6386 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6387 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6389 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6397 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
6400 const vk::VkDescriptorBufferInfo bufferInfo =
6402 context.getBuffer(),
6406 const vk::VkWriteDescriptorSet write =
6408 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6410 m_descriptorSet.get(),
6414 vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
6420 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6425 void RenderFragmentStorageBuffer::submit (SubmitContext& context)
6427 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6428 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6430 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6434 const deUint32 valuesPerPixel;
6435 const deUint32 bufferSize;
6438 (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight),
6439 (deUint32)m_bufferSize
6442 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
6443 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6444 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
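// Same chained-lookup verification as the uniform buffer case, but the whole buffer is bound
// as a single storage buffer, so lookups simply wrap modulo the buffer size.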
6447 void RenderFragmentStorageBuffer::verify (VerifyRenderPassContext& context, size_t)
6449 const deUint32 valuesPerPixel = (deUint32)divRoundUp<vk::VkDeviceSize>(m_bufferSize / 4, m_targetWidth * m_targetHeight);
6451 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6452 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6454 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6456 deUint32 value = id;
6458 for (deUint32 i = 0; i < valuesPerPixel; i++)
6460 value = (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 0)) << 0u)
6461 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 1)) << 8u)
6462 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 2)) << 16u)
6463 | (((deUint32)context.getReference().get((size_t)(value % (m_bufferSize / sizeof(deUint32))) * 4 + 3)) << 24u);
6466 const UVec4 vec ((value >> 0u) & 0xFFu,
6467 (value >> 8u) & 0xFFu,
6468 (value >> 16u) & 0xFFu,
6469 (value >> 24u) & 0xFFu);
6471 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6475 class RenderFragmentUniformTexelBuffer : public RenderPassCommand
6478 RenderFragmentUniformTexelBuffer (void) {}
6479 ~RenderFragmentUniformTexelBuffer (void);
6481 const char* getName (void) const { return "RenderFragmentUniformTexelBuffer"; }
6482 void logPrepare (TestLog&, size_t) const;
6483 void logSubmit (TestLog&, size_t) const;
6484 void prepare (PrepareRenderPassContext&);
6485 void submit (SubmitContext& context);
6486 void verify (VerifyRenderPassContext&, size_t);
6489 PipelineResources m_resources;
6490 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6491 vector<vk::VkDescriptorSet> m_descriptorSets;
6492 vector<vk::VkBufferView> m_bufferViews;
6494 const vk::DeviceInterface* m_vkd;
6495 vk::VkDevice m_device;
6496 vk::VkDeviceSize m_bufferSize;
6497 deUint32 m_maxUniformTexelCount;
6498 size_t m_targetWidth;
6499 size_t m_targetHeight;
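// The buffer views are stored as raw handles rather than vk::Move<> objects, so they are
// destroyed manually through the cached device interface and device.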
6502 RenderFragmentUniformTexelBuffer::~RenderFragmentUniformTexelBuffer (void)
6504 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6506 if (!!m_bufferViews[bufferViewNdx])
6508 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6509 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6514 void RenderFragmentUniformTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6516 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as uniform texel buffer." << TestLog::EndMessage;
6519 void RenderFragmentUniformTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6521 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as uniform texel buffer." << TestLog::EndMessage;
6524 void RenderFragmentUniformTexelBuffer::prepare (PrepareRenderPassContext& context)
6526 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6527 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6528 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6529 const vk::VkDevice device = context.getContext().getDevice();
6530 const vk::VkRenderPass renderPass = context.getRenderPass();
6531 const deUint32 subpass = 0;
6532 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6533 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("uniform-texel-buffer.frag"), 0));
6534 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6538 m_bufferSize = context.getBufferSize();
6539 m_maxUniformTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6540 m_targetWidth = context.getTargetWidth();
6541 m_targetHeight = context.getTargetHeight();
6544 const vk::VkDescriptorSetLayoutBinding binding =
6547 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6549 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6553 bindings.push_back(binding);
6555 const vk::VkPushConstantRange pushConstantRange =
6557 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6562 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6563 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
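// The buffer is exposed through several R32_UINT texel buffer views, each covering at most
// maxTexelBufferElements texels (4 bytes per texel), with one descriptor set per view.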
6566 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxUniformTexelCount * 4));
6567 const vk::VkDescriptorPoolSize poolSizes =
6569 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6572 const vk::VkDescriptorPoolCreateInfo createInfo =
6574 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6576 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6583 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6584 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6585 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6588 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6590 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6591 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6592 : m_maxUniformTexelCount * 4) / 4;
6593 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6594 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6596 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6604 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6607 const vk::VkBufferViewCreateInfo createInfo =
6609 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6613 context.getBuffer(),
6614 vk::VK_FORMAT_R32_UINT,
6615 descriptorSetNdx * m_maxUniformTexelCount * 4,
6619 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6623 const vk::VkWriteDescriptorSet write =
6625 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6627 m_descriptorSets[descriptorSetNdx],
6631 vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
6634 &m_bufferViews[descriptorSetNdx]
6637 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6642 void RenderFragmentUniformTexelBuffer::submit (SubmitContext& context)
6644 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6645 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6647 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6649 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6653 const deUint32 callId;
6654 const deUint32 valuesPerPixel;
6655 const deUint32 maxUniformTexelCount;
6658 (deUint32)descriptorSetNdx,
6659 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight),
6660 m_maxUniformTexelCount
6663 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6664 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6665 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
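// Verification follows the same chained-lookup scheme as the uniform buffer case, except that
// lookups stay within the texels actually backing each view (count), and later draws overwrite
// the reference results of earlier ones for the pixels they cover.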
6669 void RenderFragmentUniformTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6671 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>((size_t)m_bufferSize / 4, m_maxUniformTexelCount), m_targetWidth * m_targetHeight);
6673 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6674 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6676 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxUniformTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6678 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6680 const size_t offset = descriptorSetNdx * m_maxUniformTexelCount * 4;
6681 const deUint32 callId = (deUint32)descriptorSetNdx;
6683 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6684 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxUniformTexelCount * 4
6685 ? m_bufferSize - descriptorSetNdx * m_maxUniformTexelCount * 4
6686 : m_maxUniformTexelCount * 4) / 4;
6688 if (y * 256u + x < callId * (m_maxUniformTexelCount / valuesPerPixel))
6692 deUint32 value = id;
6694 for (deUint32 i = 0; i < valuesPerPixel; i++)
6696 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6697 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6698 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6699 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6702 const UVec4 vec ((value >> 0u) & 0xFFu,
6703 (value >> 8u) & 0xFFu,
6704 (value >> 16u) & 0xFFu,
6705 (value >> 24u) & 0xFFu);
6707 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6713 class RenderFragmentStorageTexelBuffer : public RenderPassCommand
6716 RenderFragmentStorageTexelBuffer (void) {}
6717 ~RenderFragmentStorageTexelBuffer (void);
6719 const char* getName (void) const { return "RenderFragmentStorageTexelBuffer"; }
6720 void logPrepare (TestLog&, size_t) const;
6721 void logSubmit (TestLog&, size_t) const;
6722 void prepare (PrepareRenderPassContext&);
6723 void submit (SubmitContext& context);
6724 void verify (VerifyRenderPassContext&, size_t);
6727 PipelineResources m_resources;
6728 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6729 vector<vk::VkDescriptorSet> m_descriptorSets;
6730 vector<vk::VkBufferView> m_bufferViews;
6732 const vk::DeviceInterface* m_vkd;
6733 vk::VkDevice m_device;
6734 vk::VkDeviceSize m_bufferSize;
6735 deUint32 m_maxStorageTexelCount;
6736 size_t m_targetWidth;
6737 size_t m_targetHeight;
6740 RenderFragmentStorageTexelBuffer::~RenderFragmentStorageTexelBuffer (void)
6742 for (size_t bufferViewNdx = 0; bufferViewNdx < m_bufferViews.size(); bufferViewNdx++)
6744 if (!!m_bufferViews[bufferViewNdx])
6746 m_vkd->destroyBufferView(m_device, m_bufferViews[bufferViewNdx], DE_NULL);
6747 m_bufferViews[bufferViewNdx] = (vk::VkBufferView)0;
6752 void RenderFragmentStorageTexelBuffer::logPrepare (TestLog& log, size_t commandIndex) const
6754 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render buffer as storage texel buffer." << TestLog::EndMessage;
6757 void RenderFragmentStorageTexelBuffer::logSubmit (TestLog& log, size_t commandIndex) const
6759 log << TestLog::Message << commandIndex << ":" << getName() << " Render using buffer as storage texel buffer." << TestLog::EndMessage;
6762 void RenderFragmentStorageTexelBuffer::prepare (PrepareRenderPassContext& context)
6764 const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
6765 const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
6766 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6767 const vk::VkDevice device = context.getContext().getDevice();
6768 const vk::VkRenderPass renderPass = context.getRenderPass();
6769 const deUint32 subpass = 0;
6770 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6771 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-texel-buffer.frag"), 0));
6772 vector<vk::VkDescriptorSetLayoutBinding> bindings;
6776 m_bufferSize = context.getBufferSize();
6777 m_maxStorageTexelCount = vk::getPhysicalDeviceProperties(vki, physicalDevice).limits.maxTexelBufferElements;
6778 m_targetWidth = context.getTargetWidth();
6779 m_targetHeight = context.getTargetHeight();
6782 const vk::VkDescriptorSetLayoutBinding binding =
6785 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6787 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6791 bindings.push_back(binding);
6793 const vk::VkPushConstantRange pushConstantRange =
6795 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
6800 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
6801 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 1u, &pushConstantRange, m_resources);
6804 const deUint32 descriptorCount = (deUint32)(divRoundUp(m_bufferSize, (vk::VkDeviceSize)m_maxStorageTexelCount * 4));
6805 const vk::VkDescriptorPoolSize poolSizes =
6807 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6810 const vk::VkDescriptorPoolCreateInfo createInfo =
6812 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
6814 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
6821 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
6822 m_descriptorSets.resize(descriptorCount, (vk::VkDescriptorSet)0);
6823 m_bufferViews.resize(descriptorCount, (vk::VkBufferView)0);
6826 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6828 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6829 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6830 : m_maxStorageTexelCount * 4) / 4;
6831 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
6832 const vk::VkDescriptorSetAllocateInfo allocateInfo =
6834 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
6842 m_descriptorSets[descriptorSetNdx] = vk::allocateDescriptorSet(vkd, device, &allocateInfo).disown();
6845 const vk::VkBufferViewCreateInfo createInfo =
6847 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
6851 context.getBuffer(),
6852 vk::VK_FORMAT_R32_UINT,
6853 descriptorSetNdx * m_maxStorageTexelCount * 4,
6857 VK_CHECK(vkd.createBufferView(device, &createInfo, DE_NULL, &m_bufferViews[descriptorSetNdx]));
6861 const vk::VkWriteDescriptorSet write =
6863 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
6865 m_descriptorSets[descriptorSetNdx],
6869 vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,
6872 &m_bufferViews[descriptorSetNdx]
6875 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
6880 void RenderFragmentStorageTexelBuffer::submit (SubmitContext& context)
6882 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6883 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
6885 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
6887 for (size_t descriptorSetNdx = 0; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6891 const deUint32 callId;
6892 const deUint32 valuesPerPixel;
6893 const deUint32 maxStorageTexelCount;
6894 const deUint32 width;
6897 (deUint32)descriptorSetNdx,
6898 (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight),
6899 m_maxStorageTexelCount,
6900 (deUint32)(m_bufferSize < (descriptorSetNdx + 1u) * m_maxStorageTexelCount * 4u
6901 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4u
6902 : m_maxStorageTexelCount * 4u) / 4u
6905 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &m_descriptorSets[descriptorSetNdx], 0u, DE_NULL);
6906 vkd.cmdPushConstants(commandBuffer, *m_resources.pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, (deUint32)sizeof(callParams), &callParams);
6907 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
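// Same verification as the uniform texel buffer case; the extra "width" push constant above is
// the number of texels actually backing the view bound for this draw.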
6911 void RenderFragmentStorageTexelBuffer::verify (VerifyRenderPassContext& context, size_t)
6913 const deUint32 valuesPerPixel = (deUint32)divRoundUp<size_t>(m_descriptorSets.size() * de::min<size_t>(m_maxStorageTexelCount, (size_t)m_bufferSize / 4), m_targetWidth * m_targetHeight);
6915 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
6916 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
6918 const size_t firstDescriptorSetNdx = de::min<size_t>((y * 256u + x) / (m_maxStorageTexelCount / valuesPerPixel), m_descriptorSets.size() - 1);
6920 for (size_t descriptorSetNdx = firstDescriptorSetNdx; descriptorSetNdx < m_descriptorSets.size(); descriptorSetNdx++)
6922 const size_t offset = descriptorSetNdx * m_maxStorageTexelCount * 4;
6923 const deUint32 callId = (deUint32)descriptorSetNdx;
6925 const deUint32 id = (deUint32)y * 256u + (deUint32)x;
6926 const deUint32 count = (deUint32)(m_bufferSize < (descriptorSetNdx + 1) * m_maxStorageTexelCount * 4
6927 ? m_bufferSize - descriptorSetNdx * m_maxStorageTexelCount * 4
6928 : m_maxStorageTexelCount * 4) / 4;
6930 if (y * 256u + x < callId * (m_maxStorageTexelCount / valuesPerPixel))
6934 deUint32 value = id;
6936 for (deUint32 i = 0; i < valuesPerPixel; i++)
6938 value = ((deUint32)context.getReference().get( offset + (value % count) * 4 + 0))
6939 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 1)) << 8u)
6940 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 2)) << 16u)
6941 | (((deUint32)context.getReference().get(offset + (value % count) * 4 + 3)) << 24u);
6944 const UVec4 vec ((value >> 0u) & 0xFFu,
6945 (value >> 8u) & 0xFFu,
6946 (value >> 16u) & 0xFFu,
6947 (value >> 24u) & 0xFFu);
6949 context.getReferenceTarget().getAccess().setPixel(vec.asFloat() / Vec4(255.0f), x, y);
6955 class RenderFragmentStorageImage : public RenderPassCommand
6958 RenderFragmentStorageImage (void) {}
6959 ~RenderFragmentStorageImage (void);
6961 const char* getName (void) const { return "RenderFragmentStorageImage"; }
6962 void logPrepare (TestLog&, size_t) const;
6963 void logSubmit (TestLog&, size_t) const;
6964 void prepare (PrepareRenderPassContext&);
6965 void submit (SubmitContext& context);
6966 void verify (VerifyRenderPassContext&, size_t);
6969 PipelineResources m_resources;
6970 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
6971 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
6972 vk::Move<vk::VkImageView> m_imageView;
6975 RenderFragmentStorageImage::~RenderFragmentStorageImage (void)
6979 void RenderFragmentStorageImage::logPrepare (TestLog& log, size_t commandIndex) const
6981 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render storage image." << TestLog::EndMessage;
6984 void RenderFragmentStorageImage::logSubmit (TestLog& log, size_t commandIndex) const
6986 log << TestLog::Message << commandIndex << ":" << getName() << " Render using storage image." << TestLog::EndMessage;
6989 void RenderFragmentStorageImage::prepare (PrepareRenderPassContext& context)
6991 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
6992 const vk::VkDevice device = context.getContext().getDevice();
6993 const vk::VkRenderPass renderPass = context.getRenderPass();
6994 const deUint32 subpass = 0;
6995 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
6996 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("storage-image.frag"), 0));
6997 vector<vk::VkDescriptorSetLayoutBinding> bindings;
7000 const vk::VkDescriptorSetLayoutBinding binding =
7003 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7005 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7009 bindings.push_back(binding);
7012 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7013 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7016 const vk::VkDescriptorPoolSize poolSizes =
7018 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7021 const vk::VkDescriptorPoolCreateInfo createInfo =
7023 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7025 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7032 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7036 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
7037 const vk::VkDescriptorSetAllocateInfo allocateInfo =
7039 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7047 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
7050 const vk::VkImageViewCreateInfo createInfo =
7052 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7057 vk::VK_IMAGE_VIEW_TYPE_2D,
7058 vk::VK_FORMAT_R8G8B8A8_UNORM,
7059 vk::makeComponentMappingRGBA(),
7061 vk::VK_IMAGE_ASPECT_COLOR_BIT,
7069 m_imageView = vk::createImageView(vkd, device, &createInfo);
7073 const vk::VkDescriptorImageInfo imageInfo =
7077 context.getImageLayout()
7079 const vk::VkWriteDescriptorSet write =
7081 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7087 vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
7093 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7098 void RenderFragmentStorageImage::submit (SubmitContext& context)
7100 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7101 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7103 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7105 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7106 vkd.cmdDraw(commandBuffer, 6, 1, 0, 0);
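// The expected color is computed by chasing texel fetches through the image: the previous
// value's components are folded into a coordinate (using the xor pattern below), the texel at
// that coordinate becomes the next value, and after valuesPerPixel steps the final value gives
// the color.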
7109 void RenderFragmentStorageImage::verify (VerifyRenderPassContext& context, size_t)
7111 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7112 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7114 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7115 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7117 UVec4 value = UVec4(x, y, 0u, 0u);
7119 for (deUint32 i = 0; i < valuesPerPixel; i++)
7121 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7122 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7124 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7125 (deUint32)(floatValue.y() * 255.0f),
7126 (deUint32)(floatValue.z() * 255.0f),
7127 (deUint32)(floatValue.w() * 255.0f));
7130 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7134 class RenderFragmentSampledImage : public RenderPassCommand
7137 RenderFragmentSampledImage (void) {}
7138 ~RenderFragmentSampledImage (void);
7140 const char* getName (void) const { return "RenderFragmentSampledImage"; }
7141 void logPrepare (TestLog&, size_t) const;
7142 void logSubmit (TestLog&, size_t) const;
7143 void prepare (PrepareRenderPassContext&);
7144 void submit (SubmitContext& context);
7145 void verify (VerifyRenderPassContext&, size_t);
7148 PipelineResources m_resources;
7149 vk::Move<vk::VkDescriptorPool> m_descriptorPool;
7150 vk::Move<vk::VkDescriptorSet> m_descriptorSet;
7151 vk::Move<vk::VkImageView> m_imageView;
7152 vk::Move<vk::VkSampler> m_sampler;
7155 RenderFragmentSampledImage::~RenderFragmentSampledImage (void)
7159 void RenderFragmentSampledImage::logPrepare (TestLog& log, size_t commandIndex) const
7161 log << TestLog::Message << commandIndex << ":" << getName() << " Create pipeline for render sampled image." << TestLog::EndMessage;
7164 void RenderFragmentSampledImage::logSubmit (TestLog& log, size_t commandIndex) const
7166 log << TestLog::Message << commandIndex << ":" << getName() << " Render using sampled image." << TestLog::EndMessage;
7169 void RenderFragmentSampledImage::prepare (PrepareRenderPassContext& context)
7171 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7172 const vk::VkDevice device = context.getContext().getDevice();
7173 const vk::VkRenderPass renderPass = context.getRenderPass();
7174 const deUint32 subpass = 0;
7175 const vk::Unique<vk::VkShaderModule> vertexShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("render-quad.vert"), 0));
7176 const vk::Unique<vk::VkShaderModule> fragmentShaderModule (vk::createShaderModule(vkd, device, context.getBinaryCollection().get("sampled-image.frag"), 0));
7177 vector<vk::VkDescriptorSetLayoutBinding> bindings;
7180 const vk::VkDescriptorSetLayoutBinding binding =
7183 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7185 vk::VK_SHADER_STAGE_FRAGMENT_BIT,
7189 bindings.push_back(binding);
7192 createPipelineWithResources(vkd, device, renderPass, subpass, *vertexShaderModule, *fragmentShaderModule, context.getTargetWidth(), context.getTargetHeight(),
7193 vector<vk::VkVertexInputBindingDescription>(), vector<vk::VkVertexInputAttributeDescription>(), bindings, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, 0u, DE_NULL, m_resources);
7196 const vk::VkDescriptorPoolSize poolSizes =
7198 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7201 const vk::VkDescriptorPoolCreateInfo createInfo =
7203 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
7205 vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
7212 m_descriptorPool = vk::createDescriptorPool(vkd, device, &createInfo);
7216 const vk::VkDescriptorSetLayout layout = *m_resources.descriptorSetLayout;
7217 const vk::VkDescriptorSetAllocateInfo allocateInfo =
7219 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
7227 m_descriptorSet = vk::allocateDescriptorSet(vkd, device, &allocateInfo);
7230 const vk::VkImageViewCreateInfo createInfo =
7232 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
7237 vk::VK_IMAGE_VIEW_TYPE_2D,
7238 vk::VK_FORMAT_R8G8B8A8_UNORM,
7239 vk::makeComponentMappingRGBA(),
7241 vk::VK_IMAGE_ASPECT_COLOR_BIT,
7249 m_imageView = vk::createImageView(vkd, device, &createInfo);
7253 const vk::VkSamplerCreateInfo createInfo =
7255 vk::VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
7259 vk::VK_FILTER_NEAREST,
7260 vk::VK_FILTER_NEAREST,
7262 vk::VK_SAMPLER_MIPMAP_MODE_LINEAR,
7263 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7264 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7265 vk::VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
7270 vk::VK_COMPARE_OP_ALWAYS,
7273 vk::VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
7277 m_sampler = vk::createSampler(vkd, device, &createInfo);
7281 const vk::VkDescriptorImageInfo imageInfo =
7285 context.getImageLayout()
7287 const vk::VkWriteDescriptorSet write =
7289 vk::VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
7295 vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
7301 vkd.updateDescriptorSets(device, 1u, &write, 0u, DE_NULL);
7306 void RenderFragmentSampledImage::submit (SubmitContext& context)
7308 const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
7309 const vk::VkCommandBuffer commandBuffer = context.getCommandBuffer();
7311 vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipeline);
7313 vkd.cmdBindDescriptorSets(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_resources.pipelineLayout, 0u, 1u, &(*m_descriptorSet), 0u, DE_NULL);
7314 vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
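// Same reference chain as RenderFragmentStorageImage::verify, with the image read through the
// sampler instead of a storage image binding.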
7317 void RenderFragmentSampledImage::verify (VerifyRenderPassContext& context, size_t)
7319 const UVec2 size = UVec2(context.getReferenceImage().getWidth(), context.getReferenceImage().getHeight());
7320 const deUint32 valuesPerPixel = de::max<deUint32>(1u, (size.x() * size.y()) / (256u * 256u));
7322 for (int y = 0; y < context.getReferenceTarget().getSize().y(); y++)
7323 for (int x = 0; x < context.getReferenceTarget().getSize().x(); x++)
7325 UVec4 value = UVec4(x, y, 0u, 0u);
7327 for (deUint32 i = 0; i < valuesPerPixel; i++)
7329 const UVec2 pos = UVec2(value.z() * 256u + (value.x() ^ value.z()), value.w() * 256u + (value.y() ^ value.w()));
7330 const Vec4 floatValue = context.getReferenceImage().getAccess().getPixel(pos.x() % size.x(), pos.y() % size.y());
7332 value = UVec4((deUint32)(floatValue.x() * 255.0f),
7333 (deUint32)(floatValue.y() * 255.0f),
7334 (deUint32)(floatValue.z() * 255.0f),
7335 (deUint32)(floatValue.w() * 255.0f));
7339 context.getReferenceTarget().getAccess().setPixel(value.asFloat() / Vec4(255.0f), x, y);
7357 OP_BUFFER_BINDMEMORY,
7359 OP_QUEUE_WAIT_FOR_IDLE,
7360 OP_DEVICE_WAIT_FOR_IDLE,
7362 OP_COMMAND_BUFFER_BEGIN,
7363 OP_COMMAND_BUFFER_END,
7365 // Buffer transfer operations
7369 OP_BUFFER_COPY_TO_BUFFER,
7370 OP_BUFFER_COPY_FROM_BUFFER,
7372 OP_BUFFER_COPY_TO_IMAGE,
7373 OP_BUFFER_COPY_FROM_IMAGE,
7377 OP_IMAGE_BINDMEMORY,
7379 OP_IMAGE_TRANSITION_LAYOUT,
7381 OP_IMAGE_COPY_TO_BUFFER,
7382 OP_IMAGE_COPY_FROM_BUFFER,
7384 OP_IMAGE_COPY_TO_IMAGE,
7385 OP_IMAGE_COPY_FROM_IMAGE,
7387 OP_IMAGE_BLIT_TO_IMAGE,
7388 OP_IMAGE_BLIT_FROM_IMAGE,
7392 OP_PIPELINE_BARRIER_GLOBAL,
7393 OP_PIPELINE_BARRIER_BUFFER,
7394 OP_PIPELINE_BARRIER_IMAGE,
7396 // Renderpass operations
7397 OP_RENDERPASS_BEGIN,
7400 // Commands inside render pass
7401 OP_RENDER_VERTEX_BUFFER,
7402 OP_RENDER_INDEX_BUFFER,
7404 OP_RENDER_VERTEX_UNIFORM_BUFFER,
7405 OP_RENDER_FRAGMENT_UNIFORM_BUFFER,
7407 OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER,
7408 OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER,
7410 OP_RENDER_VERTEX_STORAGE_BUFFER,
7411 OP_RENDER_FRAGMENT_STORAGE_BUFFER,
7413 OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER,
7414 OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER,
7416 OP_RENDER_VERTEX_STORAGE_IMAGE,
7417 OP_RENDER_FRAGMENT_STORAGE_IMAGE,
7419 OP_RENDER_VERTEX_SAMPLED_IMAGE,
7420 OP_RENDER_FRAGMENT_SAMPLED_IMAGE,
7426 STAGE_COMMAND_BUFFER,
7431 vk::VkAccessFlags getWriteAccessFlags (void)
7433 return vk::VK_ACCESS_SHADER_WRITE_BIT
7434 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
7435 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
7436 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
7437 | vk::VK_ACCESS_HOST_WRITE_BIT
7438 | vk::VK_ACCESS_MEMORY_WRITE_BIT;
7441 bool isWriteAccess (vk::VkAccessFlagBits access)
7443 return (getWriteAccessFlags() & access) != 0;
7449 CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses);
7451 bool isValid (vk::VkPipelineStageFlagBits stage,
7452 vk::VkAccessFlagBits access) const;
7454 void perform (vk::VkPipelineStageFlagBits stage,
7455 vk::VkAccessFlagBits access);
7457 void submitCommandBuffer (void);
7458 void waitForIdle (void);
7460 void getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7461 vk::VkAccessFlags& srcAccesses,
7462 vk::VkPipelineStageFlags& dstStages,
7463 vk::VkAccessFlags& dstAccesses) const;
7465 void barrier (vk::VkPipelineStageFlags srcStages,
7466 vk::VkAccessFlags srcAccesses,
7467 vk::VkPipelineStageFlags dstStages,
7468 vk::VkAccessFlags dstAccesses);
7470 void imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7471 vk::VkAccessFlags srcAccesses,
7472 vk::VkPipelineStageFlags dstStages,
7473 vk::VkAccessFlags dstAccesses);
7475 void checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7476 vk::VkAccessFlags srcAccesses,
7477 vk::VkPipelineStageFlags dstStages,
7478 vk::VkAccessFlags dstAccesses);
7480 // Everything is clean and there is no need for barriers
7481 bool isClean (void) const;
7483 vk::VkPipelineStageFlags getAllowedStages (void) const { return m_allowedStages; }
7484 vk::VkAccessFlags getAllowedAcceses (void) const { return m_allowedAccesses; }
7486 // Limit which stages and accesses are used by the CacheState tracker
7487 const vk::VkPipelineStageFlags m_allowedStages;
7488 const vk::VkAccessFlags m_allowedAccesses;
7490 // [dstStage][srcStage] = srcAccesses
7491 // In stage dstStage, writes with srcAccesses from srcStage are not yet available
7492 vk::VkAccessFlags m_unavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7493 // The latest image layout transition is not yet available in the stage
7494 bool m_unavailableLayoutTransition[PIPELINESTAGE_LAST];
7495 // [dstStage] = dstAccesses
7496 // In stage dstStage ops with dstAccesses are not yet visible
7497 vk::VkAccessFlags m_invisibleOperations[PIPELINESTAGE_LAST];
7499 // [dstStage] = srcStage
7500 // Memory operations in srcStage have not completed before dstStage
7501 vk::VkPipelineStageFlags m_incompleteOperations[PIPELINESTAGE_LAST];
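// Illustrative sketch of how this tracker is meant to be used (not part of the tests themselves):
// after a transfer write, the data is neither available nor visible to a shader read, and a
// matching barrier makes the access valid again.
//
//   CacheState cache (ALL_PIPELINE_STAGES, ALL_ACCESSES);
//   cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
//   DE_ASSERT(!cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT));
//   cache.barrier(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT,
//                 vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
//   DE_ASSERT(cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT));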
7504 CacheState::CacheState (vk::VkPipelineStageFlags allowedStages, vk::VkAccessFlags allowedAccesses)
7505 : m_allowedStages (allowedStages)
7506 , m_allowedAccesses (allowedAccesses)
7508 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7510 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7512 if ((dstStage_ & m_allowedStages) == 0)
7515 // All operations are initially visible
7516 m_invisibleOperations[dstStage] = 0;
7518 // There are no incomplete read operations initially
7519 m_incompleteOperations[dstStage] = 0;
7521 // There are no incomplete layout transitions
7522 m_unavailableLayoutTransition[dstStage] = false;
7524 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7526 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7528 if ((srcStage_ & m_allowedStages) == 0)
7531 // There are no write operations that are not yet available
7533 m_unavailableWriteOperations[dstStage][srcStage] = 0;
7538 bool CacheState::isValid (vk::VkPipelineStageFlagBits stage,
7539 vk::VkAccessFlagBits access) const
7541 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7542 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7544 const PipelineStage dstStage = pipelineStageFlagToPipelineStage(stage);
7546 // Previous operations are not yet visible to this access in this stage
7547 if (m_unavailableLayoutTransition[dstStage] || (m_invisibleOperations[dstStage] & access) != 0)
7550 if (isWriteAccess(access))
7552 // Memory operations from other stages have not completed before this write
7554 if (m_incompleteOperations[dstStage] != 0)
7561 void CacheState::perform (vk::VkPipelineStageFlagBits stage,
7562 vk::VkAccessFlagBits access)
7564 DE_ASSERT((access & (~m_allowedAccesses)) == 0);
7565 DE_ASSERT((stage & (~m_allowedStages)) == 0);
7567 const PipelineStage srcStage = pipelineStageFlagToPipelineStage(stage);
7569 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7571 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7573 if ((dstStage_ & m_allowedStages) == 0)
7576 // Mark this stage as incomplete for all destination stages
7577 m_incompleteOperations[dstStage] |= stage;
7579 if (isWriteAccess(access))
7581 // Mark all accesses from all stages invisible
7582 m_invisibleOperations[dstStage] |= m_allowedAccesses;
7584 // Mark write access from srcStage unavailable to all stages
7585 m_unavailableWriteOperations[dstStage][srcStage] |= access;
7590 void CacheState::submitCommandBuffer (void)
7592 // Flush all host writes and reads
7593 barrier(m_allowedStages & vk::VK_PIPELINE_STAGE_HOST_BIT,
7594 m_allowedAccesses & (vk::VK_ACCESS_HOST_READ_BIT | vk::VK_ACCESS_HOST_WRITE_BIT),
7599 void CacheState::waitForIdle (void)
7601 // Make all writes available
7602 barrier(m_allowedStages,
7603 m_allowedAccesses & getWriteAccessFlags(),
7607 // Make all writes visible on device side
7608 barrier(m_allowedStages,
7610 m_allowedStages & (~vk::VK_PIPELINE_STAGE_HOST_BIT),
7614 void CacheState::getFullBarrier (vk::VkPipelineStageFlags& srcStages,
7615 vk::VkAccessFlags& srcAccesses,
7616 vk::VkPipelineStageFlags& dstStages,
7617 vk::VkAccessFlags& dstAccesses) const
7624 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7626 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7628 if ((dstStage_ & m_allowedStages) == 0)
7631 // Make sure all previous operations are complete in all stages
7632 if (m_incompleteOperations[dstStage])
7634 dstStages |= dstStage_;
7635 srcStages |= m_incompleteOperations[dstStage];
7638 // Make sure all accesses that are still invisible in dstStage are made visible
7639 if (m_invisibleOperations[dstStage])
7641 dstStages |= dstStage_;
7642 dstAccesses |= m_invisibleOperations[dstStage];
7645 // Make sure all write operations from all stages are available
7646 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7648 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7650 if ((srcStage_ & m_allowedStages) == 0)
7653 if (m_unavailableWriteOperations[dstStage][srcStage])
7655 dstStages |= dstStage_;
7656 srcStages |= dstStage_;
7657 srcAccesses |= m_unavailableWriteOperations[dstStage][srcStage];
7660 if (m_unavailableLayoutTransition[dstStage] && !m_unavailableLayoutTransition[srcStage])
7662 // Add dependency between srcStage and dstStage if layout transition has not completed in dstStage,
7663 // but has completed in srcStage.
7664 dstStages |= dstStage_;
7665 srcStages |= dstStage_;
7670 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7671 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7672 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7673 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7676 void CacheState::checkImageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7677 vk::VkAccessFlags srcAccesses,
7678 vk::VkPipelineStageFlags dstStages,
7679 vk::VkAccessFlags dstAccesses)
7681 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7682 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7683 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7684 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7686 DE_UNREF(srcStages);
7687 DE_UNREF(srcAccesses);
7689 DE_UNREF(dstStages);
7690 DE_UNREF(dstAccesses);
7692 #if defined(DE_DEBUG)
7693 // Check that all stages have completed before srcStages or are in srcStages.
7695 vk::VkPipelineStageFlags completedStages = srcStages;
7697 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7699 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7701 if ((srcStage_ & srcStages) == 0)
7704 completedStages |= (~m_incompleteOperations[srcStage]);
7707 DE_ASSERT((completedStages & m_allowedStages) == m_allowedStages);
7710 // Check that any write is available in at least one stage. Since all stages are complete, even a single flush is enough.
7711 if ((getWriteAccessFlags() & m_allowedAccesses) != 0 && (srcAccesses & getWriteAccessFlags()) == 0)
7713 bool anyWriteAvailable = false;
7715 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7717 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7719 if ((dstStage_ & m_allowedStages) == 0)
7722 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7724 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7726 if ((srcStage_ & m_allowedStages) == 0)
7729 if (m_unavailableWriteOperations[dstStage][srcStage] != (getWriteAccessFlags() & m_allowedAccesses))
7731 anyWriteAvailable = true;
7737 DE_ASSERT(anyWriteAvailable);
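// Note: an image layout transition behaves like a write that happens between
// srcStages and dstStages. After it, previously tracked writes are considered
// available, only dstAccesses are visible, and the transition itself still has
// to complete in every stage not included in dstStages. The actual barrier is
// recorded by the ImageTransition command.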
7742 void CacheState::imageLayoutBarrier (vk::VkPipelineStageFlags srcStages,
7743 vk::VkAccessFlags srcAccesses,
7744 vk::VkPipelineStageFlags dstStages,
7745 vk::VkAccessFlags dstAccesses)
7747 checkImageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
7749 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7751 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7753 if ((dstStage_ & m_allowedStages) == 0)
7756 // All stages are incomplete after the barrier, except each dstStage with respect to itself.
7757 m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
7759 // All memory operations are invisible unless they are listed in dstAccesses
7760 m_invisibleOperations[dstStage] = m_allowedAccesses & (~dstAccesses);
7762 // Layout transition is unavailable in stage unless it was listed in dstStages
7763 m_unavailableLayoutTransition[dstStage]= (dstStage_ & dstStages) == 0;
7765 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7767 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7769 if ((srcStage_ & m_allowedStages) == 0)
7772 // All write operations are available after layout transition
7773 m_unavailableWriteOperations[dstStage][srcStage] = 0;
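// Note: barrier() below models vkCmdPipelineBarrier(): an execution dependency
// from srcStages to dstStages, making the srcAccesses writes performed in
// srcStages available, and, once every write is available to a stage, making
// dstAccesses visible to it. As an illustrative sketch only (the tests record
// their barriers through the PipelineBarrier and ImageTransition commands, and
// 'vkd'/'cmdBuffer' are assumed to be in scope), a matching global barrier
// would look like:
//
//     const vk::VkMemoryBarrier memoryBarrier =
//     {
//         vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
//         DE_NULL,
//         srcAccesses,    // srcAccessMask
//         dstAccesses     // dstAccessMask
//     };
//
//     vkd.cmdPipelineBarrier(cmdBuffer, srcStages, dstStages, (vk::VkDependencyFlags)0,
//                            1u, &memoryBarrier, 0u, DE_NULL, 0u, DE_NULL);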
7778 void CacheState::barrier (vk::VkPipelineStageFlags srcStages,
7779 vk::VkAccessFlags srcAccesses,
7780 vk::VkPipelineStageFlags dstStages,
7781 vk::VkAccessFlags dstAccesses)
7783 DE_ASSERT((srcStages & (~m_allowedStages)) == 0);
7784 DE_ASSERT((srcAccesses & (~m_allowedAccesses)) == 0);
7785 DE_ASSERT((dstStages & (~m_allowedStages)) == 0);
7786 DE_ASSERT((dstAccesses & (~m_allowedAccesses)) == 0);
7790 vk::VkPipelineStageFlags oldIncompleteOperations[PIPELINESTAGE_LAST];
7791 vk::VkAccessFlags oldUnavailableWriteOperations[PIPELINESTAGE_LAST][PIPELINESTAGE_LAST];
7792 bool oldUnavailableLayoutTransition[PIPELINESTAGE_LAST];
7794 deMemcpy(oldIncompleteOperations, m_incompleteOperations, sizeof(oldIncompleteOperations));
7795 deMemcpy(oldUnavailableWriteOperations, m_unavailableWriteOperations, sizeof(oldUnavailableWriteOperations));
7796 deMemcpy(oldUnavailableLayoutTransition, m_unavailableLayoutTransition, sizeof(oldUnavailableLayoutTransition));
7798 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
7800 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7802 if ((srcStage_ & srcStages) == 0)
7805 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7807 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7809 if ((dstStage_ & dstStages) == 0)
7812 // Stages that have completed before srcStage have also completed before dstStage
7813 m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
7815 // Image layout transitions that were available in srcStage are now also available in dstStage
7816 m_unavailableLayoutTransition[dstStage] &= oldUnavailableLayoutTransition[srcStage];
7818 for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
7820 const PipelineStage sharedStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
7822 if ((sharedStage_ & m_allowedStages) == 0)
7825 // Writes that are available in srcStage are also available in dstStage
7826 m_unavailableWriteOperations[dstStage][sharedStage] &= oldUnavailableWriteOperations[srcStage][sharedStage];
7833 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
7835 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7836 bool allWritesAvailable = true;
7838 if ((dstStage_ & dstStages) == 0)
7841 // Operations in srcStages have completed before any stage in dstStages
7842 m_incompleteOperations[dstStage] &= ~srcStages;
7844 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7846 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7848 if ((srcStage_ & m_allowedStages) == 0)
7851 // Make srcAccesses from srcStage available in dstStage
7852 if ((srcStage_ & srcStages) != 0)
7853 m_unavailableWriteOperations[dstStage][srcStage] &= ~srcAccesses;
7855 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
7856 allWritesAvailable = false;
7859 // If all writes are available in dstStage make dstAccesses also visible
7860 if (allWritesAvailable)
7861 m_invisibleOperations[dstStage] &= ~dstAccesses;
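// Note: isClean() returns true only when nothing is pending: no incomplete
// operations, no invisible accesses, no unavailable writes and no unfinished
// layout transitions. It is used to decide whether emitting another pipeline
// barrier op is still meaningful.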
7865 bool CacheState::isClean (void) const
7867 for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
7869 const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
7871 if ((dstStage_ & m_allowedStages) == 0)
7874 // Some operations are not visible to some stages
7875 if (m_invisibleOperations[dstStage] != 0)
7878 // There are operations that have not completed yet
7879 if (m_incompleteOperations[dstStage] != 0)
7882 // Layout transition has not completed yet
7883 if (m_unavailableLayoutTransition[dstStage])
7886 for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
7888 const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
7890 if ((srcStage_ & m_allowedStages) == 0)
7893 // Some write operations are not available yet
7894 if (m_unavailableWriteOperations[dstStage][srcStage] != 0)
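// Note: layoutSupportedByUsage() maps image layouts to the usage flags required
// to use them; it is used both when counting the layouts an image can be in and
// when picking a random layout for a transition.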
7902 bool layoutSupportedByUsage (Usage usage, vk::VkImageLayout layout)
7906 case vk::VK_IMAGE_LAYOUT_GENERAL:
7909 case vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
7910 return (usage & USAGE_COLOR_ATTACHMENT) != 0;
7912 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
7913 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7915 case vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
7916 return (usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0;
7918 case vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
7919 // \todo [2016-03-09 mika] Should include input attachment
7920 return (usage & USAGE_SAMPLED_IMAGE) != 0;
7922 case vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
7923 return (usage & USAGE_TRANSFER_SRC) != 0;
7925 case vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
7926 return (usage & USAGE_TRANSFER_DST) != 0;
7928 case vk::VK_IMAGE_LAYOUT_PREINITIALIZED:
7932 DE_FATAL("Unknown layout");
7937 size_t getNumberOfSupportedLayouts (Usage usage)
7939 const vk::VkImageLayout layouts[] =
7941 vk::VK_IMAGE_LAYOUT_GENERAL,
7942 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7943 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7944 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7945 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7946 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7947 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7949 size_t supportedLayoutCount = 0;
7951 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7953 const vk::VkImageLayout layout = layouts[layoutNdx];
7955 if (layoutSupportedByUsage(usage, layout))
7956 supportedLayoutCount++;
7959 return supportedLayoutCount;
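// Note: picks a random layout that is supported by the usage flags and differs
// from the previous layout (unless the image is still UNDEFINED, in which case
// any supported layout may be chosen).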
7962 vk::VkImageLayout getRandomNextLayout (de::Random& rng,
7964 vk::VkImageLayout previousLayout)
7966 const vk::VkImageLayout layouts[] =
7968 vk::VK_IMAGE_LAYOUT_GENERAL,
7969 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
7970 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
7971 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
7972 vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
7973 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
7974 vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
7976 const size_t supportedLayoutCount = getNumberOfSupportedLayouts(usage);
7978 DE_ASSERT(supportedLayoutCount > 0);
7980 size_t nextLayoutNdx = ((size_t)rng.getUint64()) % (previousLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
7981 ? supportedLayoutCount
7982 : supportedLayoutCount - 1);
7984 for (size_t layoutNdx = 0; layoutNdx < DE_LENGTH_OF_ARRAY(layouts); layoutNdx++)
7986 const vk::VkImageLayout layout = layouts[layoutNdx];
7988 if (layoutSupportedByUsage(usage, layout) && layout != previousLayout)
7990 if (nextLayoutNdx == 0)
7997 DE_FATAL("Unreachable");
7998 return vk::VK_IMAGE_LAYOUT_UNDEFINED;
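// Reference state tracked while the random command stream is generated: where
// we currently are (host, command buffer or render pass), host map/flush/
// invalidate status, whether a buffer or image exists and has memory bound,
// the current image layout and whether its contents are defined, plus the
// CacheState modelling pending availability/visibility operations.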
8003 State (Usage usage, deUint32 seed)
8004 : stage (STAGE_HOST)
8005 , cache (usageToStageFlags(usage), usageToAccessFlags(usage))
8008 , hostInvalidated (true)
8009 , hostFlushed (true)
8010 , memoryDefined (false)
8012 , hasBoundBufferMemory (false)
8014 , hasBoundImageMemory (false)
8015 , imageLayout (vk::VK_IMAGE_LAYOUT_UNDEFINED)
8016 , imageDefined (false)
8019 , commandBufferIsEmpty (true)
8020 , renderPassIsEmpty (true)
8029 bool hostInvalidated;
8034 bool hasBoundBufferMemory;
8037 bool hasBoundImageMemory;
8038 vk::VkImageLayout imageLayout;
8044 bool commandBufferIsEmpty;
8045 bool renderPassIsEmpty;
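// Note: given the current reference state, getAvailableOps() collects every op
// that could legally be performed next; the caller then picks one of them at
// random. The checks mirror the Vulkan rules: for example, reads are only
// offered when the corresponding stage/access combination is valid in the cache
// state and the memory contents are defined.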
8048 void getAvailableOps (const State& state, bool supportsBuffers, bool supportsImages, Usage usage, vector<Op>& ops)
8050 if (state.stage == STAGE_HOST)
8052 if (usage & (USAGE_HOST_READ | USAGE_HOST_WRITE))
8054 // Host memory operations
8057 ops.push_back(OP_UNMAP);
8059 // Avoid flush and invalidate if they are not needed
8060 if (!state.hostFlushed)
8061 ops.push_back(OP_MAP_FLUSH);
8063 if (!state.hostInvalidated
8065 && ((usage & USAGE_HOST_READ) == 0
8066 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8067 && ((usage & USAGE_HOST_WRITE) == 0
8068 || state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)))
8070 ops.push_back(OP_MAP_INVALIDATE);
8073 if (usage & USAGE_HOST_READ
8074 && usage & USAGE_HOST_WRITE
8075 && state.memoryDefined
8076 && state.hostInvalidated
8078 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT)
8079 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8081 ops.push_back(OP_MAP_MODIFY);
8084 if (usage & USAGE_HOST_READ
8085 && state.memoryDefined
8086 && state.hostInvalidated
8088 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_READ_BIT))
8090 ops.push_back(OP_MAP_READ);
8093 if (usage & USAGE_HOST_WRITE
8094 && state.hostInvalidated
8096 && state.cache.isValid(vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_ACCESS_HOST_WRITE_BIT))
8098 ops.push_back(OP_MAP_WRITE);
8102 ops.push_back(OP_MAP);
8105 if (state.hasBoundBufferMemory && state.queueIdle)
8107 // \note Only destroy buffers after they have been bound
8108 ops.push_back(OP_BUFFER_DESTROY);
8112 if (state.hasBuffer)
8114 if (!state.hasBoundBufferMemory)
8115 ops.push_back(OP_BUFFER_BINDMEMORY);
8117 else if (!state.hasImage && supportsBuffers) // Avoid creating buffer if there is already image
8118 ops.push_back(OP_BUFFER_CREATE);
8121 if (state.hasBoundImageMemory && state.queueIdle)
8123 // \note Only destroy images after they have been bound
8124 ops.push_back(OP_IMAGE_DESTROY);
8130 if (!state.hasBoundImageMemory)
8131 ops.push_back(OP_IMAGE_BINDMEMORY);
8133 else if (!state.hasBuffer && supportsImages) // Avoid creating image if there is already buffer
8134 ops.push_back(OP_IMAGE_CREATE);
8137 // Host writes must be flushed before GPU commands, and there must be
8138 // a buffer or an image for the GPU commands to operate on
8139 if (state.hostFlushed
8140 && (state.memoryDefined || supportsDeviceBufferWrites(usage) || state.imageDefined || supportsDeviceImageWrites(usage))
8141 && (state.hasBoundBufferMemory || state.hasBoundImageMemory) // Avoid command buffers if there is no object to use
8142 && (usageToStageFlags(usage) & (~vk::VK_PIPELINE_STAGE_HOST_BIT)) != 0) // Don't start a command buffer if there is no way to use the memory from the GPU
8144 ops.push_back(OP_COMMAND_BUFFER_BEGIN);
8147 if (!state.deviceIdle)
8148 ops.push_back(OP_DEVICE_WAIT_FOR_IDLE);
8150 if (!state.queueIdle)
8151 ops.push_back(OP_QUEUE_WAIT_FOR_IDLE);
8153 else if (state.stage == STAGE_COMMAND_BUFFER)
8155 if (!state.cache.isClean())
8157 ops.push_back(OP_PIPELINE_BARRIER_GLOBAL);
8160 ops.push_back(OP_PIPELINE_BARRIER_IMAGE);
8162 if (state.hasBuffer)
8163 ops.push_back(OP_PIPELINE_BARRIER_BUFFER);
8166 if (state.hasBoundBufferMemory)
8168 if (usage & USAGE_TRANSFER_DST
8169 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8171 ops.push_back(OP_BUFFER_FILL);
8172 ops.push_back(OP_BUFFER_UPDATE);
8173 ops.push_back(OP_BUFFER_COPY_FROM_BUFFER);
8174 ops.push_back(OP_BUFFER_COPY_FROM_IMAGE);
8177 if (usage & USAGE_TRANSFER_SRC
8178 && state.memoryDefined
8179 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8181 ops.push_back(OP_BUFFER_COPY_TO_BUFFER);
8182 ops.push_back(OP_BUFFER_COPY_TO_IMAGE);
8186 if (state.hasBoundImageMemory
8187 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED
8188 || getNumberOfSupportedLayouts(usage) > 1))
8190 ops.push_back(OP_IMAGE_TRANSITION_LAYOUT);
8193 if (usage & USAGE_TRANSFER_DST
8194 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8195 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
8196 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT))
8198 ops.push_back(OP_IMAGE_COPY_FROM_BUFFER);
8199 ops.push_back(OP_IMAGE_COPY_FROM_IMAGE);
8200 ops.push_back(OP_IMAGE_BLIT_FROM_IMAGE);
8203 if (usage & USAGE_TRANSFER_SRC
8204 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8205 || state.imageLayout == vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
8206 && state.imageDefined
8207 && state.cache.isValid(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT))
8209 ops.push_back(OP_IMAGE_COPY_TO_BUFFER);
8210 ops.push_back(OP_IMAGE_COPY_TO_IMAGE);
8211 ops.push_back(OP_IMAGE_BLIT_TO_IMAGE);
8216 // \todo [2016-03-09 mika] Add other usages?
8217 if ((state.memoryDefined
8218 && state.hasBoundBufferMemory
8219 && (((usage & USAGE_VERTEX_BUFFER)
8220 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8221 || ((usage & USAGE_INDEX_BUFFER)
8222 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8223 || ((usage & USAGE_UNIFORM_BUFFER)
8224 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8225 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8226 || ((usage & USAGE_UNIFORM_TEXEL_BUFFER)
8227 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)
8228 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT)))
8229 || ((usage & USAGE_STORAGE_BUFFER)
8230 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8231 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8232 || ((usage & USAGE_STORAGE_TEXEL_BUFFER)
8233 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))
8234 || (state.imageDefined
8235 && state.hasBoundImageMemory
8236 && (((usage & USAGE_STORAGE_IMAGE)
8237 && state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8238 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8239 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)))
8240 || ((usage & USAGE_SAMPLED_IMAGE)
8241 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8242 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
8243 && (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT)
8244 || state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))))))
8246 ops.push_back(OP_RENDERPASS_BEGIN);
8249 // \note This depends on previous operations and always has to be the
8250 // last command buffer operation check
8251 if (ops.empty() || !state.commandBufferIsEmpty)
8252 ops.push_back(OP_COMMAND_BUFFER_END);
8254 else if (state.stage == STAGE_RENDER_PASS)
8256 if ((usage & USAGE_VERTEX_BUFFER) != 0
8257 && state.memoryDefined
8258 && state.hasBoundBufferMemory
8259 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT))
8261 ops.push_back(OP_RENDER_VERTEX_BUFFER);
8264 if ((usage & USAGE_INDEX_BUFFER) != 0
8265 && state.memoryDefined
8266 && state.hasBoundBufferMemory
8267 && state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT))
8269 ops.push_back(OP_RENDER_INDEX_BUFFER);
8272 if ((usage & USAGE_UNIFORM_BUFFER) != 0
8273 && state.memoryDefined
8274 && state.hasBoundBufferMemory)
8276 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8277 ops.push_back(OP_RENDER_VERTEX_UNIFORM_BUFFER);
8279 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8280 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_BUFFER);
8283 if ((usage & USAGE_UNIFORM_TEXEL_BUFFER) != 0
8284 && state.memoryDefined
8285 && state.hasBoundBufferMemory)
8287 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8288 ops.push_back(OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER);
8290 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT))
8291 ops.push_back(OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER);
8294 if ((usage & USAGE_STORAGE_BUFFER) != 0
8295 && state.memoryDefined
8296 && state.hasBoundBufferMemory)
8298 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8299 ops.push_back(OP_RENDER_VERTEX_STORAGE_BUFFER);
8301 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8302 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_BUFFER);
8305 if ((usage & USAGE_STORAGE_TEXEL_BUFFER) != 0
8306 && state.memoryDefined
8307 && state.hasBoundBufferMemory)
8309 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8310 ops.push_back(OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER);
8312 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8313 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER);
8316 if ((usage & USAGE_STORAGE_IMAGE) != 0
8317 && state.imageDefined
8318 && state.hasBoundImageMemory
8319 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL))
8321 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8322 ops.push_back(OP_RENDER_VERTEX_STORAGE_IMAGE);
8324 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8325 ops.push_back(OP_RENDER_FRAGMENT_STORAGE_IMAGE);
8328 if ((usage & USAGE_SAMPLED_IMAGE) != 0
8329 && state.imageDefined
8330 && state.hasBoundImageMemory
8331 && (state.imageLayout == vk::VK_IMAGE_LAYOUT_GENERAL
8332 || state.imageLayout == vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))
8334 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8335 ops.push_back(OP_RENDER_VERTEX_SAMPLED_IMAGE);
8337 if (state.cache.isValid(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT))
8338 ops.push_back(OP_RENDER_FRAGMENT_SAMPLED_IMAGE);
8341 if (!state.renderPassIsEmpty)
8342 ops.push_back(OP_RENDERPASS_END);
8345 DE_FATAL("Unknown stage");
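// Note: applyOp() applies the side effects of the chosen op to the reference
// state so that subsequent getAvailableOps() calls and the later verification
// see a consistent model of the memory contents, image layout and cache state.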
8348 void applyOp (State& state, const Memory& memory, Op op, Usage usage)
8353 DE_ASSERT(state.stage == STAGE_HOST);
8354 DE_ASSERT(!state.mapped);
8355 state.mapped = true;
8359 DE_ASSERT(state.stage == STAGE_HOST);
8360 DE_ASSERT(state.mapped);
8361 state.mapped = false;
8365 DE_ASSERT(state.stage == STAGE_HOST);
8366 DE_ASSERT(!state.hostFlushed);
8367 state.hostFlushed = true;
8370 case OP_MAP_INVALIDATE:
8371 DE_ASSERT(state.stage == STAGE_HOST);
8372 DE_ASSERT(!state.hostInvalidated);
8373 state.hostInvalidated = true;
8377 DE_ASSERT(state.stage == STAGE_HOST);
8378 DE_ASSERT(state.hostInvalidated);
8379 state.rng.getUint32();
8383 DE_ASSERT(state.stage == STAGE_HOST);
8384 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8385 state.hostFlushed = false;
8387 state.memoryDefined = true;
8388 state.imageDefined = false;
8389 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8390 state.rng.getUint32();
8394 DE_ASSERT(state.stage == STAGE_HOST);
8395 DE_ASSERT(state.hostInvalidated);
8397 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8398 state.hostFlushed = false;
8400 state.rng.getUint32();
8403 case OP_BUFFER_CREATE:
8404 DE_ASSERT(state.stage == STAGE_HOST);
8405 DE_ASSERT(!state.hasBuffer);
8407 state.hasBuffer = true;
8410 case OP_BUFFER_DESTROY:
8411 DE_ASSERT(state.stage == STAGE_HOST);
8412 DE_ASSERT(state.hasBuffer);
8413 DE_ASSERT(state.hasBoundBufferMemory);
8415 state.hasBuffer = false;
8416 state.hasBoundBufferMemory = false;
8419 case OP_BUFFER_BINDMEMORY:
8420 DE_ASSERT(state.stage == STAGE_HOST);
8421 DE_ASSERT(state.hasBuffer);
8422 DE_ASSERT(!state.hasBoundBufferMemory);
8424 state.hasBoundBufferMemory = true;
8427 case OP_IMAGE_CREATE:
8428 DE_ASSERT(state.stage == STAGE_HOST);
8429 DE_ASSERT(!state.hasImage);
8430 DE_ASSERT(!state.hasBuffer);
8432 state.hasImage = true;
8435 case OP_IMAGE_DESTROY:
8436 DE_ASSERT(state.stage == STAGE_HOST);
8437 DE_ASSERT(state.hasImage);
8438 DE_ASSERT(state.hasBoundImageMemory);
8440 state.hasImage = false;
8441 state.hasBoundImageMemory = false;
8442 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8443 state.imageDefined = false;
8446 case OP_IMAGE_BINDMEMORY:
8447 DE_ASSERT(state.stage == STAGE_HOST);
8448 DE_ASSERT(state.hasImage);
8449 DE_ASSERT(!state.hasBoundImageMemory);
8451 state.hasBoundImageMemory = true;
8454 case OP_IMAGE_TRANSITION_LAYOUT:
8456 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8457 DE_ASSERT(state.hasImage);
8458 DE_ASSERT(state.hasBoundImageMemory);
8460 // \todo [2016-03-09 mika] Support linear tiling and predefined data
8461 const vk::VkImageLayout srcLayout = state.rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8462 const vk::VkImageLayout dstLayout = getRandomNextLayout(state.rng, usage, srcLayout);
8464 vk::VkPipelineStageFlags dirtySrcStages;
8465 vk::VkAccessFlags dirtySrcAccesses;
8466 vk::VkPipelineStageFlags dirtyDstStages;
8467 vk::VkAccessFlags dirtyDstAccesses;
8469 vk::VkPipelineStageFlags srcStages;
8470 vk::VkAccessFlags srcAccesses;
8471 vk::VkPipelineStageFlags dstStages;
8472 vk::VkAccessFlags dstAccesses;
8474 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8476 // Try masking some random bits
8477 srcStages = dirtySrcStages;
8478 srcAccesses = dirtySrcAccesses;
8480 dstStages = state.cache.getAllowedStages() & state.rng.getUint32();
8481 dstAccesses = state.cache.getAllowedAcceses() & state.rng.getUint32();
8483 // If there are no bits in dst stage mask use all stages
8484 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8487 srcStages = dstStages;
8489 if (srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED)
8490 state.imageDefined = false;
8492 state.commandBufferIsEmpty = false;
8493 state.imageLayout = dstLayout;
8494 state.memoryDefined = false;
8495 state.cache.imageLayoutBarrier(srcStages, srcAccesses, dstStages, dstAccesses);
8499 case OP_QUEUE_WAIT_FOR_IDLE:
8500 DE_ASSERT(state.stage == STAGE_HOST);
8501 DE_ASSERT(!state.queueIdle);
8503 state.queueIdle = true;
8505 state.cache.waitForIdle();
8508 case OP_DEVICE_WAIT_FOR_IDLE:
8509 DE_ASSERT(state.stage == STAGE_HOST);
8510 DE_ASSERT(!state.deviceIdle);
8512 state.queueIdle = true;
8513 state.deviceIdle = true;
8515 state.cache.waitForIdle();
8518 case OP_COMMAND_BUFFER_BEGIN:
8519 DE_ASSERT(state.stage == STAGE_HOST);
8520 state.stage = STAGE_COMMAND_BUFFER;
8521 state.commandBufferIsEmpty = true;
8522 // Makes host writes visible to command buffer
8523 state.cache.submitCommandBuffer();
8526 case OP_COMMAND_BUFFER_END:
8527 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8528 state.stage = STAGE_HOST;
8529 state.queueIdle = false;
8530 state.deviceIdle = false;
8533 case OP_BUFFER_COPY_FROM_BUFFER:
8534 case OP_BUFFER_COPY_FROM_IMAGE:
8535 case OP_BUFFER_UPDATE:
8536 case OP_BUFFER_FILL:
8537 state.rng.getUint32();
8538 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8540 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8541 state.hostInvalidated = false;
8543 state.commandBufferIsEmpty = false;
8544 state.memoryDefined = true;
8545 state.imageDefined = false;
8546 state.imageLayout = vk::VK_IMAGE_LAYOUT_UNDEFINED;
8547 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8550 case OP_BUFFER_COPY_TO_BUFFER:
8551 case OP_BUFFER_COPY_TO_IMAGE:
8552 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8554 state.commandBufferIsEmpty = false;
8555 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8558 case OP_IMAGE_BLIT_FROM_IMAGE:
8559 state.rng.getBool();
8561 case OP_IMAGE_COPY_FROM_BUFFER:
8562 case OP_IMAGE_COPY_FROM_IMAGE:
8563 state.rng.getUint32();
8564 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8566 if ((memory.getMemoryType().propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
8567 state.hostInvalidated = false;
8569 state.commandBufferIsEmpty = false;
8570 state.memoryDefined = false;
8571 state.imageDefined = true;
8572 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_WRITE_BIT);
8575 case OP_IMAGE_BLIT_TO_IMAGE:
8576 state.rng.getBool();
8578 case OP_IMAGE_COPY_TO_BUFFER:
8579 case OP_IMAGE_COPY_TO_IMAGE:
8580 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8582 state.commandBufferIsEmpty = false;
8583 state.cache.perform(vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT);
8586 case OP_PIPELINE_BARRIER_GLOBAL:
8587 case OP_PIPELINE_BARRIER_BUFFER:
8588 case OP_PIPELINE_BARRIER_IMAGE:
8590 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8592 vk::VkPipelineStageFlags dirtySrcStages;
8593 vk::VkAccessFlags dirtySrcAccesses;
8594 vk::VkPipelineStageFlags dirtyDstStages;
8595 vk::VkAccessFlags dirtyDstAccesses;
8597 vk::VkPipelineStageFlags srcStages;
8598 vk::VkAccessFlags srcAccesses;
8599 vk::VkPipelineStageFlags dstStages;
8600 vk::VkAccessFlags dstAccesses;
8602 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8604 // Try masking some random bits
8605 srcStages = dirtySrcStages & state.rng.getUint32();
8606 srcAccesses = dirtySrcAccesses & state.rng.getUint32();
8608 dstStages = dirtyDstStages & state.rng.getUint32();
8609 dstAccesses = dirtyDstAccesses & state.rng.getUint32();
8611 // If there are no bits in stage mask use the original dirty stages
8612 srcStages = srcStages ? srcStages : dirtySrcStages;
8613 dstStages = dstStages ? dstStages : dirtyDstStages;
8616 srcStages = dstStages;
8618 state.commandBufferIsEmpty = false;
8619 state.cache.barrier(srcStages, srcAccesses, dstStages, dstAccesses);
8623 case OP_RENDERPASS_BEGIN:
8625 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8627 state.renderPassIsEmpty = true;
8628 state.stage = STAGE_RENDER_PASS;
8632 case OP_RENDERPASS_END:
8634 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8636 state.renderPassIsEmpty = true;
8637 state.stage = STAGE_COMMAND_BUFFER;
8641 case OP_RENDER_VERTEX_BUFFER:
8643 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8645 state.renderPassIsEmpty = false;
8646 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT);
8650 case OP_RENDER_INDEX_BUFFER:
8652 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8654 state.renderPassIsEmpty = false;
8655 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, vk::VK_ACCESS_INDEX_READ_BIT);
8659 case OP_RENDER_VERTEX_UNIFORM_BUFFER:
8660 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER:
8662 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8664 state.renderPassIsEmpty = false;
8665 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8669 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER:
8670 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER:
8672 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8674 state.renderPassIsEmpty = false;
8675 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_UNIFORM_READ_BIT);
8679 case OP_RENDER_VERTEX_STORAGE_BUFFER:
8680 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER:
8682 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8684 state.renderPassIsEmpty = false;
8685 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8689 case OP_RENDER_FRAGMENT_STORAGE_BUFFER:
8690 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER:
8692 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8694 state.renderPassIsEmpty = false;
8695 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8699 case OP_RENDER_FRAGMENT_STORAGE_IMAGE:
8700 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE:
8702 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8704 state.renderPassIsEmpty = false;
8705 state.cache.perform(vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8709 case OP_RENDER_VERTEX_STORAGE_IMAGE:
8710 case OP_RENDER_VERTEX_SAMPLED_IMAGE:
8712 DE_ASSERT(state.stage == STAGE_RENDER_PASS);
8714 state.renderPassIsEmpty = false;
8715 state.cache.perform(vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, vk::VK_ACCESS_SHADER_READ_BIT);
8720 DE_FATAL("Unknown op");
8724 de::MovePtr<Command> createHostCommand (Op op,
8727 vk::VkSharingMode sharing)
8731 case OP_MAP: return de::MovePtr<Command>(new Map());
8732 case OP_UNMAP: return de::MovePtr<Command>(new UnMap());
8734 case OP_MAP_FLUSH: return de::MovePtr<Command>(new Flush());
8735 case OP_MAP_INVALIDATE: return de::MovePtr<Command>(new Invalidate());
8737 case OP_MAP_READ: return de::MovePtr<Command>(new HostMemoryAccess(true, false, rng.getUint32()));
8738 case OP_MAP_WRITE: return de::MovePtr<Command>(new HostMemoryAccess(false, true, rng.getUint32()));
8739 case OP_MAP_MODIFY: return de::MovePtr<Command>(new HostMemoryAccess(true, true, rng.getUint32()));
8741 case OP_BUFFER_CREATE: return de::MovePtr<Command>(new CreateBuffer(usageToBufferUsageFlags(usage), sharing));
8742 case OP_BUFFER_DESTROY: return de::MovePtr<Command>(new DestroyBuffer());
8743 case OP_BUFFER_BINDMEMORY: return de::MovePtr<Command>(new BindBufferMemory());
8745 case OP_IMAGE_CREATE: return de::MovePtr<Command>(new CreateImage(usageToImageUsageFlags(usage), sharing));
8746 case OP_IMAGE_DESTROY: return de::MovePtr<Command>(new DestroyImage());
8747 case OP_IMAGE_BINDMEMORY: return de::MovePtr<Command>(new BindImageMemory());
8749 case OP_QUEUE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new QueueWaitIdle());
8750 case OP_DEVICE_WAIT_FOR_IDLE: return de::MovePtr<Command>(new DeviceWaitIdle());
8753 DE_FATAL("Unknown op");
8754 return de::MovePtr<Command>(DE_NULL);
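// Note: factory for command-buffer level ops. Barrier-like ops derive their
// stage and access masks from the current cache state, randomly masking bits
// while keeping the barrier valid, exactly like the corresponding cases in
// applyOp() so that the reference state stays in sync with the recorded
// commands.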
8758 de::MovePtr<CmdCommand> createCmdCommand (de::Random& rng,
8765 case OP_BUFFER_FILL: return de::MovePtr<CmdCommand>(new FillBuffer(rng.getUint32()));
8766 case OP_BUFFER_UPDATE: return de::MovePtr<CmdCommand>(new UpdateBuffer(rng.getUint32()));
8767 case OP_BUFFER_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyToBuffer());
8768 case OP_BUFFER_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new BufferCopyFromBuffer(rng.getUint32()));
8770 case OP_BUFFER_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyToImage());
8771 case OP_BUFFER_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new BufferCopyFromImage(rng.getUint32()));
8773 case OP_IMAGE_TRANSITION_LAYOUT:
8775 DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER);
8776 DE_ASSERT(state.hasImage);
8777 DE_ASSERT(state.hasBoundImageMemory);
8779 const vk::VkImageLayout srcLayout = rng.getFloat() < 0.9f ? state.imageLayout : vk::VK_IMAGE_LAYOUT_UNDEFINED;
8780 const vk::VkImageLayout dstLayout = getRandomNextLayout(rng, usage, srcLayout);
8782 vk::VkPipelineStageFlags dirtySrcStages;
8783 vk::VkAccessFlags dirtySrcAccesses;
8784 vk::VkPipelineStageFlags dirtyDstStages;
8785 vk::VkAccessFlags dirtyDstAccesses;
8787 vk::VkPipelineStageFlags srcStages;
8788 vk::VkAccessFlags srcAccesses;
8789 vk::VkPipelineStageFlags dstStages;
8790 vk::VkAccessFlags dstAccesses;
8792 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8794 // Try masking some random bits
8795 srcStages = dirtySrcStages;
8796 srcAccesses = dirtySrcAccesses;
8798 dstStages = state.cache.getAllowedStages() & rng.getUint32();
8799 dstAccesses = state.cache.getAllowedAcceses() & rng.getUint32();
8801 // If there are no bits in dst stage mask use all stages
8802 dstStages = dstStages ? dstStages : state.cache.getAllowedStages();
8805 srcStages = dstStages;
8807 return de::MovePtr<CmdCommand>(new ImageTransition(srcStages, srcAccesses, dstStages, dstAccesses, srcLayout, dstLayout));
8810 case OP_IMAGE_COPY_TO_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyToBuffer(state.imageLayout));
8811 case OP_IMAGE_COPY_FROM_BUFFER: return de::MovePtr<CmdCommand>(new ImageCopyFromBuffer(rng.getUint32(), state.imageLayout));
8812 case OP_IMAGE_COPY_TO_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyToImage(state.imageLayout));
8813 case OP_IMAGE_COPY_FROM_IMAGE: return de::MovePtr<CmdCommand>(new ImageCopyFromImage(rng.getUint32(), state.imageLayout));
8814 case OP_IMAGE_BLIT_TO_IMAGE:
8816 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8817 return de::MovePtr<CmdCommand>(new ImageBlitToImage(scale, state.imageLayout));
8820 case OP_IMAGE_BLIT_FROM_IMAGE:
8822 const BlitScale scale = rng.getBool() ? BLIT_SCALE_20 : BLIT_SCALE_10;
8823 return de::MovePtr<CmdCommand>(new ImageBlitFromImage(rng.getUint32(), scale, state.imageLayout));
8826 case OP_PIPELINE_BARRIER_GLOBAL:
8827 case OP_PIPELINE_BARRIER_BUFFER:
8828 case OP_PIPELINE_BARRIER_IMAGE:
8830 vk::VkPipelineStageFlags dirtySrcStages;
8831 vk::VkAccessFlags dirtySrcAccesses;
8832 vk::VkPipelineStageFlags dirtyDstStages;
8833 vk::VkAccessFlags dirtyDstAccesses;
8835 vk::VkPipelineStageFlags srcStages;
8836 vk::VkAccessFlags srcAccesses;
8837 vk::VkPipelineStageFlags dstStages;
8838 vk::VkAccessFlags dstAccesses;
8840 state.cache.getFullBarrier(dirtySrcStages, dirtySrcAccesses, dirtyDstStages, dirtyDstAccesses);
8842 // Try masking some random bits
8843 srcStages = dirtySrcStages & rng.getUint32();
8844 srcAccesses = dirtySrcAccesses & rng.getUint32();
8846 dstStages = dirtyDstStages & rng.getUint32();
8847 dstAccesses = dirtyDstAccesses & rng.getUint32();
8849 // If there are no bits in stage mask use the original dirty stages
8850 srcStages = srcStages ? srcStages : dirtySrcStages;
8851 dstStages = dstStages ? dstStages : dirtyDstStages;
8854 srcStages = dstStages;
8856 PipelineBarrier::Type type;
8858 if (op == OP_PIPELINE_BARRIER_IMAGE)
8859 type = PipelineBarrier::TYPE_IMAGE;
8860 else if (op == OP_PIPELINE_BARRIER_BUFFER)
8861 type = PipelineBarrier::TYPE_BUFFER;
8862 else if (op == OP_PIPELINE_BARRIER_GLOBAL)
8863 type = PipelineBarrier::TYPE_GLOBAL;
8866 type = PipelineBarrier::TYPE_LAST;
8867 DE_FATAL("Unknown op");
8870 if (type == PipelineBarrier::TYPE_IMAGE)
8871 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::just(state.imageLayout)));
8873 return de::MovePtr<CmdCommand>(new PipelineBarrier(srcStages, srcAccesses, dstStages, dstAccesses, type, tcu::nothing<vk::VkImageLayout>()));
8877 DE_FATAL("Unknown op");
8878 return de::MovePtr<CmdCommand>(DE_NULL);
8882 de::MovePtr<RenderPassCommand> createRenderPassCommand (de::Random&,
8888 case OP_RENDER_VERTEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexBuffer());
8889 case OP_RENDER_INDEX_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderIndexBuffer());
8891 case OP_RENDER_VERTEX_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformBuffer());
8892 case OP_RENDER_FRAGMENT_UNIFORM_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformBuffer());
8894 case OP_RENDER_VERTEX_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexUniformTexelBuffer());
8895 case OP_RENDER_FRAGMENT_UNIFORM_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentUniformTexelBuffer());
8897 case OP_RENDER_VERTEX_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageBuffer());
8898 case OP_RENDER_FRAGMENT_STORAGE_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageBuffer());
8900 case OP_RENDER_VERTEX_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageTexelBuffer());
8901 case OP_RENDER_FRAGMENT_STORAGE_TEXEL_BUFFER: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageTexelBuffer());
8903 case OP_RENDER_VERTEX_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexStorageImage());
8904 case OP_RENDER_FRAGMENT_STORAGE_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentStorageImage());
8906 case OP_RENDER_VERTEX_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderVertexSampledImage());
8907 case OP_RENDER_FRAGMENT_SAMPLED_IMAGE: return de::MovePtr<RenderPassCommand>(new RenderFragmentSampledImage());
8910 DE_FATAL("Unknown op");
8911 return de::MovePtr<RenderPassCommand>(DE_NULL);
8915 de::MovePtr<CmdCommand> createRenderPassCommands (const Memory& memory,
8916 de::Random& nextOpRng,
8922 vector<RenderPassCommand*> commands;
8926 for (; opNdx < opCount; opNdx++)
8930 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8932 DE_ASSERT(!ops.empty());
8935 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8937 if (op == OP_RENDERPASS_END)
8943 de::Random rng (state.rng);
8945 commands.push_back(createRenderPassCommand(rng, state, op).release());
8946 applyOp(state, memory, op, usage);
8948 DE_ASSERT(state.rng == rng);
8953 applyOp(state, memory, OP_RENDERPASS_END, usage);
8954 return de::MovePtr<CmdCommand>(new SubmitRenderPass(commands));
8958 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
8959 delete commands[commandNdx];
8965 de::MovePtr<Command> createCmdCommands (const Memory& memory,
8966 de::Random& nextOpRng,
8972 vector<CmdCommand*> commands;
8976 for (; opNdx < opCount; opNdx++)
8980 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
8982 DE_ASSERT(!ops.empty());
8985 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
8987 if (op == OP_COMMAND_BUFFER_END)
8993 // \note The command needs to know the state before the operation
8994 if (op == OP_RENDERPASS_BEGIN)
8996 applyOp(state, memory, op, usage);
8997 commands.push_back(createRenderPassCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9001 de::Random rng (state.rng);
9003 commands.push_back(createCmdCommand(rng, state, op, usage).release());
9004 applyOp(state, memory, op, usage);
9006 DE_ASSERT(state.rng == rng);
9013 applyOp(state, memory, OP_COMMAND_BUFFER_END, usage);
9014 return de::MovePtr<Command>(new SubmitCommandBuffer(commands));
9018 for (size_t commandNdx = 0; commandNdx < commands.size(); commandNdx++)
9019 delete commands[commandNdx];
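// Note: top-level generator. Starting from the host stage it produces opCount
// ops, nesting command-buffer and render-pass command streams, and finally
// appends the commands needed to clean up any remaining buffer or image.
// A separate rng (nextOpRng) is used only for selecting the next op; the
// per-command randomness comes from state.rng.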
9025 void createCommands (vector<Command*>& commands,
9027 const Memory& memory,
9029 vk::VkSharingMode sharingMode,
9032 State state (usage, seed);
9033 // Used only to select the next operation
9034 de::Random nextOpRng (seed ^ 12930809);
9036 commands.reserve(opCount);
9038 for (size_t opNdx = 0; opNdx < opCount; opNdx++)
9042 getAvailableOps(state, memory.getSupportBuffers(), memory.getSupportImages(), usage, ops);
9044 DE_ASSERT(!ops.empty());
9047 const Op op = nextOpRng.choose<Op>(ops.begin(), ops.end());
9049 if (op == OP_COMMAND_BUFFER_BEGIN)
9051 applyOp(state, memory, op, usage);
9052 commands.push_back(createCmdCommands(memory, nextOpRng, state, usage, opNdx, opCount).release());
9056 de::Random rng (state.rng);
9058 commands.push_back(createHostCommand(op, rng, usage, sharingMode).release());
9059 applyOp(state, memory, op, usage);
9061 // Make sure that random generator is in sync
9062 DE_ASSERT(state.rng == rng);
9067 // Clean up resources
9068 if (state.hasBuffer && state.hasImage)
9070 if (!state.queueIdle)
9071 commands.push_back(new QueueWaitIdle());
9073 if (state.hasBuffer)
9074 commands.push_back(new DestroyBuffer());
9077 commands.push_back(new DestroyImage());
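// Test instance that loops over every memory type and runs several iterations
// per type. Each iterate() call executes one stage through the m_stage function
// pointer: createCommandsAndAllocateMemory -> prepare -> execute -> verify,
// after which nextIteration()/nextMemoryType() advance to the next iteration or
// memory type.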
9081 class MemoryTestInstance : public TestInstance
9085 typedef bool(MemoryTestInstance::*StageFunc)(void);
9087 MemoryTestInstance (::vkt::Context& context, const TestConfig& config);
9088 ~MemoryTestInstance (void);
9090 tcu::TestStatus iterate (void);
9093 const TestConfig m_config;
9094 const size_t m_iterationCount;
9095 const size_t m_opCount;
9096 const vk::VkPhysicalDeviceMemoryProperties m_memoryProperties;
9097 deUint32 m_memoryTypeNdx;
9100 tcu::ResultCollector m_resultCollector;
9102 vector<Command*> m_commands;
9103 MovePtr<Memory> m_memory;
9104 MovePtr<Context> m_renderContext;
9105 MovePtr<PrepareContext> m_prepareContext;
9107 bool nextIteration (void);
9108 bool nextMemoryType (void);
9110 bool createCommandsAndAllocateMemory (void);
9111 bool prepare (void);
9112 bool execute (void);
9114 void resetResources (void);
9117 void MemoryTestInstance::resetResources (void)
9119 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9120 const vk::VkDevice device = m_context.getDevice();
9122 VK_CHECK(vkd.deviceWaitIdle(device));
9124 for (size_t commandNdx = 0; commandNdx < m_commands.size(); commandNdx++)
9126 delete m_commands[commandNdx];
9127 m_commands[commandNdx] = DE_NULL;
9131 m_prepareContext.clear();
9135 bool MemoryTestInstance::nextIteration (void)
9139 if (m_iteration < m_iterationCount)
9142 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9146 return nextMemoryType();
9149 bool MemoryTestInstance::nextMemoryType (void)
9153 DE_ASSERT(m_commands.empty());
9157 if (m_memoryTypeNdx < m_memoryProperties.memoryTypeCount)
9160 m_stage = &MemoryTestInstance::createCommandsAndAllocateMemory;
9171 MemoryTestInstance::MemoryTestInstance (::vkt::Context& context, const TestConfig& config)
9172 : TestInstance (context)
9174 , m_iterationCount (5)
9176 , m_memoryProperties (vk::getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
9177 , m_memoryTypeNdx (0)
9179 , m_stage (&MemoryTestInstance::createCommandsAndAllocateMemory)
9180 , m_resultCollector (context.getTestContext().getLog())
9182 , m_memory (DE_NULL)
9184 TestLog& log = context.getTestContext().getLog();
9186 const tcu::ScopedLogSection section (log, "TestCaseInfo", "Test Case Info");
9188 log << TestLog::Message << "Buffer size: " << config.size << TestLog::EndMessage;
9189 log << TestLog::Message << "Sharing: " << config.sharing << TestLog::EndMessage;
9190 log << TestLog::Message << "Access: " << config.usage << TestLog::EndMessage;
9194 const tcu::ScopedLogSection section (log, "MemoryProperties", "Memory Properties");
9196 for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
9198 const tcu::ScopedLogSection heapSection (log, "Heap" + de::toString(heapNdx), "Heap " + de::toString(heapNdx));
9200 log << TestLog::Message << "Size: " << m_memoryProperties.memoryHeaps[heapNdx].size << TestLog::EndMessage;
9201 log << TestLog::Message << "Flags: " << m_memoryProperties.memoryHeaps[heapNdx].flags << TestLog::EndMessage;
9204 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
9206 const tcu::ScopedLogSection memoryTypeSection (log, "MemoryType" + de::toString(memoryTypeNdx), "Memory type " + de::toString(memoryTypeNdx));
9208 log << TestLog::Message << "Properties: " << m_memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags << TestLog::EndMessage;
9209 log << TestLog::Message << "Heap: " << m_memoryProperties.memoryTypes[memoryTypeNdx].heapIndex << TestLog::EndMessage;
9214 const vk::InstanceInterface& vki = context.getInstanceInterface();
9215 const vk::VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
9216 const vk::DeviceInterface& vkd = context.getDeviceInterface();
9217 const vk::VkDevice device = context.getDevice();
9218 const vk::VkQueue queue = context.getUniversalQueue();
9219 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
9220 vector<pair<deUint32, vk::VkQueue> > queues;
9222 queues.push_back(std::make_pair(queueFamilyIndex, queue));
9224 m_renderContext = MovePtr<Context>(new Context(vki, vkd, physicalDevice, device, queue, queueFamilyIndex, queues, context.getBinaryCollection()));
9228 MemoryTestInstance::~MemoryTestInstance (void)
9233 bool MemoryTestInstance::createCommandsAndAllocateMemory (void)
9235 const vk::VkDevice device = m_context.getDevice();
9236 TestLog& log = m_context.getTestContext().getLog();
9237 const vk::InstanceInterface& vki = m_context.getInstanceInterface();
9238 const vk::VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
9239 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9240 const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
9241 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "CreateCommands" + de::toString(m_iteration),
9242 "Memory type " + de::toString(m_memoryTypeNdx) + " create commands iteration " + de::toString(m_iteration));
9243 const vector<deUint32>& queues = m_renderContext->getQueueFamilies();
9245 DE_ASSERT(m_commands.empty());
9247 if (m_config.usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)
9248 && !(memoryProperties.memoryTypes[m_memoryTypeNdx].propertyFlags & vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
9250 log << TestLog::Message << "Memory type not supported" << TestLog::EndMessage;
9252 return nextMemoryType();
9258 const vk::VkBufferUsageFlags bufferUsage = usageToBufferUsageFlags(m_config.usage);
9259 const vk::VkImageUsageFlags imageUsage = usageToImageUsageFlags(m_config.usage);
9260 const vk::VkDeviceSize maxBufferSize = bufferUsage != 0
9261 ? roundBufferSizeToWxHx4(findMaxBufferSize(vkd, device, bufferUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx))
9263 const IVec2 maxImageSize = imageUsage != 0
9264 ? findMaxRGBA8ImageSize(vkd, device, imageUsage, m_config.sharing, queues, m_config.size, m_memoryTypeNdx)
9267 log << TestLog::Message << "Max buffer size: " << maxBufferSize << TestLog::EndMessage;
9268 log << TestLog::Message << "Max RGBA8 image size: " << maxImageSize << TestLog::EndMessage;
9270 // Skip tests if there are no supported operations
9271 if (maxBufferSize == 0
9272 && maxImageSize[0] == 0
9273 && (m_config.usage & (USAGE_HOST_READ|USAGE_HOST_WRITE)) == 0)
9275 log << TestLog::Message << "Skipping memory type. None of the usages are supported." << TestLog::EndMessage;
9277 return nextMemoryType();
9281 const deUint32 seed = 2830980989u ^ deUint32Hash((deUint32)(m_iteration) * m_memoryProperties.memoryTypeCount + m_memoryTypeNdx);
9283 m_memory = MovePtr<Memory>(new Memory(vki, vkd, physicalDevice, device, m_config.size, m_memoryTypeNdx, maxBufferSize, maxImageSize[0], maxImageSize[1]));
9285 log << TestLog::Message << "Create commands" << TestLog::EndMessage;
9286 createCommands(m_commands, seed, *m_memory, m_config.usage, m_config.sharing, m_opCount);
9288 m_stage = &MemoryTestInstance::prepare;
9292 catch (const tcu::TestError& e)
9294 m_resultCollector.fail("Failed, got exception: " + string(e.getMessage()));
9295 return nextMemoryType();
9300 bool MemoryTestInstance::prepare (void)
9302 TestLog& log = m_context.getTestContext().getLog();
9303 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Prepare" + de::toString(m_iteration),
9304 "Memory type " + de::toString(m_memoryTypeNdx) + " prepare iteration" + de::toString(m_iteration));
9306 m_prepareContext = MovePtr<PrepareContext>(new PrepareContext(*m_renderContext, *m_memory));
9308 DE_ASSERT(!m_commands.empty());
9310 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9312 Command& command = *m_commands[cmdNdx];
9316 command.prepare(*m_prepareContext);
9318 catch (const tcu::TestError& e)
9320 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to prepare, got exception: " + string(e.getMessage()));
9321 return nextMemoryType();
9325 m_stage = &MemoryTestInstance::execute;
9329 bool MemoryTestInstance::execute (void)
9331 TestLog& log = m_context.getTestContext().getLog();
9332 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Execute" + de::toString(m_iteration),
9333 "Memory type " + de::toString(m_memoryTypeNdx) + " execute iteration " + de::toString(m_iteration));
9334 ExecuteContext executeContext (*m_renderContext);
9335 const vk::VkDevice device = m_context.getDevice();
9336 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
9338 DE_ASSERT(!m_commands.empty());
9340 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9342 Command& command = *m_commands[cmdNdx];
9346 command.execute(executeContext);
9348 catch (const tcu::TestError& e)
9350 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to execute, got exception: " + string(e.getMessage()));
9351 return nextIteration();
9355 VK_CHECK(vkd.deviceWaitIdle(device));
9357 m_stage = &MemoryTestInstance::verify;
9361 bool MemoryTestInstance::verify (void)
9363 DE_ASSERT(!m_commands.empty());
9365 TestLog& log = m_context.getTestContext().getLog();
9366 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(m_memoryTypeNdx) + "Verify" + de::toString(m_iteration),
9367 "Memory type " + de::toString(m_memoryTypeNdx) + " verify iteration " + de::toString(m_iteration));
9368 VerifyContext verifyContext (log, m_resultCollector, *m_renderContext, m_config.size);
9370 log << TestLog::Message << "Begin verify" << TestLog::EndMessage;
9372 for (size_t cmdNdx = 0; cmdNdx < m_commands.size(); cmdNdx++)
9374 Command& command = *m_commands[cmdNdx];
9378 command.verify(verifyContext, cmdNdx);
9380 catch (const tcu::TestError& e)
9382 m_resultCollector.fail(de::toString(cmdNdx) + ":" + command.getName() + " failed to verify, got exception: " + string(e.getMessage()));
9383 return nextIteration();
9387 return nextIteration();
9390 tcu::TestStatus MemoryTestInstance::iterate (void)
9392 if ((this->*m_stage)())
9393 return tcu::TestStatus::incomplete();
9395 return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
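// Generates the GLSL sources used by the render pass commands. Each shader
// deterministically maps the buffer or image contents to point positions or
// output colors, so that the expected framebuffer contents can be computed
// from the reference memory model during verification.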
9400 void init (vk::SourceCollections& sources, TestConfig config) const
9402 // Vertex buffer rendering
9403 if (config.usage & USAGE_VERTEX_BUFFER)
9405 const char* const vertexShader =
9407 "layout(location = 0) in highp vec2 a_position;\n"
9408 "void main (void) {\n"
9409 "\tgl_PointSize = 1.0;\n"
9410 "\tgl_Position = vec4(1.998 * a_position - vec2(0.999), 0.0, 1.0);\n"
9413 sources.glslSources.add("vertex-buffer.vert")
9414 << glu::VertexSource(vertexShader);
9417 // Index buffer rendering
9418 if (config.usage & USAGE_INDEX_BUFFER)
9420 const char* const vertexShader =
9422 "precision highp float;\n"
9423 "void main (void) {\n"
9424 "\tgl_PointSize = 1.0;\n"
9425 "\thighp vec2 pos = vec2(gl_VertexIndex % 256, gl_VertexIndex / 256) / vec2(255.0);\n"
9426 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9429 sources.glslSources.add("index-buffer.vert")
9430 << glu::VertexSource(vertexShader);
9433 if (config.usage & USAGE_UNIFORM_BUFFER)
9436 std::ostringstream vertexShader;
9440 "precision highp float;\n"
9441 "layout(set=0, binding=0) uniform Block\n"
9443 "\thighp uvec4 values[" << de::toString<size_t>(MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4)) << "];\n"
9445 "void main (void) {\n"
9446 "\tgl_PointSize = 1.0;\n"
9447 "\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
9448 "\thighp uint val;\n"
9449 "\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
9450 "\t\tval = vecVal.x;\n"
9451 "\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
9452 "\t\tval = vecVal.y;\n"
9453 "\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
9454 "\t\tval = vecVal.z;\n"
9455 "\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
9456 "\t\tval = vecVal.w;\n"
9457 "\tif ((gl_VertexIndex % 2) == 0)\n"
9458 "\t\tval = val & 0xFFFFu;\n"
9460 "\t\tval = val >> 16u;\n"
9461 "\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
9462 "\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
9465 sources.glslSources.add("uniform-buffer.vert")
9466 << glu::VertexSource(vertexShader.str());
9470 const size_t arraySize = MAX_UNIFORM_BUFFER_SIZE / (sizeof(deUint32) * 4);
9471 const size_t arrayIntSize = arraySize * 4;
9472 std::ostringstream fragmentShader;
9476 "precision highp float;\n"
9477 "precision highp int;\n"
9478 "layout(location = 0) out highp vec4 o_color;\n"
9479 "layout(set=0, binding=0) uniform Block\n"
9481 "\thighp uvec4 values[" << arraySize << "];\n"
9483 "layout(push_constant) uniform PushC\n"
9486 "\tuint valuesPerPixel;\n"
9488 "void main (void) {\n"
9489 "\thighp uint id = pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel) + uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
9490 "\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (" << arrayIntSize << "u / pushC.valuesPerPixel))\n"
9492 "\thighp uint value = id;\n"
9493 "\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
9495 "\t\thighp uvec4 vecVal = block.values[(value / 4u) % " << arraySize << "u];\n"
9496 "\t\tif ((value % 4u) == 0u)\n"
9497 "\t\t\tvalue = vecVal.x;\n"
9498 "\t\telse if ((value % 4u) == 1u)\n"
9499 "\t\t\tvalue = vecVal.y;\n"
9500 "\t\telse if ((value % 4u) == 2u)\n"
9501 "\t\t\tvalue = vecVal.z;\n"
9502 "\t\telse if ((value % 4u) == 3u)\n"
9503 "\t\t\tvalue = vecVal.w;\n"
9505 "\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
9506 "\to_color = vec4(valueOut) / vec4(255.0);\n"
9509 sources.glslSources.add("uniform-buffer.frag")
9510 << glu::FragmentSource(fragmentShader.str());
		if (config.usage & USAGE_STORAGE_BUFFER)
		{
			{
				// Vertex storage buffer rendering
				const char* const vertexShader =
					"#version 310 es\n"
					"precision highp float;\n"
					"layout(set=0, binding=0) buffer Block\n"
					"{\n"
					"\thighp uvec4 values[];\n"
					"} block;\n"
					"void main (void) {\n"
					"\tgl_PointSize = 1.0;\n"
					"\thighp uvec4 vecVal = block.values[gl_VertexIndex / 8];\n"
					"\thighp uint val;\n"
					"\tif (((gl_VertexIndex / 2) % 4 == 0))\n"
					"\t\tval = vecVal.x;\n"
					"\telse if (((gl_VertexIndex / 2) % 4 == 1))\n"
					"\t\tval = vecVal.y;\n"
					"\telse if (((gl_VertexIndex / 2) % 4 == 2))\n"
					"\t\tval = vecVal.z;\n"
					"\telse if (((gl_VertexIndex / 2) % 4 == 3))\n"
					"\t\tval = vecVal.w;\n"
					"\tif ((gl_VertexIndex % 2) == 0)\n"
					"\t\tval = val & 0xFFFFu;\n"
					"\telse\n"
					"\t\tval = val >> 16u;\n"
					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
					"}\n";

				sources.glslSources.add("storage-buffer.vert")
					<< glu::VertexSource(vertexShader);
			}

			{
				std::ostringstream fragmentShader;

				fragmentShader
					<< "#version 310 es\n"
					"precision highp float;\n"
					"precision highp int;\n"
					"layout(location = 0) out highp vec4 o_color;\n"
					"layout(set=0, binding=0) buffer Block\n"
					"{\n"
					"\thighp uvec4 values[];\n"
					"} block;\n"
					"layout(push_constant) uniform PushC\n"
					"{\n"
					"\tuint valuesPerPixel;\n"
					"\tuint bufferSize;\n"
					"} pushC;\n"
					"void main (void) {\n"
					"\thighp uint arrayIntSize = pushC.bufferSize / 4u;\n"
					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
					"\thighp uint value = id;\n"
					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
					"\t{\n"
					"\t\thighp uvec4 vecVal = block.values[(value / 4u) % (arrayIntSize / 4u)];\n"
					"\t\tif ((value % 4u) == 0u)\n"
					"\t\t\tvalue = vecVal.x;\n"
					"\t\telse if ((value % 4u) == 1u)\n"
					"\t\t\tvalue = vecVal.y;\n"
					"\t\telse if ((value % 4u) == 2u)\n"
					"\t\t\tvalue = vecVal.z;\n"
					"\t\telse if ((value % 4u) == 3u)\n"
					"\t\t\tvalue = vecVal.w;\n"
					"\t}\n"
					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
					"\to_color = vec4(valueOut) / vec4(255.0);\n"
					"}\n";

				sources.glslSources.add("storage-buffer.frag")
					<< glu::FragmentSource(fragmentShader.str());
			}
		}
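
		// Uniform texel buffer shaders: each point fetches one texel and unpacks it into x/y
		// coordinates, while the fragment shader follows a chain of texelFetch() lookups of
		// length pushC.valuesPerPixel to fold the buffer contents into the output color.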
		if (config.usage & USAGE_UNIFORM_TEXEL_BUFFER)
		{
			{
				// Vertex uniform texel buffer rendering
				const char* const vertexShader =
					"#version 310 es\n"
					"#extension GL_EXT_texture_buffer : require\n"
					"precision highp float;\n"
					"layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
					"void main (void) {\n"
					"\tgl_PointSize = 1.0;\n"
					"\thighp uint val = texelFetch(u_sampler, gl_VertexIndex).x;\n"
					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
					"}\n";

				sources.glslSources.add("uniform-texel-buffer.vert")
					<< glu::VertexSource(vertexShader);
			}

			{
				// Fragment uniform texel buffer rendering
				const char* const fragmentShader =
					"#version 310 es\n"
					"#extension GL_EXT_texture_buffer : require\n"
					"precision highp float;\n"
					"precision highp int;\n"
					"layout(set=0, binding=0) uniform highp usamplerBuffer u_sampler;\n"
					"layout(location = 0) out highp vec4 o_color;\n"
					"layout(push_constant) uniform PushC\n"
					"{\n"
					"\tuint callId;\n"
					"\tuint valuesPerPixel;\n"
					"\tuint maxTexelCount;\n"
					"} pushC;\n"
					"void main (void) {\n"
					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
					"\thighp uint value = id;\n"
					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
					"\t\tdiscard;\n"
					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
					"\t{\n"
					"\t\tvalue = texelFetch(u_sampler, int(value % uint(textureSize(u_sampler)))).x;\n"
					"\t}\n"
					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
					"\to_color = vec4(valueOut) / vec4(255.0);\n"
					"}\n";

				sources.glslSources.add("uniform-texel-buffer.frag")
					<< glu::FragmentSource(fragmentShader);
			}
		}
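
		// Storage texel buffer shaders: same traversal as the uniform texel buffer case, but the
		// r32ui buffer is read with imageLoad() and each 32-bit texel yields two point positions.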
		if (config.usage & USAGE_STORAGE_TEXEL_BUFFER)
		{
			{
				// Vertex storage texel buffer rendering
				const char* const vertexShader =
					"#version 310 es\n"
					"#extension GL_EXT_texture_buffer : require\n"
					"precision highp float;\n"
					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
					"out gl_PerVertex {\n"
					"\tvec4 gl_Position;\n"
					"\tfloat gl_PointSize;\n"
					"};\n"
					"void main (void) {\n"
					"\tgl_PointSize = 1.0;\n"
					"\thighp uint val = imageLoad(u_sampler, gl_VertexIndex / 2).x;\n"
					"\tif (gl_VertexIndex % 2 == 0)\n"
					"\t\tval = val & 0xFFFFu;\n"
					"\telse\n"
					"\t\tval = val >> 16;\n"
					"\thighp vec2 pos = vec2(val & 0xFFu, val >> 8u) / vec2(255.0);\n"
					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
					"}\n";

				sources.glslSources.add("storage-texel-buffer.vert")
					<< glu::VertexSource(vertexShader);
			}
			{
				// Fragment storage texel buffer rendering
				const char* const fragmentShader =
					"#version 310 es\n"
					"#extension GL_EXT_texture_buffer : require\n"
					"precision highp float;\n"
					"precision highp int;\n"
					"layout(set=0, binding=0, r32ui) uniform readonly highp uimageBuffer u_sampler;\n"
					"layout(location = 0) out highp vec4 o_color;\n"
					"layout(push_constant) uniform PushC\n"
					"{\n"
					"\tuint callId;\n"
					"\tuint valuesPerPixel;\n"
					"\tuint maxTexelCount;\n"
					"\tuint width;\n"
					"} pushC;\n"
					"void main (void) {\n"
					"\thighp uint id = uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x);\n"
					"\thighp uint value = id;\n"
					"\tif (uint(gl_FragCoord.y) * 256u + uint(gl_FragCoord.x) < pushC.callId * (pushC.maxTexelCount / pushC.valuesPerPixel))\n"
					"\t\tdiscard;\n"
					"\tfor (uint i = 0u; i < pushC.valuesPerPixel; i++)\n"
					"\t{\n"
					"\t\tvalue = imageLoad(u_sampler, int(value % pushC.width)).x;\n"
					"\t}\n"
					"\tuvec4 valueOut = uvec4(value & 0xFFu, (value >> 8u) & 0xFFu, (value >> 16u) & 0xFFu, (value >> 24u) & 0xFFu);\n"
					"\to_color = vec4(valueOut) / vec4(255.0);\n"
					"}\n";

				sources.glslSources.add("storage-texel-buffer.frag")
					<< glu::FragmentSource(fragmentShader);
			}
		}
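
		// Storage image shaders: the vertex shader reads two positions from each rgba8 texel,
		// and the fragment shader walks the image using the previously loaded texel to select
		// the next coordinates, making the result sensitive to the whole image contents.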
		if (config.usage & USAGE_STORAGE_IMAGE)
		{
			{
				// Vertex storage image
				const char* const vertexShader =
					"#version 310 es\n"
					"precision highp float;\n"
					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
					"out gl_PerVertex {\n"
					"\tvec4 gl_Position;\n"
					"\tfloat gl_PointSize;\n"
					"};\n"
					"void main (void) {\n"
					"\tgl_PointSize = 1.0;\n"
					"\thighp vec4 val = imageLoad(u_image, ivec2((gl_VertexIndex / 2) / imageSize(u_image).x, (gl_VertexIndex / 2) % imageSize(u_image).x));\n"
					"\thighp vec2 pos;\n"
					"\tif (gl_VertexIndex % 2 == 0)\n"
					"\t\tpos = val.xy;\n"
					"\telse\n"
					"\t\tpos = val.zw;\n"
					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
					"}\n";

				sources.glslSources.add("storage-image.vert")
					<< glu::VertexSource(vertexShader);
			}
			{
				// Fragment storage image
				const char* const fragmentShader =
					"#version 310 es\n"
					"#extension GL_EXT_texture_buffer : require\n"
					"precision highp float;\n"
					"layout(set=0, binding=0, rgba8) uniform image2D u_image;\n"
					"layout(location = 0) out highp vec4 o_color;\n"
					"void main (void) {\n"
					"\thighp uvec2 size = uvec2(imageSize(u_image).x, imageSize(u_image).y);\n"
					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
					"\t{\n"
					"\t\thighp vec4 floatValue = imageLoad(u_image, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)));\n"
					"\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
					"\t}\n"
					"\to_color = vec4(value) / vec4(255.0);\n"
					"}\n";

				sources.glslSources.add("storage-image.frag")
					<< glu::FragmentSource(fragmentShader);
			}
		}
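
		// Sampled image shaders: identical traversal to the storage image case, but the data is
		// read through a combined image sampler with texelFetch() instead of imageLoad().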
		if (config.usage & USAGE_SAMPLED_IMAGE)
		{
			{
				// Vertex sampled image
				const char* const vertexShader =
					"#version 310 es\n"
					"precision highp float;\n"
					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
					"out gl_PerVertex {\n"
					"\tvec4 gl_Position;\n"
					"\tfloat gl_PointSize;\n"
					"};\n"
					"void main (void) {\n"
					"\tgl_PointSize = 1.0;\n"
					"\thighp vec4 val = texelFetch(u_sampler, ivec2((gl_VertexIndex / 2) / textureSize(u_sampler, 0).x, (gl_VertexIndex / 2) % textureSize(u_sampler, 0).x), 0);\n"
					"\thighp vec2 pos;\n"
					"\tif (gl_VertexIndex % 2 == 0)\n"
					"\t\tpos = val.xy;\n"
					"\telse\n"
					"\t\tpos = val.zw;\n"
					"\tgl_Position = vec4(1.998 * pos - vec2(0.999), 0.0, 1.0);\n"
					"}\n";

				sources.glslSources.add("sampled-image.vert")
					<< glu::VertexSource(vertexShader);
			}
			{
				// Fragment sampled image
				const char* const fragmentShader =
					"#version 310 es\n"
					"#extension GL_EXT_texture_buffer : require\n"
					"precision highp float;\n"
					"layout(set=0, binding=0) uniform sampler2D u_sampler;\n"
					"layout(location = 0) out highp vec4 o_color;\n"
					"void main (void) {\n"
					"\thighp uvec2 size = uvec2(textureSize(u_sampler, 0).x, textureSize(u_sampler, 0).y);\n"
					"\thighp uint valuesPerPixel = max(1u, (size.x * size.y) / (256u * 256u));\n"
					"\thighp uvec4 value = uvec4(uint(gl_FragCoord.x), uint(gl_FragCoord.y), 0u, 0u);\n"
					"\tfor (uint i = 0u; i < valuesPerPixel; i++)\n"
					"\t{\n"
					"\t\thighp vec4 floatValue = texelFetch(u_sampler, ivec2(int((value.z * 256u + (value.x ^ value.z)) % size.x), int((value.w * 256u + (value.y ^ value.w)) % size.y)), 0);\n"
					"\t\tvalue = uvec4(uint(floatValue.x * 255.0), uint(floatValue.y * 255.0), uint(floatValue.z * 255.0), uint(floatValue.w * 255.0));\n"
					"\t}\n"
					"\to_color = vec4(value) / vec4(255.0);\n"
					"}\n";

				sources.glslSources.add("sampled-image.frag")
					<< glu::FragmentSource(fragmentShader);
			}
		}
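
		// Generic shaders shared by the render operations: a full-screen quad derived from
		// gl_VertexIndex and a fragment shader that writes constant white.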
		{
			const char* const vertexShader =
				"#version 310 es\n"
				"out gl_PerVertex {\n"
				"\tvec4 gl_Position;\n"
				"};\n"
				"precision highp float;\n"
				"void main (void) {\n"
				"\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
				"\t ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
				"}\n";

			sources.glslSources.add("render-quad.vert")
				<< glu::VertexSource(vertexShader);
		}

		{
			const char* const fragmentShader =
				"#version 310 es\n"
				"layout(location = 0) out highp vec4 o_color;\n"
				"void main (void) {\n"
				"\to_color = vec4(1.0);\n"
				"}\n";

			sources.glslSources.add("render-white.frag")
				<< glu::FragmentSource(fragmentShader);
		}
	}
};

} // anonymous
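
// Build the test hierarchy: one group per write-usage/read-usage combination, plus "all" and
// "all_device" groups that combine every usage (the latter without host access), each
// instantiated for every entry in sizes[].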
tcu::TestCaseGroup* createPipelineBarrierTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup>	group	(new tcu::TestCaseGroup(testCtx, "pipeline_barrier", "Pipeline barrier tests."));
	const vk::VkDeviceSize			sizes[] =
	const Usage						usages[] =
	{
		USAGE_VERTEX_BUFFER,
		USAGE_UNIFORM_BUFFER,
		USAGE_UNIFORM_TEXEL_BUFFER,
		USAGE_STORAGE_BUFFER,
		USAGE_STORAGE_TEXEL_BUFFER,
		USAGE_STORAGE_IMAGE,
	};
	const Usage						readUsages[] =
	{
		USAGE_VERTEX_BUFFER,
		USAGE_UNIFORM_BUFFER,
		USAGE_UNIFORM_TEXEL_BUFFER,
		USAGE_STORAGE_BUFFER,
		USAGE_STORAGE_TEXEL_BUFFER,
		USAGE_STORAGE_IMAGE,
	};

	const Usage						writeUsages[] =

	for (size_t writeUsageNdx = 0; writeUsageNdx < DE_LENGTH_OF_ARRAY(writeUsages); writeUsageNdx++)
	{
		const Usage	writeUsage	= writeUsages[writeUsageNdx];

		for (size_t readUsageNdx = 0; readUsageNdx < DE_LENGTH_OF_ARRAY(readUsages); readUsageNdx++)
		{
			const Usage						readUsage		= readUsages[readUsageNdx];
			const Usage						usage			= writeUsage | readUsage;
			const string					usageGroupName	(usageToName(usage));
			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));

			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
			{
				const vk::VkDeviceSize	size		= sizes[sizeNdx];
				const string			testName	(de::toString((deUint64)(size)));
				const TestConfig		config		=
				{
					usage,
					size,
					vk::VK_SHARING_MODE_EXCLUSIVE
				};

				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
			}

			group->addChild(usageGroup.get());
			usageGroup.release();
		}
	}

	{
		Usage all = (Usage)0;

		for (size_t usageNdx = 0; usageNdx < DE_LENGTH_OF_ARRAY(usages); usageNdx++)
			all = all | usages[usageNdx];

		{
			const string					usageGroupName	("all");
			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));

			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
			{
				const vk::VkDeviceSize	size		= sizes[sizeNdx];
				const string			testName	(de::toString((deUint64)(size)));
				const TestConfig		config		=
				{
					all,
					size,
					vk::VK_SHARING_MODE_EXCLUSIVE
				};

				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
			}

			group->addChild(usageGroup.get());
			usageGroup.release();
		}

		{
			const string					usageGroupName	("all_device");
			de::MovePtr<tcu::TestCaseGroup>	usageGroup		(new tcu::TestCaseGroup(testCtx, usageGroupName.c_str(), usageGroupName.c_str()));

			for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
			{
				const vk::VkDeviceSize	size		= sizes[sizeNdx];
				const string			testName	(de::toString((deUint64)(size)));
				const TestConfig		config		=
				{
					(Usage)(all & (~(USAGE_HOST_READ|USAGE_HOST_WRITE))),
					size,
					vk::VK_SHARING_MODE_EXCLUSIVE
				};

				usageGroup->addChild(new InstanceFactory1<MemoryTestInstance, TestConfig, AddPrograms>(testCtx, tcu::NODETYPE_SELF_VALIDATE, testName, testName, AddPrograms(), config));
			}

			group->addChild(usageGroup.get());
			usageGroup.release();
		}
	}

	return group.release();