1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Imagination Technologies Ltd.
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Utilities for images.
23 *//*--------------------------------------------------------------------*/
25 #include "vktPipelineImageUtil.hpp"
26 #include "vkImageUtil.hpp"
27 #include "vkMemUtil.hpp"
28 #include "vkQueryUtil.hpp"
29 #include "vkRefUtil.hpp"
30 #include "tcuTextureUtil.hpp"
31 #include "tcuAstcUtil.hpp"
32 #include "deRandom.hpp"
33 #include "deSharedPtr.hpp"
42 /*! Gets the next multiple of a given divisor */
43 static deUint32 getNextMultiple (deUint32 divisor, deUint32 value)
45 if (value % divisor == 0)
49 return value + divisor - (value % divisor);
52 /*! Gets the next value that is multiple of all given divisors */
53 static deUint32 getNextMultiple (const std::vector<deUint32>& divisors, deUint32 value)
55 deUint32 nextMultiple = value;
56 bool nextMultipleFound = false;
60 nextMultipleFound = true;
62 for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
63 nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);
65 if (nextMultipleFound)
68 DE_ASSERT(nextMultiple < ~((deUint32)0u));
69 nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
75 bool isSupportedSamplableFormat (const InstanceInterface& instanceInterface, VkPhysicalDevice device, VkFormat format)
77 if (isCompressedFormat(format))
79 VkPhysicalDeviceFeatures physicalFeatures;
80 const tcu::CompressedTexFormat compressedFormat = mapVkCompressedFormat(format);
82 instanceInterface.getPhysicalDeviceFeatures(device, &physicalFeatures);
84 if (tcu::isAstcFormat(compressedFormat))
86 if (!physicalFeatures.textureCompressionASTC_LDR)
89 else if (tcu::isEtcFormat(compressedFormat))
91 if (!physicalFeatures.textureCompressionETC2)
96 DE_FATAL("Unsupported compressed format");
100 VkFormatProperties formatProps;
101 instanceInterface.getPhysicalDeviceFormatProperties(device, format, &formatProps);
103 return (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0u;
// \todo [2016-01-21 pyry] Update this to just rely on vkDefs.hpp once
// CTS has been updated to 1.0.2.
enum
{
	// Local fallback for SDK headers that predate Vulkan 1.0.2.
	VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
};
113 bool isLinearFilteringSupported (const InstanceInterface& vki, VkPhysicalDevice physicalDevice, VkFormat format, VkImageTiling tiling)
115 const VkFormatProperties formatProperties = getPhysicalDeviceFormatProperties(vki, physicalDevice, format);
116 const VkFormatFeatureFlags formatFeatures = tiling == VK_IMAGE_TILING_LINEAR
117 ? formatProperties.linearTilingFeatures
118 : formatProperties.optimalTilingFeatures;
122 case VK_FORMAT_R32_SFLOAT:
123 case VK_FORMAT_R32G32_SFLOAT:
124 case VK_FORMAT_R32G32B32_SFLOAT:
125 case VK_FORMAT_R32G32B32A32_SFLOAT:
126 case VK_FORMAT_R64_SFLOAT:
127 case VK_FORMAT_R64G64_SFLOAT:
128 case VK_FORMAT_R64G64B64_SFLOAT:
129 case VK_FORMAT_R64G64B64A64_SFLOAT:
130 return (formatFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0;
133 // \todo [2016-01-21 pyry] Check for all formats once drivers have been updated to 1.0.2
134 // and we have tests to verify format properties.
139 VkBorderColor getFormatBorderColor (BorderColor color, VkFormat format)
141 if (!isCompressedFormat(format) && (isIntFormat(format) || isUintFormat(format)))
145 case BORDER_COLOR_OPAQUE_BLACK: return VK_BORDER_COLOR_INT_OPAQUE_BLACK;
146 case BORDER_COLOR_OPAQUE_WHITE: return VK_BORDER_COLOR_INT_OPAQUE_WHITE;
147 case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_INT_TRANSPARENT_BLACK;
156 case BORDER_COLOR_OPAQUE_BLACK: return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
157 case BORDER_COLOR_OPAQUE_WHITE: return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
158 case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
165 return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
168 void getLookupScaleBias (vk::VkFormat format, tcu::Vec4& lookupScale, tcu::Vec4& lookupBias)
170 if (!isCompressedFormat(format))
172 const tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(mapVkFormat(format));
174 // Needed to normalize various formats to 0..1 range for writing into RT
175 lookupScale = fmtInfo.lookupScale;
176 lookupBias = fmtInfo.lookupBias;
182 case VK_FORMAT_EAC_R11_SNORM_BLOCK:
183 lookupScale = tcu::Vec4(0.5f, 1.0f, 1.0f, 1.0f);
184 lookupBias = tcu::Vec4(0.5f, 0.0f, 0.0f, 0.0f);
187 case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
188 lookupScale = tcu::Vec4(0.5f, 0.5f, 1.0f, 1.0f);
189 lookupBias = tcu::Vec4(0.5f, 0.5f, 0.0f, 0.0f);
193 // else: All supported compressed formats are fine with no normalization.
194 // ASTC LDR blocks decompress to f16 so querying normalization parameters
195 // based on uncompressed formats would actually lead to massive precision loss
196 // and complete lack of coverage in case of R8G8B8A8_UNORM RT.
197 lookupScale = tcu::Vec4(1.0f);
198 lookupBias = tcu::Vec4(0.0f);
204 de::MovePtr<tcu::TextureLevel> readColorAttachment (const vk::DeviceInterface& vk,
207 deUint32 queueFamilyIndex,
208 vk::Allocator& allocator,
211 const tcu::UVec2& renderSize)
213 Move<VkBuffer> buffer;
214 de::MovePtr<Allocation> bufferAlloc;
215 Move<VkCommandPool> cmdPool;
216 Move<VkCommandBuffer> cmdBuffer;
218 const tcu::TextureFormat tcuFormat = mapVkFormat(format);
219 const VkDeviceSize pixelDataSize = renderSize.x() * renderSize.y() * tcuFormat.getPixelSize();
220 de::MovePtr<tcu::TextureLevel> resultLevel (new tcu::TextureLevel(tcuFormat, renderSize.x(), renderSize.y()));
222 // Create destination buffer
224 const VkBufferCreateInfo bufferParams =
226 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
227 DE_NULL, // const void* pNext;
228 0u, // VkBufferCreateFlags flags;
229 pixelDataSize, // VkDeviceSize size;
230 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage;
231 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
232 0u, // deUint32 queueFamilyIndexCount;
233 DE_NULL // const deUint32* pQueueFamilyIndices;
236 buffer = createBuffer(vk, device, &bufferParams);
237 bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
238 VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
241 // Create command pool and buffer
242 cmdPool = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
243 cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
246 fence = createFence(vk, device);
248 // Barriers for copying image to buffer
250 const VkImageMemoryBarrier imageBarrier =
252 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
253 DE_NULL, // const void* pNext;
254 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
255 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
256 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
257 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
258 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
259 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
260 image, // VkImage image;
261 { // VkImageSubresourceRange subresourceRange;
262 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
263 0u, // deUint32 baseMipLevel;
264 1u, // deUint32 mipLevels;
265 0u, // deUint32 baseArraySlice;
266 1u // deUint32 arraySize;
270 const VkBufferMemoryBarrier bufferBarrier =
272 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
273 DE_NULL, // const void* pNext;
274 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
275 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
276 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
277 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
278 *buffer, // VkBuffer buffer;
279 0u, // VkDeviceSize offset;
280 pixelDataSize // VkDeviceSize size;
283 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
285 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
286 DE_NULL, // const void* pNext;
287 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
288 (const VkCommandBufferInheritanceInfo*)DE_NULL,
291 // Copy image to buffer
293 const VkBufferImageCopy copyRegion =
295 0u, // VkDeviceSize bufferOffset;
296 (deUint32)renderSize.x(), // deUint32 bufferRowLength;
297 (deUint32)renderSize.y(), // deUint32 bufferImageHeight;
298 { VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u }, // VkImageSubresourceLayers imageSubresource;
299 { 0, 0, 0 }, // VkOffset3D imageOffset;
300 { renderSize.x(), renderSize.y(), 1u } // VkExtent3D imageExtent;
303 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
304 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
305 vk.cmdCopyImageToBuffer(*cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *buffer, 1, ©Region);
306 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
307 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
309 const VkSubmitInfo submitInfo =
311 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
312 DE_NULL, // const void* pNext;
313 0u, // deUint32 waitSemaphoreCount;
314 DE_NULL, // const VkSemaphore* pWaitSemaphores;
316 1u, // deUint32 commandBufferCount;
317 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
318 0u, // deUint32 signalSemaphoreCount;
319 DE_NULL // const VkSemaphore* pSignalSemaphores;
322 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
323 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), 0, ~(0ull) /* infinity */));
326 invalidateMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
327 tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(), bufferAlloc->getHostPtr()));
335 VkImageAspectFlags getImageAspectFlags (const tcu::TextureFormat textureFormat)
337 VkImageAspectFlags imageAspectFlags = 0;
339 if (tcu::hasDepthComponent(textureFormat.order))
340 imageAspectFlags |= VK_IMAGE_ASPECT_DEPTH_BIT;
342 if (tcu::hasStencilComponent(textureFormat.order))
343 imageAspectFlags |= VK_IMAGE_ASPECT_STENCIL_BIT;
345 if (imageAspectFlags == 0)
346 imageAspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
348 return imageAspectFlags;
351 VkExtent3D mipLevelExtents (const VkExtent3D& baseExtents, const deUint32 mipLevel)
355 result.width = std::max(baseExtents.width >> mipLevel, 1u);
356 result.height = std::max(baseExtents.height >> mipLevel, 1u);
357 result.depth = std::max(baseExtents.depth >> mipLevel, 1u);
362 tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
366 result.x() = extent.width / divisor.width + ((extent.width % divisor.width != 0) ? 1u : 0u);
367 result.y() = extent.height / divisor.height + ((extent.height % divisor.height != 0) ? 1u : 0u);
368 result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth != 0) ? 1u : 0u);
375 void uploadTestTextureInternal (const DeviceInterface& vk,
378 deUint32 queueFamilyIndex,
379 Allocator& allocator,
380 const TestTexture& srcTexture,
381 const TestTexture* srcStencilTexture,
382 tcu::TextureFormat format,
386 Move<VkBuffer> buffer;
387 de::MovePtr<Allocation> bufferAlloc;
388 Move<VkCommandPool> cmdPool;
389 Move<VkCommandBuffer> cmdBuffer;
391 const VkImageAspectFlags imageAspectFlags = getImageAspectFlags(format);
392 deUint32 stencilOffset = 0u;
394 // Calculate buffer size
395 bufferSize = (srcTexture.isCompressed())? srcTexture.getCompressedSize(): srcTexture.getSize();
397 // Stencil-only texture should be provided if (and only if) the image has a combined DS format
398 DE_ASSERT((tcu::hasDepthComponent(format.order) && tcu::hasStencilComponent(format.order)) == (srcStencilTexture != DE_NULL));
400 if (srcStencilTexture != DE_NULL)
402 stencilOffset = static_cast<deUint32>(deAlign32(static_cast<deInt32>(bufferSize), 4));
403 bufferSize = stencilOffset + srcStencilTexture->getSize();
406 // Create source buffer
408 const VkBufferCreateInfo bufferParams =
410 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
411 DE_NULL, // const void* pNext;
412 0u, // VkBufferCreateFlags flags;
413 bufferSize, // VkDeviceSize size;
414 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
415 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
416 0u, // deUint32 queueFamilyIndexCount;
417 DE_NULL, // const deUint32* pQueueFamilyIndices;
420 buffer = createBuffer(vk, device, &bufferParams);
421 bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
422 VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
425 // Create command pool and buffer
426 cmdPool = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
427 cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
430 fence = createFence(vk, device);
432 // Barriers for copying buffer to image
433 const VkBufferMemoryBarrier preBufferBarrier =
435 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
436 DE_NULL, // const void* pNext;
437 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
438 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
439 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
440 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
441 *buffer, // VkBuffer buffer;
442 0u, // VkDeviceSize offset;
443 bufferSize // VkDeviceSize size;
446 const VkImageMemoryBarrier preImageBarrier =
448 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
449 DE_NULL, // const void* pNext;
450 0u, // VkAccessFlags srcAccessMask;
451 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
452 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
453 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
454 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
455 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
456 destImage, // VkImage image;
457 { // VkImageSubresourceRange subresourceRange;
458 imageAspectFlags, // VkImageAspectFlags aspectMask;
459 0u, // deUint32 baseMipLevel;
460 (deUint32)srcTexture.getNumLevels(), // deUint32 mipLevels;
461 0u, // deUint32 baseArraySlice;
462 (deUint32)srcTexture.getArraySize(), // deUint32 arraySize;
466 const VkImageMemoryBarrier postImageBarrier =
468 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
469 DE_NULL, // const void* pNext;
470 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
471 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
472 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
473 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
474 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
475 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
476 destImage, // VkImage image;
477 { // VkImageSubresourceRange subresourceRange;
478 imageAspectFlags, // VkImageAspectFlags aspectMask;
479 0u, // deUint32 baseMipLevel;
480 (deUint32)srcTexture.getNumLevels(), // deUint32 mipLevels;
481 0u, // deUint32 baseArraySlice;
482 (deUint32)srcTexture.getArraySize(), // deUint32 arraySize;
486 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
488 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
489 DE_NULL, // const void* pNext;
490 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
491 (const VkCommandBufferInheritanceInfo*)DE_NULL,
494 std::vector<VkBufferImageCopy> copyRegions = srcTexture.getBufferCopyRegions();
497 srcTexture.write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()));
499 if (srcStencilTexture != DE_NULL)
501 DE_ASSERT(stencilOffset != 0u);
503 srcStencilTexture->write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()) + stencilOffset);
505 std::vector<VkBufferImageCopy> stencilCopyRegions = srcStencilTexture->getBufferCopyRegions();
506 for (size_t regionIdx = 0; regionIdx < stencilCopyRegions.size(); regionIdx++)
508 VkBufferImageCopy region = stencilCopyRegions[regionIdx];
509 region.bufferOffset += stencilOffset;
511 copyRegions.push_back(region);
515 flushMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
517 // Copy buffer to image
518 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
519 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
520 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
521 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
523 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
525 const VkSubmitInfo submitInfo =
527 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
528 DE_NULL, // const void* pNext;
529 0u, // deUint32 waitSemaphoreCount;
530 DE_NULL, // const VkSemaphore* pWaitSemaphores;
532 1u, // deUint32 commandBufferCount;
533 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
534 0u, // deUint32 signalSemaphoreCount;
535 DE_NULL // const VkSemaphore* pSignalSemaphores;
538 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
539 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), true, ~(0ull) /* infinity */));
542 void uploadTestTextureInternalSparse (const DeviceInterface& vk,
544 const VkPhysicalDevice physicalDevice,
545 const InstanceInterface& instance,
546 const VkImageCreateInfo& imageCreateInfo,
548 deUint32 queueFamilyIndex,
549 Allocator& allocator,
550 std::vector<de::SharedPtr<Allocation> >& allocations,
551 const TestTexture& srcTexture,
552 const TestTexture* srcStencilTexture,
553 tcu::TextureFormat format,
556 deUint32 bufferSize = (srcTexture.isCompressed()) ? srcTexture.getCompressedSize(): srcTexture.getSize();
557 const VkImageAspectFlags imageAspectFlags = getImageAspectFlags(format);
558 deUint32 stencilOffset = 0u;
559 const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(vk, device));
561 // Stencil-only texture should be provided if (and only if) the image has a combined DS format
562 DE_ASSERT((tcu::hasDepthComponent(format.order) && tcu::hasStencilComponent(format.order)) == (srcStencilTexture != DE_NULL));
564 if (srcStencilTexture != DE_NULL)
566 stencilOffset = static_cast<deUint32>(deAlign32(static_cast<deInt32>(bufferSize), 4));
567 bufferSize = stencilOffset + srcStencilTexture->getSize();
571 const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
572 const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice);
573 deUint32 sparseMemoryReqCount = 0;
575 vk.getImageSparseMemoryRequirements(device, destImage, &sparseMemoryReqCount, DE_NULL);
577 DE_ASSERT(sparseMemoryReqCount != 0);
579 std::vector<VkSparseImageMemoryRequirements> sparseImageMemoryRequirements;
580 sparseImageMemoryRequirements.resize(sparseMemoryReqCount);
582 vk.getImageSparseMemoryRequirements(device, destImage, &sparseMemoryReqCount, &sparseImageMemoryRequirements[0]);
584 const deUint32 noMatchFound = ~((deUint32)0);
586 deUint32 aspectIndex = noMatchFound;
587 for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx)
589 if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask == imageAspectFlags)
591 aspectIndex = memoryReqNdx;
596 deUint32 metadataAspectIndex = noMatchFound;
597 for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx)
599 if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT)
601 metadataAspectIndex = memoryReqNdx;
606 if (aspectIndex == noMatchFound)
607 TCU_THROW(NotSupportedError, "Required image aspect not supported.");
609 const VkMemoryRequirements memoryRequirements = getImageMemoryRequirements(vk, device, destImage);
611 deUint32 memoryType = noMatchFound;
612 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemoryProperties.memoryTypeCount; ++memoryTypeNdx)
614 if ((memoryRequirements.memoryTypeBits & (1u << memoryTypeNdx)) != 0 &&
615 MemoryRequirement::Any.matchesHeap(deviceMemoryProperties.memoryTypes[memoryTypeNdx].propertyFlags))
617 memoryType = memoryTypeNdx;
622 if (memoryType == noMatchFound)
623 TCU_THROW(NotSupportedError, "No matching memory type found.");
625 if (memoryRequirements.size > deviceProperties.limits.sparseAddressSpaceSize)
626 TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits.");
628 // Check if the image format supports sparse operations
629 const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec =
630 getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType, imageCreateInfo.samples, imageCreateInfo.usage, imageCreateInfo.tiling);
632 if (sparseImageFormatPropVec.size() == 0)
633 TCU_THROW(NotSupportedError, "The image format does not support sparse operations.");
635 const VkSparseImageMemoryRequirements aspectRequirements = sparseImageMemoryRequirements[aspectIndex];
636 const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
638 std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
639 std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
641 for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx)
643 for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
645 const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx);
646 const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
647 const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
648 mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
649 mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth );
651 for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
652 for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
653 for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
655 const VkMemoryRequirements allocRequirements =
657 // 28.7.5 alignment shows the block size in bytes
658 memoryRequirements.alignment, // VkDeviceSize size;
659 memoryRequirements.alignment, // VkDeviceSize alignment;
660 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
663 de::SharedPtr<Allocation> allocation(allocator.allocate(allocRequirements, MemoryRequirement::Any).release());
664 allocations.push_back(allocation);
667 offset.x = x*imageGranularity.width;
668 offset.y = y*imageGranularity.height;
669 offset.z = z*imageGranularity.depth;
672 extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
673 extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
674 extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
676 const VkSparseImageMemoryBind imageMemoryBind =
679 imageAspectFlags, // VkImageAspectFlags aspectMask;
680 mipLevelNdx, // uint32_t mipLevel;
681 layerNdx, // uint32_t arrayLayer;
682 }, // VkImageSubresource subresource;
683 offset, // VkOffset3D offset;
684 extent, // VkExtent3D extent;
685 allocation->getMemory(), // VkDeviceMemory memory;
686 allocation->getOffset(), // VkDeviceSize memoryOffset;
687 0u, // VkSparseMemoryBindFlags flags;
690 imageResidencyMemoryBinds.push_back(imageMemoryBind);
694 // Handle MIP tail. There are two cases to consider here:
696 // 1) VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is requested by the driver: each layer needs a separate tail.
697 // 2) otherwise: only one tail is needed.
699 if (imageMipTailMemoryBinds.size() == 0 || (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0)
701 const VkMemoryRequirements allocRequirements =
703 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
704 memoryRequirements.alignment, // VkDeviceSize alignment;
705 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
708 const de::SharedPtr<Allocation> allocation(allocator.allocate(allocRequirements, MemoryRequirement::Any).release());
710 const VkSparseMemoryBind imageMipTailMemoryBind =
712 aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride, // VkDeviceSize resourceOffset;
713 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
714 allocation->getMemory(), // VkDeviceMemory memory;
715 allocation->getOffset(), // VkDeviceSize memoryOffset;
716 0u, // VkSparseMemoryBindFlags flags;
719 allocations.push_back(allocation);
721 imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
725 if (metadataAspectIndex != noMatchFound)
727 const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseImageMemoryRequirements[metadataAspectIndex];
729 if (imageMipTailMemoryBinds.size() == 1 || (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0)
731 const VkMemoryRequirements metadataAllocRequirements =
733 metadataAspectRequirements.imageMipTailSize, // VkDeviceSize size;
734 memoryRequirements.alignment, // VkDeviceSize alignment;
735 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
737 const de::SharedPtr<Allocation> metadataAllocation(allocator.allocate(metadataAllocRequirements, MemoryRequirement::Any).release());
739 const VkSparseMemoryBind metadataMipTailMemoryBind =
741 metadataAspectRequirements.imageMipTailOffset +
742 layerNdx * metadataAspectRequirements.imageMipTailStride, // VkDeviceSize resourceOffset;
743 metadataAspectRequirements.imageMipTailSize, // VkDeviceSize size;
744 metadataAllocation->getMemory(), // VkDeviceMemory memory;
745 metadataAllocation->getOffset(), // VkDeviceSize memoryOffset;
746 VK_SPARSE_MEMORY_BIND_METADATA_BIT // VkSparseMemoryBindFlags flags;
749 allocations.push_back(metadataAllocation);
751 imageMipTailMemoryBinds.push_back(metadataMipTailMemoryBind);
757 VkBindSparseInfo bindSparseInfo =
759 VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
760 DE_NULL, //const void* pNext;
761 0u, //deUint32 waitSemaphoreCount;
762 DE_NULL, //const VkSemaphore* pWaitSemaphores;
763 0u, //deUint32 bufferBindCount;
764 DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
765 0u, //deUint32 imageOpaqueBindCount;
766 DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
767 0u, //deUint32 imageBindCount;
768 DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
769 1u, //deUint32 signalSemaphoreCount;
770 &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
773 VkSparseImageMemoryBindInfo imageResidencyBindInfo;
774 VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
776 if (imageResidencyMemoryBinds.size() > 0)
778 imageResidencyBindInfo.image = destImage;;
779 imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
780 imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
782 bindSparseInfo.imageBindCount = 1u;
783 bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
786 if (imageMipTailMemoryBinds.size() > 0)
788 imageMipTailBindInfo.image = destImage;;
789 imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
790 imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
792 bindSparseInfo.imageOpaqueBindCount = 1u;
793 bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
796 VK_CHECK(vk.queueBindSparse(queue, 1u, &bindSparseInfo, DE_NULL));
800 // Create source buffer
801 const VkBufferCreateInfo bufferParams =
803 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
804 DE_NULL, // const void* pNext;
805 0u, // VkBufferCreateFlags flags;
806 bufferSize, // VkDeviceSize size;
807 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
808 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
809 0u, // deUint32 queueFamilyIndexCount;
810 DE_NULL, // const deUint32* pQueueFamilyIndices;
813 Move<VkBuffer> buffer = createBuffer(vk, device, &bufferParams);
814 de::MovePtr<Allocation> bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
815 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
816 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
817 Move<VkFence> fence = createFence(vk, device);
819 VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
821 // Barriers for copying buffer to image
822 const VkBufferMemoryBarrier preBufferBarrier =
824 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
825 DE_NULL, // const void* pNext;
826 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
827 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
828 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
829 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
830 *buffer, // VkBuffer buffer;
831 0u, // VkDeviceSize offset;
832 bufferSize // VkDeviceSize size;
835 const VkImageMemoryBarrier preImageBarrier =
837 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
838 DE_NULL, // const void* pNext;
839 0u, // VkAccessFlags srcAccessMask;
840 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
841 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
842 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
843 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
844 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
845 destImage, // VkImage image;
846 { // VkImageSubresourceRange subresourceRange;
847 imageAspectFlags, // VkImageAspect aspect;
848 0u, // deUint32 baseMipLevel;
849 imageCreateInfo.mipLevels, // deUint32 mipLevels;
850 0u, // deUint32 baseArraySlice;
851 imageCreateInfo.arrayLayers // deUint32 arraySize;
855 const VkImageMemoryBarrier postImageBarrier =
857 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
858 DE_NULL, // const void* pNext;
859 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
860 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
861 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
862 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
863 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
864 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
865 destImage, // VkImage image;
866 { // VkImageSubresourceRange subresourceRange;
867 imageAspectFlags, // VkImageAspect aspect;
868 0u, // deUint32 baseMipLevel;
869 imageCreateInfo.mipLevels, // deUint32 mipLevels;
870 0u, // deUint32 baseArraySlice;
871 imageCreateInfo.arrayLayers // deUint32 arraySize;
875 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
877 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
878 DE_NULL, // const void* pNext;
879 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
880 (const VkCommandBufferInheritanceInfo*)DE_NULL,
883 std::vector<VkBufferImageCopy> copyRegions = srcTexture.getBufferCopyRegions();
886 srcTexture.write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()));
888 if (srcStencilTexture != DE_NULL)
890 DE_ASSERT(stencilOffset != 0u);
892 srcStencilTexture->write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()) + stencilOffset);
894 std::vector<VkBufferImageCopy> stencilCopyRegions = srcStencilTexture->getBufferCopyRegions();
895 for (size_t regionIdx = 0; regionIdx < stencilCopyRegions.size(); regionIdx++)
897 VkBufferImageCopy region = stencilCopyRegions[regionIdx];
898 region.bufferOffset += stencilOffset;
900 copyRegions.push_back(region);
904 flushMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
906 // Copy buffer to image
907 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
908 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
909 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
910 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
911 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
913 const VkPipelineStageFlags pipelineStageFlags = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
915 const VkSubmitInfo submitInfo =
917 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
918 DE_NULL, // const void* pNext;
919 1u, // deUint32 waitSemaphoreCount;
920 &imageMemoryBindSemaphore.get(), // const VkSemaphore* pWaitSemaphores;
921 &pipelineStageFlags, // const VkPipelineStageFlags* pWaitDstStageMask;
922 1u, // deUint32 commandBufferCount;
923 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
924 0u, // deUint32 signalSemaphoreCount;
925 DE_NULL // const VkSemaphore* pSignalSemaphores;
930 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
931 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), true, ~(0ull) /* infinity */));
935 VK_CHECK(vk.deviceWaitIdle(device));
941 void uploadTestTexture (const DeviceInterface& vk,
944 deUint32 queueFamilyIndex,
945 Allocator& allocator,
946 const TestTexture& srcTexture,
949 if (tcu::isCombinedDepthStencilType(srcTexture.getTextureFormat().type))
951 de::MovePtr<TestTexture> srcDepthTexture;
952 de::MovePtr<TestTexture> srcStencilTexture;
954 if (tcu::hasDepthComponent(srcTexture.getTextureFormat().order))
956 tcu::TextureFormat format;
957 switch (srcTexture.getTextureFormat().type)
959 case tcu::TextureFormat::UNSIGNED_INT_16_8_8:
960 format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
962 case tcu::TextureFormat::UNSIGNED_INT_24_8_REV:
963 format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNSIGNED_INT_24_8_REV);
965 case tcu::TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
966 format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::FLOAT);
969 DE_FATAL("Unexpected source texture format.");
972 srcDepthTexture = srcTexture.copy(format);
975 if (tcu::hasStencilComponent(srcTexture.getTextureFormat().order))
976 srcStencilTexture = srcTexture.copy(tcu::getEffectiveDepthStencilTextureFormat(srcTexture.getTextureFormat(), tcu::Sampler::MODE_STENCIL));
978 uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, *srcDepthTexture, srcStencilTexture.get(), srcTexture.getTextureFormat(), destImage);
981 uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, srcTexture, DE_NULL, srcTexture.getTextureFormat(), destImage);
// Uploads srcTexture into a sparse (partially resident) image.
// Mirrors uploadTestTexture(): combined depth/stencil data is split into a
// depth-only and a stencil-only texture, because the two aspects must be
// uploaded as separate copy regions.
// NOTE(review): |allocations| is forwarded to uploadTestTextureInternalSparse;
// presumably it receives the sparse memory blocks so the caller keeps them
// alive for the image's lifetime -- confirm in that function.
void uploadTestTextureSparse (const DeviceInterface& vk,
const VkPhysicalDevice physicalDevice,
const InstanceInterface& instance,
const VkImageCreateInfo& imageCreateInfo,
deUint32 queueFamilyIndex,
Allocator& allocator,
std::vector<de::SharedPtr<Allocation> >& allocations,
const TestTexture& srcTexture,
// Combined depth/stencil source: split into single-aspect textures first.
if (tcu::isCombinedDepthStencilType(srcTexture.getTextureFormat().type))
de::MovePtr<TestTexture> srcDepthTexture;
de::MovePtr<TestTexture> srcStencilTexture;
if (tcu::hasDepthComponent(srcTexture.getTextureFormat().order))
// Pick the depth-only format matching the combined format's depth bits.
tcu::TextureFormat format;
switch (srcTexture.getTextureFormat().type)
case tcu::TextureFormat::UNSIGNED_INT_16_8_8:
format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
case tcu::TextureFormat::UNSIGNED_INT_24_8_REV:
format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNSIGNED_INT_24_8_REV);
case tcu::TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::FLOAT);
DE_FATAL("Unexpected source texture format.");
srcDepthTexture = srcTexture.copy(format);
if (tcu::hasStencilComponent(srcTexture.getTextureFormat().order))
srcStencilTexture = srcTexture.copy(tcu::getEffectiveDepthStencilTextureFormat(srcTexture.getTextureFormat(), tcu::Sampler::MODE_STENCIL));
// Depth/stencil path: upload both single-aspect textures together.
uploadTestTextureInternalSparse (vk,
srcStencilTexture.get(),
srcTexture.getTextureFormat(),
// Plain path: upload the texture as-is, no stencil companion.
uploadTestTextureInternalSparse (vk,
srcTexture.getTextureFormat(),
1057 // Utilities for test textures
/*! Allocates backing storage for every mip level of the given texture.
 *
 * TcuTextureType must provide getNumLevels() and allocLevel(int).
 */
template<typename TcuTextureType>
void allocateLevels (TcuTextureType& texture)
{
    for (int levelNdx = 0; levelNdx < texture.getNumLevels(); levelNdx++)
        texture.allocLevel(levelNdx);
}
1066 template<typename TcuTextureType>
1067 std::vector<tcu::PixelBufferAccess> getLevelsVector (const TcuTextureType& texture)
1069 std::vector<tcu::PixelBufferAccess> levels(texture.getNumLevels());
1071 for (int levelNdx = 0; levelNdx < texture.getNumLevels(); levelNdx++)
1072 levels[levelNdx] = *reinterpret_cast<const tcu::PixelBufferAccess*>(&texture.getLevel(levelNdx));
1079 TestTexture::TestTexture (const tcu::TextureFormat& format, int width, int height, int depth)
1081 DE_ASSERT(width >= 1);
1082 DE_ASSERT(height >= 1);
1083 DE_ASSERT(depth >= 1);
1091 TestTexture::TestTexture (const tcu::CompressedTexFormat& format, int width, int height, int depth)
1093 DE_ASSERT(width >= 1);
1094 DE_ASSERT(height >= 1);
1095 DE_ASSERT(depth >= 1);
1103 TestTexture::~TestTexture (void)
1105 for (size_t levelNdx = 0; levelNdx < m_compressedLevels.size(); levelNdx++)
1106 delete m_compressedLevels[levelNdx];
1109 deUint32 TestTexture::getSize (void) const
1111 std::vector<deUint32> offsetMultiples;
1112 deUint32 textureSize = 0;
1114 offsetMultiples.push_back(4);
1115 offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());
1117 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1119 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1121 const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);
1122 textureSize = getNextMultiple(offsetMultiples, textureSize);
1123 textureSize += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
1130 deUint32 TestTexture::getCompressedSize (void) const
1132 if (!isCompressed())
1133 throw tcu::InternalError("Texture is not compressed");
1135 std::vector<deUint32> offsetMultiples;
1136 deUint32 textureSize = 0;
1138 offsetMultiples.push_back(4);
1139 offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));
1141 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1143 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1145 textureSize = getNextMultiple(offsetMultiples, textureSize);
1146 textureSize += getCompressedLevel(levelNdx, layerNdx).getDataSize();
1153 tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer)
1155 DE_ASSERT(level >= 0 && level < getNumLevels());
1156 DE_ASSERT(layer >= 0 && layer < getArraySize());
1158 return *m_compressedLevels[level * getArraySize() + layer];
1161 const tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer) const
1163 DE_ASSERT(level >= 0 && level < getNumLevels());
1164 DE_ASSERT(layer >= 0 && layer < getArraySize());
1166 return *m_compressedLevels[level * getArraySize() + layer];
1169 std::vector<VkBufferImageCopy> TestTexture::getBufferCopyRegions (void) const
1171 std::vector<deUint32> offsetMultiples;
1172 std::vector<VkBufferImageCopy> regions;
1173 deUint32 layerDataOffset = 0;
1175 offsetMultiples.push_back(4);
1179 offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));
1181 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1183 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1185 const tcu::CompressedTexture& level = getCompressedLevel(levelNdx, layerNdx);
1186 tcu::IVec3 blockPixelSize = getBlockPixelSize(level.getFormat());
1187 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1189 const VkBufferImageCopy layerRegion =
1191 layerDataOffset, // VkDeviceSize bufferOffset;
1192 (deUint32)getNextMultiple(blockPixelSize.x(), level.getWidth()), // deUint32 bufferRowLength;
1193 (deUint32)getNextMultiple(blockPixelSize.y(), level.getHeight()), // deUint32 bufferImageHeight;
1194 { // VkImageSubresourceLayers imageSubresource;
1195 VK_IMAGE_ASPECT_COLOR_BIT,
1200 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1201 { // VkExtent3D imageExtent;
1202 (deUint32)level.getWidth(),
1203 (deUint32)level.getHeight(),
1204 (deUint32)level.getDepth()
1208 regions.push_back(layerRegion);
1209 layerDataOffset += level.getDataSize();
1215 std::vector<VkImageAspectFlags> imageAspects;
1216 tcu::TextureFormat textureFormat = getTextureFormat();
1218 if (tcu::hasDepthComponent(textureFormat.order))
1219 imageAspects.push_back(VK_IMAGE_ASPECT_DEPTH_BIT);
1221 if (tcu::hasStencilComponent(textureFormat.order))
1222 imageAspects.push_back(VK_IMAGE_ASPECT_STENCIL_BIT);
1224 if (imageAspects.empty())
1225 imageAspects.push_back(VK_IMAGE_ASPECT_COLOR_BIT);
1227 offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());
1229 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1231 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1233 const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);
1235 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1237 for (size_t aspectIndex = 0; aspectIndex < imageAspects.size(); ++aspectIndex)
1239 const VkBufferImageCopy layerRegion =
1241 layerDataOffset, // VkDeviceSize bufferOffset;
1242 (deUint32)level.getWidth(), // deUint32 bufferRowLength;
1243 (deUint32)level.getHeight(), // deUint32 bufferImageHeight;
1244 { // VkImageSubresourceLayers imageSubresource;
1245 imageAspects[aspectIndex],
1250 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1251 { // VkExtent3D imageExtent;
1252 (deUint32)level.getWidth(),
1253 (deUint32)level.getHeight(),
1254 (deUint32)level.getDepth()
1258 regions.push_back(layerRegion);
1260 layerDataOffset += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
1268 void TestTexture::write (deUint8* destPtr) const
1270 std::vector<deUint32> offsetMultiples;
1271 deUint32 levelOffset = 0;
1273 offsetMultiples.push_back(4);
1277 offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));
1279 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1281 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1283 levelOffset = getNextMultiple(offsetMultiples, levelOffset);
1285 const tcu::CompressedTexture& compressedTex = getCompressedLevel(levelNdx, layerNdx);
1287 deMemcpy(destPtr + levelOffset, compressedTex.getData(), compressedTex.getDataSize());
1288 levelOffset += compressedTex.getDataSize();
1294 offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());
1296 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1298 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1300 levelOffset = getNextMultiple(offsetMultiples, levelOffset);
1302 const tcu::ConstPixelBufferAccess srcAccess = getLevel(levelNdx, layerNdx);
1303 const tcu::PixelBufferAccess destAccess (srcAccess.getFormat(), srcAccess.getSize(), srcAccess.getPitch(), destPtr + levelOffset);
1305 tcu::copy(destAccess, srcAccess);
1306 levelOffset += srcAccess.getWidth() * srcAccess.getHeight() * srcAccess.getDepth() * srcAccess.getFormat().getPixelSize();
1312 void TestTexture::copyToTexture (TestTexture& destTexture) const
1314 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1315 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1316 tcu::copy(destTexture.getLevel(levelNdx, layerNdx), getLevel(levelNdx, layerNdx));
1319 void TestTexture::populateLevels (const std::vector<tcu::PixelBufferAccess>& levels)
1321 for (size_t levelNdx = 0; levelNdx < levels.size(); levelNdx++)
1322 TestTexture::fillWithGradient(levels[levelNdx]);
1325 void TestTexture::populateCompressedLevels (tcu::CompressedTexFormat format, const std::vector<tcu::PixelBufferAccess>& decompressedLevels)
1327 // Generate random compressed data and update decompressed data
1329 de::Random random(123);
1331 for (size_t levelNdx = 0; levelNdx < decompressedLevels.size(); levelNdx++)
1333 const tcu::PixelBufferAccess level = decompressedLevels[levelNdx];
1334 tcu::CompressedTexture* compressedLevel = new tcu::CompressedTexture(format, level.getWidth(), level.getHeight(), level.getDepth());
1335 deUint8* const compressedData = (deUint8*)compressedLevel->getData();
1337 if (tcu::isAstcFormat(format))
1339 // \todo [2016-01-20 pyry] Comparison doesn't currently handle invalid blocks correctly so we use only valid blocks
1340 tcu::astc::generateRandomValidBlocks(compressedData, compressedLevel->getDataSize()/tcu::astc::BLOCK_SIZE_BYTES,
1341 format, tcu::TexDecompressionParams::ASTCMODE_LDR, random.getUint32());
1345 // Generate random compressed data
1346 // Random initial values cause assertion during the decompression in case of COMPRESSEDTEXFORMAT_ETC1_RGB8 format
1347 if (format != tcu::COMPRESSEDTEXFORMAT_ETC1_RGB8)
1348 for (int byteNdx = 0; byteNdx < compressedLevel->getDataSize(); byteNdx++)
1349 compressedData[byteNdx] = 0xFF & random.getUint32();
1352 m_compressedLevels.push_back(compressedLevel);
1354 // Store decompressed data
1355 compressedLevel->decompress(level, tcu::TexDecompressionParams(tcu::TexDecompressionParams::ASTCMODE_LDR));
1359 void TestTexture::fillWithGradient (const tcu::PixelBufferAccess& levelAccess)
1361 const tcu::TextureFormatInfo formatInfo = tcu::getTextureFormatInfo(levelAccess.getFormat());
1362 tcu::fillWithComponentGradients(levelAccess, formatInfo.valueMin, formatInfo.valueMax);
1367 TestTexture1D::TestTexture1D (const tcu::TextureFormat& format, int width)
1368 : TestTexture (format, width, 1, 1)
1369 , m_texture (format, width)
1371 allocateLevels(m_texture);
1372 TestTexture::populateLevels(getLevelsVector(m_texture));
1375 TestTexture1D::TestTexture1D (const tcu::CompressedTexFormat& format, int width)
1376 : TestTexture (format, width, 1, 1)
1377 , m_texture (tcu::getUncompressedFormat(format), width)
1379 allocateLevels(m_texture);
1380 TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
1383 TestTexture1D::~TestTexture1D (void)
1387 int TestTexture1D::getNumLevels (void) const
1389 return m_texture.getNumLevels();
1392 tcu::PixelBufferAccess TestTexture1D::getLevel (int level, int layer)
1394 DE_ASSERT(layer == 0);
1396 return m_texture.getLevel(level);
1399 const tcu::ConstPixelBufferAccess TestTexture1D::getLevel (int level, int layer) const
1401 DE_ASSERT(layer == 0);
1403 return m_texture.getLevel(level);
1406 const tcu::Texture1D& TestTexture1D::getTexture (void) const
1411 tcu::Texture1D& TestTexture1D::getTexture (void)
1416 de::MovePtr<TestTexture> TestTexture1D::copy(const tcu::TextureFormat format) const
1418 DE_ASSERT(!isCompressed());
1420 de::MovePtr<TestTexture> texture (new TestTexture1D(format, m_texture.getWidth()));
1422 copyToTexture(*texture);
1427 // TestTexture1DArray
1429 TestTexture1DArray::TestTexture1DArray (const tcu::TextureFormat& format, int width, int arraySize)
1430 : TestTexture (format, width, 1, arraySize)
1431 , m_texture (format, width, arraySize)
1433 allocateLevels(m_texture);
1434 TestTexture::populateLevels(getLevelsVector(m_texture));
1437 TestTexture1DArray::TestTexture1DArray (const tcu::CompressedTexFormat& format, int width, int arraySize)
1438 : TestTexture (format, width, 1, arraySize)
1439 , m_texture (tcu::getUncompressedFormat(format), width, arraySize)
1441 allocateLevels(m_texture);
1443 std::vector<tcu::PixelBufferAccess> layers;
1444 for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
1445 for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++)
1446 layers.push_back(getLevel(levelNdx, layerNdx));
1448 TestTexture::populateCompressedLevels(format, layers);
1451 TestTexture1DArray::~TestTexture1DArray (void)
1455 int TestTexture1DArray::getNumLevels (void) const
1457 return m_texture.getNumLevels();
1460 tcu::PixelBufferAccess TestTexture1DArray::getLevel (int level, int layer)
1462 const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
1463 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
1464 const deUint32 layerOffset = layerSize * layer;
1466 return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1469 const tcu::ConstPixelBufferAccess TestTexture1DArray::getLevel (int level, int layer) const
1471 const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
1472 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
1473 const deUint32 layerOffset = layerSize * layer;
1475 return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1478 const tcu::Texture1DArray& TestTexture1DArray::getTexture (void) const
1483 tcu::Texture1DArray& TestTexture1DArray::getTexture (void)
1488 int TestTexture1DArray::getArraySize (void) const
1490 return m_texture.getNumLayers();
1493 de::MovePtr<TestTexture> TestTexture1DArray::copy(const tcu::TextureFormat format) const
1495 DE_ASSERT(!isCompressed());
1497 de::MovePtr<TestTexture> texture (new TestTexture1DArray(format, m_texture.getWidth(), getArraySize()));
1499 copyToTexture(*texture);
1506 TestTexture2D::TestTexture2D (const tcu::TextureFormat& format, int width, int height)
1507 : TestTexture (format, width, height, 1)
1508 , m_texture (format, width, height)
1510 allocateLevels(m_texture);
1511 TestTexture::populateLevels(getLevelsVector(m_texture));
1514 TestTexture2D::TestTexture2D (const tcu::CompressedTexFormat& format, int width, int height)
1515 : TestTexture (format, width, height, 1)
1516 , m_texture (tcu::getUncompressedFormat(format), width, height)
1518 allocateLevels(m_texture);
1519 TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
1522 TestTexture2D::~TestTexture2D (void)
1526 int TestTexture2D::getNumLevels (void) const
1528 return m_texture.getNumLevels();
1531 tcu::PixelBufferAccess TestTexture2D::getLevel (int level, int layer)
1533 DE_ASSERT(layer == 0);
1535 return m_texture.getLevel(level);
1538 const tcu::ConstPixelBufferAccess TestTexture2D::getLevel (int level, int layer) const
1540 DE_ASSERT(layer == 0);
1542 return m_texture.getLevel(level);
1545 const tcu::Texture2D& TestTexture2D::getTexture (void) const
1550 tcu::Texture2D& TestTexture2D::getTexture (void)
1555 de::MovePtr<TestTexture> TestTexture2D::copy(const tcu::TextureFormat format) const
1557 DE_ASSERT(!isCompressed());
1559 de::MovePtr<TestTexture> texture (new TestTexture2D(format, m_texture.getWidth(), m_texture.getHeight()));
1561 copyToTexture(*texture);
1566 // TestTexture2DArray
1568 TestTexture2DArray::TestTexture2DArray (const tcu::TextureFormat& format, int width, int height, int arraySize)
1569 : TestTexture (format, width, height, arraySize)
1570 , m_texture (format, width, height, arraySize)
1572 allocateLevels(m_texture);
1573 TestTexture::populateLevels(getLevelsVector(m_texture));
1576 TestTexture2DArray::TestTexture2DArray (const tcu::CompressedTexFormat& format, int width, int height, int arraySize)
1577 : TestTexture (format, width, height, arraySize)
1578 , m_texture (tcu::getUncompressedFormat(format), width, height, arraySize)
1580 allocateLevels(m_texture);
1582 std::vector<tcu::PixelBufferAccess> layers;
1583 for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
1584 for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++)
1585 layers.push_back(getLevel(levelNdx, layerNdx));
1587 TestTexture::populateCompressedLevels(format, layers);
1590 TestTexture2DArray::~TestTexture2DArray (void)
1594 int TestTexture2DArray::getNumLevels (void) const
1596 return m_texture.getNumLevels();
1599 tcu::PixelBufferAccess TestTexture2DArray::getLevel (int level, int layer)
1601 const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
1602 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1603 const deUint32 layerOffset = layerSize * layer;
1605 return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1608 const tcu::ConstPixelBufferAccess TestTexture2DArray::getLevel (int level, int layer) const
1610 const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
1611 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1612 const deUint32 layerOffset = layerSize * layer;
1614 return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1617 const tcu::Texture2DArray& TestTexture2DArray::getTexture (void) const
1622 tcu::Texture2DArray& TestTexture2DArray::getTexture (void)
1627 int TestTexture2DArray::getArraySize (void) const
1629 return m_texture.getNumLayers();
1632 de::MovePtr<TestTexture> TestTexture2DArray::copy(const tcu::TextureFormat format) const
1634 DE_ASSERT(!isCompressed());
1636 de::MovePtr<TestTexture> texture (new TestTexture2DArray(format, m_texture.getWidth(), m_texture.getHeight(), getArraySize()));
1638 copyToTexture(*texture);
1645 TestTexture3D::TestTexture3D (const tcu::TextureFormat& format, int width, int height, int depth)
1646 : TestTexture (format, width, height, depth)
1647 , m_texture (format, width, height, depth)
1649 allocateLevels(m_texture);
1650 TestTexture::populateLevels(getLevelsVector(m_texture));
1653 TestTexture3D::TestTexture3D (const tcu::CompressedTexFormat& format, int width, int height, int depth)
1654 : TestTexture (format, width, height, depth)
1655 , m_texture (tcu::getUncompressedFormat(format), width, height, depth)
1657 allocateLevels(m_texture);
1658 TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
1661 TestTexture3D::~TestTexture3D (void)
1665 int TestTexture3D::getNumLevels (void) const
1667 return m_texture.getNumLevels();
1670 tcu::PixelBufferAccess TestTexture3D::getLevel (int level, int layer)
1672 DE_ASSERT(layer == 0);
1674 return m_texture.getLevel(level);
1677 const tcu::ConstPixelBufferAccess TestTexture3D::getLevel (int level, int layer) const
1679 DE_ASSERT(layer == 0);
1681 return m_texture.getLevel(level);
1684 const tcu::Texture3D& TestTexture3D::getTexture (void) const
1689 tcu::Texture3D& TestTexture3D::getTexture (void)
1694 de::MovePtr<TestTexture> TestTexture3D::copy(const tcu::TextureFormat format) const
1696 DE_ASSERT(!isCompressed());
1698 de::MovePtr<TestTexture> texture (new TestTexture3D(format, m_texture.getWidth(), m_texture.getHeight(), m_texture.getDepth()));
1700 copyToTexture(*texture);
1707 const static tcu::CubeFace tcuFaceMapping[tcu::CUBEFACE_LAST] =
1709 tcu::CUBEFACE_POSITIVE_X,
1710 tcu::CUBEFACE_NEGATIVE_X,
1711 tcu::CUBEFACE_POSITIVE_Y,
1712 tcu::CUBEFACE_NEGATIVE_Y,
1713 tcu::CUBEFACE_POSITIVE_Z,
1714 tcu::CUBEFACE_NEGATIVE_Z
1717 TestTextureCube::TestTextureCube (const tcu::TextureFormat& format, int size)
1718 : TestTexture (format, size, size, 1)
1719 , m_texture (format, size)
1721 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1723 for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; faceNdx++)
1725 m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx);
1726 TestTexture::fillWithGradient(m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx]));
1731 TestTextureCube::TestTextureCube (const tcu::CompressedTexFormat& format, int size)
1732 : TestTexture (format, size, size, 1)
1733 , m_texture (tcu::getUncompressedFormat(format), size)
1735 std::vector<tcu::PixelBufferAccess> levels(m_texture.getNumLevels() * tcu::CUBEFACE_LAST);
1737 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1739 for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; faceNdx++)
1741 m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx);
1742 levels[levelNdx * tcu::CUBEFACE_LAST + faceNdx] = m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx]);
1746 TestTexture::populateCompressedLevels(format, levels);
1749 TestTextureCube::~TestTextureCube (void)
1753 int TestTextureCube::getNumLevels (void) const
1755 return m_texture.getNumLevels();
1758 tcu::PixelBufferAccess TestTextureCube::getLevel (int level, int layer)
1760 return m_texture.getLevelFace(level, tcuFaceMapping[layer]);
1763 const tcu::ConstPixelBufferAccess TestTextureCube::getLevel (int level, int layer) const
1765 return m_texture.getLevelFace(level, tcuFaceMapping[layer]);
1768 int TestTextureCube::getArraySize (void) const
1770 return (int)tcu::CUBEFACE_LAST;
1773 const tcu::TextureCube& TestTextureCube::getTexture (void) const
1778 tcu::TextureCube& TestTextureCube::getTexture (void)
1783 de::MovePtr<TestTexture> TestTextureCube::copy(const tcu::TextureFormat format) const
1785 DE_ASSERT(!isCompressed());
1787 de::MovePtr<TestTexture> texture (new TestTextureCube(format, m_texture.getSize()));
1789 copyToTexture(*texture);
1794 // TestTextureCubeArray
1796 TestTextureCubeArray::TestTextureCubeArray (const tcu::TextureFormat& format, int size, int arraySize)
1797 : TestTexture (format, size, size, arraySize)
1798 , m_texture (format, size, arraySize)
1800 allocateLevels(m_texture);
1801 TestTexture::populateLevels(getLevelsVector(m_texture));
1804 TestTextureCubeArray::TestTextureCubeArray (const tcu::CompressedTexFormat& format, int size, int arraySize)
1805 : TestTexture (format, size, size, arraySize)
1806 , m_texture (tcu::getUncompressedFormat(format), size, arraySize)
1808 DE_ASSERT(arraySize % 6 == 0);
1810 allocateLevels(m_texture);
1812 std::vector<tcu::PixelBufferAccess> layers;
1813 for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
1814 for (int layerNdx = 0; layerNdx < m_texture.getDepth(); layerNdx++)
1815 layers.push_back(getLevel(levelNdx, layerNdx));
1817 TestTexture::populateCompressedLevels(format, layers);
1820 TestTextureCubeArray::~TestTextureCubeArray (void)
1824 int TestTextureCubeArray::getNumLevels (void) const
1826 return m_texture.getNumLevels();
1829 tcu::PixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer)
1831 const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
1832 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1833 const deUint32 layerOffset = layerSize * layer;
1835 return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1838 const tcu::ConstPixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer) const
1840 const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
1841 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1842 const deUint32 layerOffset = layerSize * layer;
1844 return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1847 int TestTextureCubeArray::getArraySize (void) const
1849 return m_texture.getDepth();
1852 const tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void) const
1857 tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void)
1862 de::MovePtr<TestTexture> TestTextureCubeArray::copy(const tcu::TextureFormat format) const
1864 DE_ASSERT(!isCompressed());
1866 de::MovePtr<TestTexture> texture (new TestTextureCubeArray(format, m_texture.getSize(), getArraySize()));
1868 copyToTexture(*texture);