/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 Imagination Technologies Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Utilities for images.
 *//*--------------------------------------------------------------------*/
#include "vktPipelineImageUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkRefUtil.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuAstcUtil.hpp"
#include "deRandom.hpp"
/*! Gets the next multiple of a given divisor */
static deUint32 getNextMultiple (deUint32 divisor, deUint32 value)
{
    if (value % divisor == 0)
    {
        return value;
    }
    return value + divisor - (value % divisor);
}
/*! Gets the next value that is a multiple of all given divisors */
static deUint32 getNextMultiple (const std::vector<deUint32>& divisors, deUint32 value)
{
    deUint32 nextMultiple      = value;
    bool     nextMultipleFound = false;

    while (true)
    {
        nextMultipleFound = true;

        for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
            nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);

        if (nextMultipleFound)
            break;

        DE_ASSERT(nextMultiple < ~((deUint32)0u));
        nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
    }

    return nextMultiple;
}
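
// Returns true if the given format can be sampled from an optimally tiled image,
// checking the required compressed-texture device features first where applicable.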
bool isSupportedSamplableFormat (const InstanceInterface& instanceInterface, VkPhysicalDevice device, VkFormat format)
{
    if (isCompressedFormat(format))
    {
        VkPhysicalDeviceFeatures       physicalFeatures;
        const tcu::CompressedTexFormat compressedFormat = mapVkCompressedFormat(format);

        instanceInterface.getPhysicalDeviceFeatures(device, &physicalFeatures);

        if (tcu::isAstcFormat(compressedFormat))
        {
            if (!physicalFeatures.textureCompressionASTC_LDR)
                return false;
        }
        else if (tcu::isEtcFormat(compressedFormat))
        {
            if (!physicalFeatures.textureCompressionETC2)
                return false;
        }
        else
        {
            DE_FATAL("Unsupported compressed format");
        }
    }

    VkFormatProperties formatProps;
    instanceInterface.getPhysicalDeviceFormatProperties(device, format, &formatProps);

    return (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0u;
}
// \todo [2016-01-21 pyry] Update this to just rely on vkDefs.hpp once
//                         CTS has been updated to 1.0.2.
enum
{
    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
};
bool isLinearFilteringSupported (const InstanceInterface& vki, VkPhysicalDevice physicalDevice, VkFormat format, VkImageTiling tiling)
{
    const VkFormatProperties   formatProperties = getPhysicalDeviceFormatProperties(vki, physicalDevice, format);
    const VkFormatFeatureFlags formatFeatures   = tiling == VK_IMAGE_TILING_LINEAR
                                                ? formatProperties.linearTilingFeatures
                                                : formatProperties.optimalTilingFeatures;

    switch (format)
    {
        case VK_FORMAT_R32_SFLOAT:
        case VK_FORMAT_R32G32_SFLOAT:
        case VK_FORMAT_R32G32B32_SFLOAT:
        case VK_FORMAT_R32G32B32A32_SFLOAT:
        case VK_FORMAT_R64_SFLOAT:
        case VK_FORMAT_R64G64_SFLOAT:
        case VK_FORMAT_R64G64B64_SFLOAT:
        case VK_FORMAT_R64G64B64A64_SFLOAT:
            return (formatFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0;

        default:
            // \todo [2016-01-21 pyry] Check for all formats once drivers have been updated to 1.0.2
            //                         and we have tests to verify format properties.
            return true;
    }
}
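
// Maps the requested border color to the integer or float VkBorderColor variant,
// depending on whether the format is a (u)int format.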
VkBorderColor getFormatBorderColor (BorderColor color, VkFormat format)
{
    if (!isCompressedFormat(format) && (isIntFormat(format) || isUintFormat(format)))
    {
        switch (color)
        {
            case BORDER_COLOR_OPAQUE_BLACK:      return VK_BORDER_COLOR_INT_OPAQUE_BLACK;
            case BORDER_COLOR_OPAQUE_WHITE:      return VK_BORDER_COLOR_INT_OPAQUE_WHITE;
            case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_INT_TRANSPARENT_BLACK;
            default:
                break;
        }
    }
    else
    {
        switch (color)
        {
            case BORDER_COLOR_OPAQUE_BLACK:      return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
            case BORDER_COLOR_OPAQUE_WHITE:      return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
            case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
            default:
                break;
        }
    }

    return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
}
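
// Computes the scale and bias needed to normalize values of the given format
// to the [0..1] range when writing them into a color render target.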
void getLookupScaleBias (vk::VkFormat format, tcu::Vec4& lookupScale, tcu::Vec4& lookupBias)
{
    if (!isCompressedFormat(format))
    {
        const tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(mapVkFormat(format));

        // Needed to normalize various formats to 0..1 range for writing into RT
        lookupScale = fmtInfo.lookupScale;
        lookupBias  = fmtInfo.lookupBias;
    }
    else
    {
        switch (format)
        {
            case VK_FORMAT_EAC_R11_SNORM_BLOCK:
                lookupScale = tcu::Vec4(0.5f, 1.0f, 1.0f, 1.0f);
                lookupBias  = tcu::Vec4(0.5f, 0.0f, 0.0f, 0.0f);
                break;

            case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
                lookupScale = tcu::Vec4(0.5f, 0.5f, 1.0f, 1.0f);
                lookupBias  = tcu::Vec4(0.5f, 0.5f, 0.0f, 0.0f);
                break;

            default:
                // All other supported compressed formats are fine with no normalization.
                // ASTC LDR blocks decompress to f16, so querying normalization parameters
                // based on the uncompressed format would lead to massive precision loss
                // and a complete lack of coverage in case of an R8G8B8A8_UNORM RT.
                lookupScale = tcu::Vec4(1.0f);
                lookupBias  = tcu::Vec4(0.0f);
                break;
        }
    }
}
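
// Reads back a rendered color attachment into host memory by copying the image
// into a host-visible buffer and wrapping the result in a tcu::TextureLevel.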
de::MovePtr<tcu::TextureLevel> readColorAttachment (const vk::DeviceInterface& vk,
                                                    vk::VkDevice               device,
                                                    vk::VkQueue                queue,
                                                    deUint32                   queueFamilyIndex,
                                                    vk::Allocator&             allocator,
                                                    vk::VkImage                image,
                                                    vk::VkFormat               format,
                                                    const tcu::UVec2&          renderSize)
{
    Move<VkBuffer>          buffer;
    de::MovePtr<Allocation> bufferAlloc;
    Move<VkCommandPool>     cmdPool;
    Move<VkCommandBuffer>   cmdBuffer;
    Move<VkFence>           fence;

    const tcu::TextureFormat       tcuFormat     = mapVkFormat(format);
    const VkDeviceSize             pixelDataSize = renderSize.x() * renderSize.y() * tcuFormat.getPixelSize();
    de::MovePtr<tcu::TextureLevel> resultLevel   (new tcu::TextureLevel(tcuFormat, renderSize.x(), renderSize.y()));

    // Create destination buffer
    {
        const VkBufferCreateInfo bufferParams =
        {
            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
            DE_NULL, // const void* pNext;
            0u, // VkBufferCreateFlags flags;
            pixelDataSize, // VkDeviceSize size;
            VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
            0u, // deUint32 queueFamilyIndexCount;
            DE_NULL // const deUint32* pQueueFamilyIndices;
        };

        buffer      = createBuffer(vk, device, &bufferParams);
        bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
        VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
    }

    // Create command pool and buffer
    cmdPool   = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
    cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

    // Create fence
    fence = createFence(vk, device);

    // Barriers for copying image to buffer
    const VkImageMemoryBarrier imageBarrier =
    {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
        VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
        VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
        image, // VkImage image;
        { // VkImageSubresourceRange subresourceRange;
            VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
            0u, // deUint32 baseMipLevel;
            1u, // deUint32 mipLevels;
            0u, // deUint32 baseArraySlice;
            1u // deUint32 arraySize;
        }
    };

    const VkBufferMemoryBarrier bufferBarrier =
    {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
        VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
        *buffer, // VkBuffer buffer;
        0u, // VkDeviceSize offset;
        pixelDataSize // VkDeviceSize size;
    };

    const VkCommandBufferBeginInfo cmdBufferBeginInfo =
    {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
        (const VkCommandBufferInheritanceInfo*)DE_NULL, // const VkCommandBufferInheritanceInfo* pInheritanceInfo;
    };

    // Copy image to buffer
    const VkBufferImageCopy copyRegion =
    {
        0u, // VkDeviceSize bufferOffset;
        (deUint32)renderSize.x(), // deUint32 bufferRowLength;
        (deUint32)renderSize.y(), // deUint32 bufferImageHeight;
        { VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u }, // VkImageSubresourceLayers imageSubresource;
        { 0, 0, 0 }, // VkOffset3D imageOffset;
        { renderSize.x(), renderSize.y(), 1u } // VkExtent3D imageExtent;
    };

    VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
    vk.cmdCopyImageToBuffer(*cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *buffer, 1, &copyRegion);
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
    VK_CHECK(vk.endCommandBuffer(*cmdBuffer));

    const VkSubmitInfo submitInfo =
    {
        VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        0u, // deUint32 waitSemaphoreCount;
        DE_NULL, // const VkSemaphore* pWaitSemaphores;
        (const VkPipelineStageFlags*)DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
        1u, // deUint32 commandBufferCount;
        &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
        0u, // deUint32 signalSemaphoreCount;
        DE_NULL // const VkSemaphore* pSignalSemaphores;
    };

    VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
    VK_CHECK(vk.waitForFences(device, 1, &fence.get(), 0, ~(0ull) /* infinity */));

    // Read buffer data
    invalidateMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
    tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(), bufferAlloc->getHostPtr()));

    return resultLevel;
}
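
// Selects the image aspect flags (depth/stencil/color) that match the given texture format.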
VkImageAspectFlags getImageAspectFlags (const tcu::TextureFormat textureFormat)
{
    VkImageAspectFlags imageAspectFlags = 0;

    if (tcu::hasDepthComponent(textureFormat.order))
        imageAspectFlags |= VK_IMAGE_ASPECT_DEPTH_BIT;

    if (tcu::hasStencilComponent(textureFormat.order))
        imageAspectFlags |= VK_IMAGE_ASPECT_STENCIL_BIT;

    if (imageAspectFlags == 0)
        imageAspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;

    return imageAspectFlags;
}
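
// Uploads a TestTexture (optionally together with a separate stencil-only texture for
// combined depth/stencil formats) into destImage via a host-visible staging buffer.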
void uploadTestTextureInternal (const DeviceInterface& vk,
                                VkDevice               device,
                                VkQueue                queue,
                                deUint32               queueFamilyIndex,
                                Allocator&             allocator,
                                const TestTexture&     srcTexture,
                                const TestTexture*     srcStencilTexture,
                                tcu::TextureFormat     format,
                                VkImage                destImage)
{
    deUint32                bufferSize;
    Move<VkBuffer>          buffer;
    de::MovePtr<Allocation> bufferAlloc;
    Move<VkCommandPool>     cmdPool;
    Move<VkCommandBuffer>   cmdBuffer;
    Move<VkFence>           fence;

    const VkImageAspectFlags imageAspectFlags = getImageAspectFlags(format);
    deUint32                 stencilOffset    = 0u;

    // Calculate buffer size
    bufferSize = (srcTexture.isCompressed())? srcTexture.getCompressedSize(): srcTexture.getSize();

    // Stencil-only texture should be provided if (and only if) the image has a combined DS format
    DE_ASSERT((tcu::hasDepthComponent(format.order) && tcu::hasStencilComponent(format.order)) == (srcStencilTexture != DE_NULL));

    if (srcStencilTexture != DE_NULL)
    {
        stencilOffset = static_cast<deUint32>(deAlign32(static_cast<deInt32>(bufferSize), 4));
        bufferSize    = stencilOffset + srcStencilTexture->getSize();
    }

    // Create source buffer
    {
        const VkBufferCreateInfo bufferParams =
        {
            VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
            DE_NULL, // const void* pNext;
            0u, // VkBufferCreateFlags flags;
            bufferSize, // VkDeviceSize size;
            VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
            VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
            0u, // deUint32 queueFamilyIndexCount;
            DE_NULL, // const deUint32* pQueueFamilyIndices;
        };

        buffer      = createBuffer(vk, device, &bufferParams);
        bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
        VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
    }

    // Create command pool and buffer
    cmdPool   = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
    cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);

    // Create fence
    fence = createFence(vk, device);

    // Barriers for copying buffer to image
    const VkBufferMemoryBarrier preBufferBarrier =
    {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
        VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
        *buffer, // VkBuffer buffer;
        0u, // VkDeviceSize offset;
        bufferSize // VkDeviceSize size;
    };

    const VkImageMemoryBarrier preImageBarrier =
    {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        0u, // VkAccessFlags srcAccessMask;
        VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
        VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
        destImage, // VkImage image;
        { // VkImageSubresourceRange subresourceRange;
            imageAspectFlags, // VkImageAspectFlags aspectMask;
            0u, // deUint32 baseMipLevel;
            (deUint32)srcTexture.getNumLevels(), // deUint32 mipLevels;
            0u, // deUint32 baseArraySlice;
            (deUint32)srcTexture.getArraySize(), // deUint32 arraySize;
        }
    };

    const VkImageMemoryBarrier postImageBarrier =
    {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
        VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
        VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
        destImage, // VkImage image;
        { // VkImageSubresourceRange subresourceRange;
            imageAspectFlags, // VkImageAspectFlags aspectMask;
            0u, // deUint32 baseMipLevel;
            (deUint32)srcTexture.getNumLevels(), // deUint32 mipLevels;
            0u, // deUint32 baseArraySlice;
            (deUint32)srcTexture.getArraySize(), // deUint32 arraySize;
        }
    };

    const VkCommandBufferBeginInfo cmdBufferBeginInfo =
    {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
        (const VkCommandBufferInheritanceInfo*)DE_NULL, // const VkCommandBufferInheritanceInfo* pInheritanceInfo;
    };

    std::vector<VkBufferImageCopy> copyRegions = srcTexture.getBufferCopyRegions();

    // Write buffer data
    srcTexture.write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()));

    if (srcStencilTexture != DE_NULL)
    {
        DE_ASSERT(stencilOffset != 0u);

        srcStencilTexture->write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()) + stencilOffset);

        std::vector<VkBufferImageCopy> stencilCopyRegions = srcStencilTexture->getBufferCopyRegions();
        for (size_t regionIdx = 0; regionIdx < stencilCopyRegions.size(); regionIdx++)
        {
            VkBufferImageCopy region = stencilCopyRegions[regionIdx];
            region.bufferOffset += stencilOffset;

            copyRegions.push_back(region);
        }
    }

    flushMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);

    // Copy buffer to image
    VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
    vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
    vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);

    VK_CHECK(vk.endCommandBuffer(*cmdBuffer));

    const VkSubmitInfo submitInfo =
    {
        VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
        DE_NULL, // const void* pNext;
        0u, // deUint32 waitSemaphoreCount;
        DE_NULL, // const VkSemaphore* pWaitSemaphores;
        (const VkPipelineStageFlags*)DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
        1u, // deUint32 commandBufferCount;
        &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
        0u, // deUint32 signalSemaphoreCount;
        DE_NULL // const VkSemaphore* pSignalSemaphores;
    };

    VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
    VK_CHECK(vk.waitForFences(device, 1, &fence.get(), true, ~(0ull) /* infinity */));
}
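
// Uploads srcTexture into destImage, splitting combined depth/stencil data into
// separate depth and stencil copies when required by the texture format.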
void uploadTestTexture (const DeviceInterface& vk,
                        VkDevice               device,
                        VkQueue                queue,
                        deUint32               queueFamilyIndex,
                        Allocator&             allocator,
                        const TestTexture&     srcTexture,
                        VkImage                destImage)
{
    if (tcu::isCombinedDepthStencilType(srcTexture.getTextureFormat().type))
    {
        de::MovePtr<TestTexture> srcDepthTexture;
        de::MovePtr<TestTexture> srcStencilTexture;

        if (tcu::hasDepthComponent(srcTexture.getTextureFormat().order))
        {
            tcu::TextureFormat format;
            switch (srcTexture.getTextureFormat().type) {
                case tcu::TextureFormat::UNSIGNED_INT_16_8_8:
                    format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
                    break;
                case tcu::TextureFormat::UNSIGNED_INT_24_8_REV:
                    format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNSIGNED_INT_24_8_REV);
                    break;
                case tcu::TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
                    format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::FLOAT);
                    break;
                default:
                    DE_ASSERT(false);
                    break;
            }
            srcDepthTexture = srcTexture.copy(format);
        }

        if (tcu::hasStencilComponent(srcTexture.getTextureFormat().order))
            srcStencilTexture = srcTexture.copy(tcu::getEffectiveDepthStencilTextureFormat(srcTexture.getTextureFormat(), tcu::Sampler::MODE_STENCIL));

        uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, *srcDepthTexture, srcStencilTexture.get(), srcTexture.getTextureFormat(), destImage);
    }
    else
        uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, srcTexture, DE_NULL, srcTexture.getTextureFormat(), destImage);
}
// Utilities for test textures

template<typename TcuTextureType>
void allocateLevels (TcuTextureType& texture)
{
    for (int levelNdx = 0; levelNdx < texture.getNumLevels(); levelNdx++)
        texture.allocLevel(levelNdx);
}

template<typename TcuTextureType>
std::vector<tcu::PixelBufferAccess> getLevelsVector (const TcuTextureType& texture)
{
    std::vector<tcu::PixelBufferAccess> levels(texture.getNumLevels());

    for (int levelNdx = 0; levelNdx < texture.getNumLevels(); levelNdx++)
        levels[levelNdx] = *reinterpret_cast<const tcu::PixelBufferAccess*>(&texture.getLevel(levelNdx));

    return levels;
}
// TestTexture

TestTexture::TestTexture (const tcu::TextureFormat& format, int width, int height, int depth)
{
    DE_ASSERT(width >= 1);
    DE_ASSERT(height >= 1);
    DE_ASSERT(depth >= 1);
}

TestTexture::TestTexture (const tcu::CompressedTexFormat& format, int width, int height, int depth)
{
    DE_ASSERT(width >= 1);
    DE_ASSERT(height >= 1);
    DE_ASSERT(depth >= 1);
}

TestTexture::~TestTexture (void)
{
    for (size_t levelNdx = 0; levelNdx < m_compressedLevels.size(); levelNdx++)
        delete m_compressedLevels[levelNdx];
}
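
// Returns the total number of bytes needed to store all uncompressed levels and layers,
// with each level/layer aligned to the same offset multiples used by getBufferCopyRegions().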
deUint32 TestTexture::getSize (void) const
{
    std::vector<deUint32> offsetMultiples;
    deUint32              textureSize = 0;

    offsetMultiples.push_back(4);
    offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

    for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
    {
        for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
        {
            const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);
            textureSize = getNextMultiple(offsetMultiples, textureSize);
            textureSize += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
        }
    }

    return textureSize;
}
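
// Returns the total number of bytes needed to store all compressed levels and layers;
// only valid for compressed textures.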
deUint32 TestTexture::getCompressedSize (void) const
{
    if (!isCompressed())
        throw tcu::InternalError("Texture is not compressed");

    std::vector<deUint32> offsetMultiples;
    deUint32              textureSize = 0;

    offsetMultiples.push_back(4);
    offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

    for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
    {
        for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
        {
            textureSize = getNextMultiple(offsetMultiples, textureSize);
            textureSize += getCompressedLevel(levelNdx, layerNdx).getDataSize();
        }
    }

    return textureSize;
}
tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer)
{
    DE_ASSERT(level >= 0 && level < getNumLevels());
    DE_ASSERT(layer >= 0 && layer < getArraySize());

    return *m_compressedLevels[level * getArraySize() + layer];
}

const tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer) const
{
    DE_ASSERT(level >= 0 && level < getNumLevels());
    DE_ASSERT(layer >= 0 && layer < getArraySize());

    return *m_compressedLevels[level * getArraySize() + layer];
}
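
// Builds one VkBufferImageCopy per level/layer (and per aspect for depth/stencil formats),
// using buffer offsets that match the data layout produced by write().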
std::vector<VkBufferImageCopy> TestTexture::getBufferCopyRegions (void) const
{
    std::vector<deUint32>          offsetMultiples;
    std::vector<VkBufferImageCopy> regions;
    deUint32                       layerDataOffset = 0;

    offsetMultiples.push_back(4);

    if (isCompressed())
    {
        offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

        for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
        {
            for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
            {
                const tcu::CompressedTexture& level = getCompressedLevel(levelNdx, layerNdx);
                tcu::IVec3 blockPixelSize = getBlockPixelSize(level.getFormat());
                layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);

                const VkBufferImageCopy layerRegion =
                {
                    layerDataOffset, // VkDeviceSize bufferOffset;
                    (deUint32)getNextMultiple(blockPixelSize.x(), level.getWidth()), // deUint32 bufferRowLength;
                    (deUint32)getNextMultiple(blockPixelSize.y(), level.getHeight()), // deUint32 bufferImageHeight;
                    { // VkImageSubresourceLayers imageSubresource;
                        VK_IMAGE_ASPECT_COLOR_BIT,
                        (deUint32)levelNdx,
                        (deUint32)layerNdx,
                        1u
                    },
                    { 0u, 0u, 0u }, // VkOffset3D imageOffset;
                    { // VkExtent3D imageExtent;
                        (deUint32)level.getWidth(),
                        (deUint32)level.getHeight(),
                        (deUint32)level.getDepth()
                    }
                };

                regions.push_back(layerRegion);
                layerDataOffset += level.getDataSize();
            }
        }
    }
    else
    {
        std::vector<VkImageAspectFlags> imageAspects;
        tcu::TextureFormat              textureFormat = getTextureFormat();

        if (tcu::hasDepthComponent(textureFormat.order))
            imageAspects.push_back(VK_IMAGE_ASPECT_DEPTH_BIT);

        if (tcu::hasStencilComponent(textureFormat.order))
            imageAspects.push_back(VK_IMAGE_ASPECT_STENCIL_BIT);

        if (imageAspects.empty())
            imageAspects.push_back(VK_IMAGE_ASPECT_COLOR_BIT);

        offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

        for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
        {
            for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
            {
                const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);

                layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);

                for (size_t aspectIndex = 0; aspectIndex < imageAspects.size(); ++aspectIndex)
                {
                    const VkBufferImageCopy layerRegion =
                    {
                        layerDataOffset, // VkDeviceSize bufferOffset;
                        (deUint32)level.getWidth(), // deUint32 bufferRowLength;
                        (deUint32)level.getHeight(), // deUint32 bufferImageHeight;
                        { // VkImageSubresourceLayers imageSubresource;
                            imageAspects[aspectIndex],
                            (deUint32)levelNdx,
                            (deUint32)layerNdx,
                            1u
                        },
                        { 0u, 0u, 0u }, // VkOffset3D imageOffset;
                        { // VkExtent3D imageExtent;
                            (deUint32)level.getWidth(),
                            (deUint32)level.getHeight(),
                            (deUint32)level.getDepth()
                        }
                    };

                    regions.push_back(layerRegion);
                }

                layerDataOffset += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
            }
        }
    }

    return regions;
}
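
// Writes every level/layer into destPtr using the same aligned offsets as getBufferCopyRegions().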
void TestTexture::write (deUint8* destPtr) const
{
    std::vector<deUint32> offsetMultiples;
    deUint32              levelOffset = 0;

    offsetMultiples.push_back(4);

    if (isCompressed())
    {
        offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

        for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
        {
            for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
            {
                levelOffset = getNextMultiple(offsetMultiples, levelOffset);

                const tcu::CompressedTexture& compressedTex = getCompressedLevel(levelNdx, layerNdx);

                deMemcpy(destPtr + levelOffset, compressedTex.getData(), compressedTex.getDataSize());
                levelOffset += compressedTex.getDataSize();
            }
        }
    }
    else
    {
        offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

        for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
        {
            for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
            {
                levelOffset = getNextMultiple(offsetMultiples, levelOffset);

                const tcu::ConstPixelBufferAccess srcAccess  = getLevel(levelNdx, layerNdx);
                const tcu::PixelBufferAccess      destAccess (srcAccess.getFormat(), srcAccess.getSize(), srcAccess.getPitch(), destPtr + levelOffset);

                tcu::copy(destAccess, srcAccess);
                levelOffset += srcAccess.getWidth() * srcAccess.getHeight() * srcAccess.getDepth() * srcAccess.getFormat().getPixelSize();
            }
        }
    }
}
void TestTexture::copyToTexture (TestTexture& destTexture) const
{
    for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
        for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
            tcu::copy(destTexture.getLevel(levelNdx, layerNdx), getLevel(levelNdx, layerNdx));
}

void TestTexture::populateLevels (const std::vector<tcu::PixelBufferAccess>& levels)
{
    for (size_t levelNdx = 0; levelNdx < levels.size(); levelNdx++)
        TestTexture::fillWithGradient(levels[levelNdx]);
}
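
// Fills each compressed level with (pseudo-)random block data and stores the
// corresponding decompressed result back into the supplied level accesses.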
void TestTexture::populateCompressedLevels (tcu::CompressedTexFormat format, const std::vector<tcu::PixelBufferAccess>& decompressedLevels)
{
    // Generate random compressed data and update decompressed data

    de::Random random(123);

    for (size_t levelNdx = 0; levelNdx < decompressedLevels.size(); levelNdx++)
    {
        const tcu::PixelBufferAccess level           = decompressedLevels[levelNdx];
        tcu::CompressedTexture*      compressedLevel = new tcu::CompressedTexture(format, level.getWidth(), level.getHeight(), level.getDepth());
        deUint8* const               compressedData  = (deUint8*)compressedLevel->getData();

        if (tcu::isAstcFormat(format))
        {
            // \todo [2016-01-20 pyry] Comparison doesn't currently handle invalid blocks correctly so we use only valid blocks
            tcu::astc::generateRandomValidBlocks(compressedData, compressedLevel->getDataSize()/tcu::astc::BLOCK_SIZE_BYTES,
                                                 format, tcu::TexDecompressionParams::ASTCMODE_LDR, random.getUint32());
        }
        else
        {
            // Generate random compressed data
            // Random initial values cause assertion during the decompression in case of COMPRESSEDTEXFORMAT_ETC1_RGB8 format
            if (format != tcu::COMPRESSEDTEXFORMAT_ETC1_RGB8)
                for (int byteNdx = 0; byteNdx < compressedLevel->getDataSize(); byteNdx++)
                    compressedData[byteNdx] = 0xFF & random.getUint32();
        }

        m_compressedLevels.push_back(compressedLevel);

        // Store decompressed data
        compressedLevel->decompress(level, tcu::TexDecompressionParams(tcu::TexDecompressionParams::ASTCMODE_LDR));
    }
}
void TestTexture::fillWithGradient (const tcu::PixelBufferAccess& levelAccess)
{
    const tcu::TextureFormatInfo formatInfo = tcu::getTextureFormatInfo(levelAccess.getFormat());
    tcu::fillWithComponentGradients(levelAccess, formatInfo.valueMin, formatInfo.valueMax);
}
// TestTexture1D

TestTexture1D::TestTexture1D (const tcu::TextureFormat& format, int width)
    : TestTexture (format, width, 1, 1)
    , m_texture   (format, width)
{
    allocateLevels(m_texture);
    TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture1D::TestTexture1D (const tcu::CompressedTexFormat& format, int width)
    : TestTexture (format, width, 1, 1)
    , m_texture   (tcu::getUncompressedFormat(format), width)
{
    allocateLevels(m_texture);
    TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
}

TestTexture1D::~TestTexture1D (void)
{
}

int TestTexture1D::getNumLevels (void) const
{
    return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture1D::getLevel (int level, int layer)
{
    DE_ASSERT(layer == 0);

    return m_texture.getLevel(level);
}

const tcu::ConstPixelBufferAccess TestTexture1D::getLevel (int level, int layer) const
{
    DE_ASSERT(layer == 0);

    return m_texture.getLevel(level);
}

const tcu::Texture1D& TestTexture1D::getTexture (void) const
{
    return m_texture;
}

tcu::Texture1D& TestTexture1D::getTexture (void)
{
    return m_texture;
}

de::MovePtr<TestTexture> TestTexture1D::copy(const tcu::TextureFormat format) const
{
    DE_ASSERT(!isCompressed());

    de::MovePtr<TestTexture> texture (new TestTexture1D(format, m_texture.getWidth()));

    copyToTexture(*texture);

    return texture;
}
// TestTexture1DArray

TestTexture1DArray::TestTexture1DArray (const tcu::TextureFormat& format, int width, int arraySize)
    : TestTexture (format, width, 1, arraySize)
    , m_texture   (format, width, arraySize)
{
    allocateLevels(m_texture);
    TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture1DArray::TestTexture1DArray (const tcu::CompressedTexFormat& format, int width, int arraySize)
    : TestTexture (format, width, 1, arraySize)
    , m_texture   (tcu::getUncompressedFormat(format), width, arraySize)
{
    allocateLevels(m_texture);

    std::vector<tcu::PixelBufferAccess> layers;
    for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
        for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++)
            layers.push_back(getLevel(levelNdx, layerNdx));

    TestTexture::populateCompressedLevels(format, layers);
}

TestTexture1DArray::~TestTexture1DArray (void)
{
}

int TestTexture1DArray::getNumLevels (void) const
{
    return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture1DArray::getLevel (int level, int layer)
{
    const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
    const deUint32               layerSize   = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
    const deUint32               layerOffset = layerSize * layer;

    return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

const tcu::ConstPixelBufferAccess TestTexture1DArray::getLevel (int level, int layer) const
{
    const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
    const deUint32                    layerSize   = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
    const deUint32                    layerOffset = layerSize * layer;

    return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

const tcu::Texture1DArray& TestTexture1DArray::getTexture (void) const
{
    return m_texture;
}

tcu::Texture1DArray& TestTexture1DArray::getTexture (void)
{
    return m_texture;
}

int TestTexture1DArray::getArraySize (void) const
{
    return m_texture.getNumLayers();
}

de::MovePtr<TestTexture> TestTexture1DArray::copy(const tcu::TextureFormat format) const
{
    DE_ASSERT(!isCompressed());

    de::MovePtr<TestTexture> texture (new TestTexture1DArray(format, m_texture.getWidth(), getArraySize()));

    copyToTexture(*texture);

    return texture;
}
// TestTexture2D

TestTexture2D::TestTexture2D (const tcu::TextureFormat& format, int width, int height)
    : TestTexture (format, width, height, 1)
    , m_texture   (format, width, height)
{
    allocateLevels(m_texture);
    TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture2D::TestTexture2D (const tcu::CompressedTexFormat& format, int width, int height)
    : TestTexture (format, width, height, 1)
    , m_texture   (tcu::getUncompressedFormat(format), width, height)
{
    allocateLevels(m_texture);
    TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
}

TestTexture2D::~TestTexture2D (void)
{
}

int TestTexture2D::getNumLevels (void) const
{
    return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture2D::getLevel (int level, int layer)
{
    DE_ASSERT(layer == 0);

    return m_texture.getLevel(level);
}

const tcu::ConstPixelBufferAccess TestTexture2D::getLevel (int level, int layer) const
{
    DE_ASSERT(layer == 0);

    return m_texture.getLevel(level);
}

const tcu::Texture2D& TestTexture2D::getTexture (void) const
{
    return m_texture;
}

tcu::Texture2D& TestTexture2D::getTexture (void)
{
    return m_texture;
}

de::MovePtr<TestTexture> TestTexture2D::copy(const tcu::TextureFormat format) const
{
    DE_ASSERT(!isCompressed());

    de::MovePtr<TestTexture> texture (new TestTexture2D(format, m_texture.getWidth(), m_texture.getHeight()));

    copyToTexture(*texture);

    return texture;
}
// TestTexture2DArray

TestTexture2DArray::TestTexture2DArray (const tcu::TextureFormat& format, int width, int height, int arraySize)
    : TestTexture (format, width, height, arraySize)
    , m_texture   (format, width, height, arraySize)
{
    allocateLevels(m_texture);
    TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture2DArray::TestTexture2DArray (const tcu::CompressedTexFormat& format, int width, int height, int arraySize)
    : TestTexture (format, width, height, arraySize)
    , m_texture   (tcu::getUncompressedFormat(format), width, height, arraySize)
{
    allocateLevels(m_texture);

    std::vector<tcu::PixelBufferAccess> layers;
    for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
        for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++)
            layers.push_back(getLevel(levelNdx, layerNdx));

    TestTexture::populateCompressedLevels(format, layers);
}

TestTexture2DArray::~TestTexture2DArray (void)
{
}

int TestTexture2DArray::getNumLevels (void) const
{
    return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture2DArray::getLevel (int level, int layer)
{
    const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
    const deUint32               layerSize   = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
    const deUint32               layerOffset = layerSize * layer;

    return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

const tcu::ConstPixelBufferAccess TestTexture2DArray::getLevel (int level, int layer) const
{
    const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
    const deUint32                    layerSize   = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
    const deUint32                    layerOffset = layerSize * layer;

    return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

const tcu::Texture2DArray& TestTexture2DArray::getTexture (void) const
{
    return m_texture;
}

tcu::Texture2DArray& TestTexture2DArray::getTexture (void)
{
    return m_texture;
}

int TestTexture2DArray::getArraySize (void) const
{
    return m_texture.getNumLayers();
}

de::MovePtr<TestTexture> TestTexture2DArray::copy(const tcu::TextureFormat format) const
{
    DE_ASSERT(!isCompressed());

    de::MovePtr<TestTexture> texture (new TestTexture2DArray(format, m_texture.getWidth(), m_texture.getHeight(), getArraySize()));

    copyToTexture(*texture);

    return texture;
}
// TestTexture3D

TestTexture3D::TestTexture3D (const tcu::TextureFormat& format, int width, int height, int depth)
    : TestTexture (format, width, height, depth)
    , m_texture   (format, width, height, depth)
{
    allocateLevels(m_texture);
    TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTexture3D::TestTexture3D (const tcu::CompressedTexFormat& format, int width, int height, int depth)
    : TestTexture (format, width, height, depth)
    , m_texture   (tcu::getUncompressedFormat(format), width, height, depth)
{
    allocateLevels(m_texture);
    TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));
}

TestTexture3D::~TestTexture3D (void)
{
}

int TestTexture3D::getNumLevels (void) const
{
    return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTexture3D::getLevel (int level, int layer)
{
    DE_ASSERT(layer == 0);

    return m_texture.getLevel(level);
}

const tcu::ConstPixelBufferAccess TestTexture3D::getLevel (int level, int layer) const
{
    DE_ASSERT(layer == 0);

    return m_texture.getLevel(level);
}

const tcu::Texture3D& TestTexture3D::getTexture (void) const
{
    return m_texture;
}

tcu::Texture3D& TestTexture3D::getTexture (void)
{
    return m_texture;
}

de::MovePtr<TestTexture> TestTexture3D::copy(const tcu::TextureFormat format) const
{
    DE_ASSERT(!isCompressed());

    de::MovePtr<TestTexture> texture (new TestTexture3D(format, m_texture.getWidth(), m_texture.getHeight(), m_texture.getDepth()));

    copyToTexture(*texture);

    return texture;
}
// TestTextureCube

const static tcu::CubeFace tcuFaceMapping[tcu::CUBEFACE_LAST] =
{
    tcu::CUBEFACE_POSITIVE_X,
    tcu::CUBEFACE_NEGATIVE_X,
    tcu::CUBEFACE_POSITIVE_Y,
    tcu::CUBEFACE_NEGATIVE_Y,
    tcu::CUBEFACE_POSITIVE_Z,
    tcu::CUBEFACE_NEGATIVE_Z
};

TestTextureCube::TestTextureCube (const tcu::TextureFormat& format, int size)
    : TestTexture (format, size, size, 1)
    , m_texture   (format, size)
{
    for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
    {
        for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; faceNdx++)
        {
            m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx);
            TestTexture::fillWithGradient(m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx]));
        }
    }
}

TestTextureCube::TestTextureCube (const tcu::CompressedTexFormat& format, int size)
    : TestTexture (format, size, size, 1)
    , m_texture   (tcu::getUncompressedFormat(format), size)
{
    std::vector<tcu::PixelBufferAccess> levels(m_texture.getNumLevels() * tcu::CUBEFACE_LAST);

    for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
    {
        for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; faceNdx++)
        {
            m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx);
            levels[levelNdx * tcu::CUBEFACE_LAST + faceNdx] = m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx]);
        }
    }

    TestTexture::populateCompressedLevels(format, levels);
}

TestTextureCube::~TestTextureCube (void)
{
}

int TestTextureCube::getNumLevels (void) const
{
    return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTextureCube::getLevel (int level, int layer)
{
    return m_texture.getLevelFace(level, tcuFaceMapping[layer]);
}

const tcu::ConstPixelBufferAccess TestTextureCube::getLevel (int level, int layer) const
{
    return m_texture.getLevelFace(level, tcuFaceMapping[layer]);
}

int TestTextureCube::getArraySize (void) const
{
    return (int)tcu::CUBEFACE_LAST;
}

const tcu::TextureCube& TestTextureCube::getTexture (void) const
{
    return m_texture;
}

tcu::TextureCube& TestTextureCube::getTexture (void)
{
    return m_texture;
}

de::MovePtr<TestTexture> TestTextureCube::copy(const tcu::TextureFormat format) const
{
    DE_ASSERT(!isCompressed());

    de::MovePtr<TestTexture> texture (new TestTextureCube(format, m_texture.getSize()));

    copyToTexture(*texture);

    return texture;
}
// TestTextureCubeArray

TestTextureCubeArray::TestTextureCubeArray (const tcu::TextureFormat& format, int size, int arraySize)
    : TestTexture (format, size, size, arraySize)
    , m_texture   (format, size, arraySize)
{
    allocateLevels(m_texture);
    TestTexture::populateLevels(getLevelsVector(m_texture));
}

TestTextureCubeArray::TestTextureCubeArray (const tcu::CompressedTexFormat& format, int size, int arraySize)
    : TestTexture (format, size, size, arraySize)
    , m_texture   (tcu::getUncompressedFormat(format), size, arraySize)
{
    DE_ASSERT(arraySize % 6 == 0);

    allocateLevels(m_texture);

    std::vector<tcu::PixelBufferAccess> layers;
    for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
        for (int layerNdx = 0; layerNdx < m_texture.getDepth(); layerNdx++)
            layers.push_back(getLevel(levelNdx, layerNdx));

    TestTexture::populateCompressedLevels(format, layers);
}

TestTextureCubeArray::~TestTextureCubeArray (void)
{
}

int TestTextureCubeArray::getNumLevels (void) const
{
    return m_texture.getNumLevels();
}

tcu::PixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer)
{
    const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
    const deUint32               layerSize   = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
    const deUint32               layerOffset = layerSize * layer;

    return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

const tcu::ConstPixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer) const
{
    const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
    const deUint32                    layerSize   = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
    const deUint32                    layerOffset = layerSize * layer;

    return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
}

int TestTextureCubeArray::getArraySize (void) const
{
    return m_texture.getDepth();
}

const tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void) const
{
    return m_texture;
}

tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void)
{
    return m_texture;
}

de::MovePtr<TestTexture> TestTextureCubeArray::copy(const tcu::TextureFormat format) const
{
    DE_ASSERT(!isCompressed());

    de::MovePtr<TestTexture> texture (new TestTextureCubeArray(format, m_texture.getSize(), getArraySize()));

    copyToTexture(*texture);

    return texture;
}