1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Imagination Technologies Ltd.
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Utilities for images.
23 *//*--------------------------------------------------------------------*/
25 #include "vktPipelineImageUtil.hpp"
26 #include "vkImageUtil.hpp"
27 #include "vkMemUtil.hpp"
28 #include "vkQueryUtil.hpp"
29 #include "vkRefUtil.hpp"
30 #include "tcuTextureUtil.hpp"
31 #include "tcuAstcUtil.hpp"
32 #include "deRandom.hpp"
33 #include "deSharedPtr.hpp"
42 /*! Gets the next multiple of a given divisor */
43 static deUint32 getNextMultiple (deUint32 divisor, deUint32 value)
45 if (value % divisor == 0)
49 return value + divisor - (value % divisor);
52 /*! Gets the next value that is multiple of all given divisors */
53 static deUint32 getNextMultiple (const std::vector<deUint32>& divisors, deUint32 value)
55 deUint32 nextMultiple = value;
56 bool nextMultipleFound = false;
60 nextMultipleFound = true;
62 for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
63 nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);
65 if (nextMultipleFound)
68 DE_ASSERT(nextMultiple < ~((deUint32)0u));
69 nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
75 bool isSupportedSamplableFormat (const InstanceInterface& instanceInterface, VkPhysicalDevice device, VkFormat format)
77 if (isCompressedFormat(format))
79 VkPhysicalDeviceFeatures physicalFeatures;
80 const tcu::CompressedTexFormat compressedFormat = mapVkCompressedFormat(format);
82 instanceInterface.getPhysicalDeviceFeatures(device, &physicalFeatures);
84 if (tcu::isAstcFormat(compressedFormat))
86 if (!physicalFeatures.textureCompressionASTC_LDR)
89 else if (tcu::isEtcFormat(compressedFormat))
91 if (!physicalFeatures.textureCompressionETC2)
96 DE_FATAL("Unsupported compressed format");
100 VkFormatProperties formatProps;
101 instanceInterface.getPhysicalDeviceFormatProperties(device, format, &formatProps);
103 return (formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0u;
// \todo [2016-01-21 pyry] Update this to just rely on vkDefs.hpp once
// CTS has been updated to 1.0.2.
enum
{
	VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
};
113 bool isLinearFilteringSupported (const InstanceInterface& vki, VkPhysicalDevice physicalDevice, VkFormat format, VkImageTiling tiling)
115 const VkFormatProperties formatProperties = getPhysicalDeviceFormatProperties(vki, physicalDevice, format);
116 const VkFormatFeatureFlags formatFeatures = tiling == VK_IMAGE_TILING_LINEAR
117 ? formatProperties.linearTilingFeatures
118 : formatProperties.optimalTilingFeatures;
122 case VK_FORMAT_R32_SFLOAT:
123 case VK_FORMAT_R32G32_SFLOAT:
124 case VK_FORMAT_R32G32B32_SFLOAT:
125 case VK_FORMAT_R32G32B32A32_SFLOAT:
126 case VK_FORMAT_R64_SFLOAT:
127 case VK_FORMAT_R64G64_SFLOAT:
128 case VK_FORMAT_R64G64B64_SFLOAT:
129 case VK_FORMAT_R64G64B64A64_SFLOAT:
130 case VK_FORMAT_D16_UNORM:
131 case VK_FORMAT_X8_D24_UNORM_PACK32:
132 case VK_FORMAT_D32_SFLOAT:
133 case VK_FORMAT_D16_UNORM_S8_UINT:
134 case VK_FORMAT_D24_UNORM_S8_UINT:
135 case VK_FORMAT_D32_SFLOAT_S8_UINT:
136 return (formatFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0;
139 // \todo [2016-01-21 pyry] Check for all formats once drivers have been updated to 1.0.2
140 // and we have tests to verify format properties.
145 bool isMinMaxFilteringSupported (const InstanceInterface& vki, VkPhysicalDevice physicalDevice, VkFormat format, VkImageTiling tiling)
147 const VkFormatProperties formatProperties = getPhysicalDeviceFormatProperties(vki, physicalDevice, format);
148 const VkFormatFeatureFlags formatFeatures = tiling == VK_IMAGE_TILING_LINEAR
149 ? formatProperties.linearTilingFeatures
150 : formatProperties.optimalTilingFeatures;
152 return (formatFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT) != 0;
155 VkBorderColor getFormatBorderColor (BorderColor color, VkFormat format)
157 if (!isCompressedFormat(format) && (isIntFormat(format) || isUintFormat(format)))
161 case BORDER_COLOR_OPAQUE_BLACK: return VK_BORDER_COLOR_INT_OPAQUE_BLACK;
162 case BORDER_COLOR_OPAQUE_WHITE: return VK_BORDER_COLOR_INT_OPAQUE_WHITE;
163 case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_INT_TRANSPARENT_BLACK;
172 case BORDER_COLOR_OPAQUE_BLACK: return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
173 case BORDER_COLOR_OPAQUE_WHITE: return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
174 case BORDER_COLOR_TRANSPARENT_BLACK: return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
181 return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
184 void getLookupScaleBias (vk::VkFormat format, tcu::Vec4& lookupScale, tcu::Vec4& lookupBias)
186 if (!isCompressedFormat(format))
188 const tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(mapVkFormat(format));
190 // Needed to normalize various formats to 0..1 range for writing into RT
191 lookupScale = fmtInfo.lookupScale;
192 lookupBias = fmtInfo.lookupBias;
198 case VK_FORMAT_EAC_R11_SNORM_BLOCK:
199 lookupScale = tcu::Vec4(0.5f, 1.0f, 1.0f, 1.0f);
200 lookupBias = tcu::Vec4(0.5f, 0.0f, 0.0f, 0.0f);
203 case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
204 lookupScale = tcu::Vec4(0.5f, 0.5f, 1.0f, 1.0f);
205 lookupBias = tcu::Vec4(0.5f, 0.5f, 0.0f, 0.0f);
209 // else: All supported compressed formats are fine with no normalization.
210 // ASTC LDR blocks decompress to f16 so querying normalization parameters
211 // based on uncompressed formats would actually lead to massive precision loss
212 // and complete lack of coverage in case of R8G8B8A8_UNORM RT.
213 lookupScale = tcu::Vec4(1.0f);
214 lookupBias = tcu::Vec4(0.0f);
220 de::MovePtr<tcu::TextureLevel> readColorAttachment (const vk::DeviceInterface& vk,
223 deUint32 queueFamilyIndex,
224 vk::Allocator& allocator,
227 const tcu::UVec2& renderSize)
229 Move<VkBuffer> buffer;
230 de::MovePtr<Allocation> bufferAlloc;
231 Move<VkCommandPool> cmdPool;
232 Move<VkCommandBuffer> cmdBuffer;
234 const tcu::TextureFormat tcuFormat = mapVkFormat(format);
235 const VkDeviceSize pixelDataSize = renderSize.x() * renderSize.y() * tcuFormat.getPixelSize();
236 de::MovePtr<tcu::TextureLevel> resultLevel (new tcu::TextureLevel(tcuFormat, renderSize.x(), renderSize.y()));
238 // Create destination buffer
240 const VkBufferCreateInfo bufferParams =
242 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
243 DE_NULL, // const void* pNext;
244 0u, // VkBufferCreateFlags flags;
245 pixelDataSize, // VkDeviceSize size;
246 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage;
247 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
248 0u, // deUint32 queueFamilyIndexCount;
249 DE_NULL // const deUint32* pQueueFamilyIndices;
252 buffer = createBuffer(vk, device, &bufferParams);
253 bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
254 VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
257 // Create command pool and buffer
258 cmdPool = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
259 cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
262 fence = createFence(vk, device);
264 // Barriers for copying image to buffer
266 const VkImageMemoryBarrier imageBarrier =
268 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
269 DE_NULL, // const void* pNext;
270 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
271 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
272 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
273 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
274 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
275 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
276 image, // VkImage image;
277 { // VkImageSubresourceRange subresourceRange;
278 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
279 0u, // deUint32 baseMipLevel;
280 1u, // deUint32 mipLevels;
281 0u, // deUint32 baseArraySlice;
282 1u // deUint32 arraySize;
286 const VkBufferMemoryBarrier bufferBarrier =
288 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
289 DE_NULL, // const void* pNext;
290 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
291 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
292 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
293 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
294 *buffer, // VkBuffer buffer;
295 0u, // VkDeviceSize offset;
296 pixelDataSize // VkDeviceSize size;
299 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
301 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
302 DE_NULL, // const void* pNext;
303 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
304 (const VkCommandBufferInheritanceInfo*)DE_NULL,
307 // Copy image to buffer
309 const VkBufferImageCopy copyRegion =
311 0u, // VkDeviceSize bufferOffset;
312 (deUint32)renderSize.x(), // deUint32 bufferRowLength;
313 (deUint32)renderSize.y(), // deUint32 bufferImageHeight;
314 { VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u }, // VkImageSubresourceLayers imageSubresource;
315 { 0, 0, 0 }, // VkOffset3D imageOffset;
316 { renderSize.x(), renderSize.y(), 1u } // VkExtent3D imageExtent;
319 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
320 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
321 vk.cmdCopyImageToBuffer(*cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *buffer, 1, ©Region);
322 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
323 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
325 const VkSubmitInfo submitInfo =
327 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
328 DE_NULL, // const void* pNext;
329 0u, // deUint32 waitSemaphoreCount;
330 DE_NULL, // const VkSemaphore* pWaitSemaphores;
332 1u, // deUint32 commandBufferCount;
333 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
334 0u, // deUint32 signalSemaphoreCount;
335 DE_NULL // const VkSemaphore* pSignalSemaphores;
338 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
339 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), 0, ~(0ull) /* infinity */));
342 invalidateMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
343 tcu::copy(*resultLevel, tcu::ConstPixelBufferAccess(resultLevel->getFormat(), resultLevel->getSize(), bufferAlloc->getHostPtr()));
351 VkImageAspectFlags getImageAspectFlags (const tcu::TextureFormat textureFormat)
353 VkImageAspectFlags imageAspectFlags = 0;
355 if (tcu::hasDepthComponent(textureFormat.order))
356 imageAspectFlags |= VK_IMAGE_ASPECT_DEPTH_BIT;
358 if (tcu::hasStencilComponent(textureFormat.order))
359 imageAspectFlags |= VK_IMAGE_ASPECT_STENCIL_BIT;
361 if (imageAspectFlags == 0)
362 imageAspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
364 return imageAspectFlags;
367 VkExtent3D mipLevelExtents (const VkExtent3D& baseExtents, const deUint32 mipLevel)
371 result.width = std::max(baseExtents.width >> mipLevel, 1u);
372 result.height = std::max(baseExtents.height >> mipLevel, 1u);
373 result.depth = std::max(baseExtents.depth >> mipLevel, 1u);
378 tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
382 result.x() = extent.width / divisor.width + ((extent.width % divisor.width != 0) ? 1u : 0u);
383 result.y() = extent.height / divisor.height + ((extent.height % divisor.height != 0) ? 1u : 0u);
384 result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth != 0) ? 1u : 0u);
391 void uploadTestTextureInternal (const DeviceInterface& vk,
394 deUint32 queueFamilyIndex,
395 Allocator& allocator,
396 const TestTexture& srcTexture,
397 const TestTexture* srcStencilTexture,
398 tcu::TextureFormat format,
402 Move<VkBuffer> buffer;
403 de::MovePtr<Allocation> bufferAlloc;
404 Move<VkCommandPool> cmdPool;
405 Move<VkCommandBuffer> cmdBuffer;
407 const VkImageAspectFlags imageAspectFlags = getImageAspectFlags(format);
408 deUint32 stencilOffset = 0u;
410 // Calculate buffer size
411 bufferSize = (srcTexture.isCompressed())? srcTexture.getCompressedSize(): srcTexture.getSize();
413 // Stencil-only texture should be provided if (and only if) the image has a combined DS format
414 DE_ASSERT((tcu::hasDepthComponent(format.order) && tcu::hasStencilComponent(format.order)) == (srcStencilTexture != DE_NULL));
416 if (srcStencilTexture != DE_NULL)
418 stencilOffset = static_cast<deUint32>(deAlign32(static_cast<deInt32>(bufferSize), 4));
419 bufferSize = stencilOffset + srcStencilTexture->getSize();
422 // Create source buffer
424 const VkBufferCreateInfo bufferParams =
426 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
427 DE_NULL, // const void* pNext;
428 0u, // VkBufferCreateFlags flags;
429 bufferSize, // VkDeviceSize size;
430 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
431 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
432 0u, // deUint32 queueFamilyIndexCount;
433 DE_NULL, // const deUint32* pQueueFamilyIndices;
436 buffer = createBuffer(vk, device, &bufferParams);
437 bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
438 VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
441 // Create command pool and buffer
442 cmdPool = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
443 cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
446 fence = createFence(vk, device);
448 // Barriers for copying buffer to image
449 const VkBufferMemoryBarrier preBufferBarrier =
451 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
452 DE_NULL, // const void* pNext;
453 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
454 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
455 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
456 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
457 *buffer, // VkBuffer buffer;
458 0u, // VkDeviceSize offset;
459 bufferSize // VkDeviceSize size;
462 const VkImageMemoryBarrier preImageBarrier =
464 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
465 DE_NULL, // const void* pNext;
466 0u, // VkAccessFlags srcAccessMask;
467 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
468 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
469 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
470 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
471 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
472 destImage, // VkImage image;
473 { // VkImageSubresourceRange subresourceRange;
474 imageAspectFlags, // VkImageAspectFlags aspectMask;
475 0u, // deUint32 baseMipLevel;
476 (deUint32)srcTexture.getNumLevels(), // deUint32 mipLevels;
477 0u, // deUint32 baseArraySlice;
478 (deUint32)srcTexture.getArraySize(), // deUint32 arraySize;
482 const VkImageMemoryBarrier postImageBarrier =
484 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
485 DE_NULL, // const void* pNext;
486 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
487 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
488 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
489 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
490 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
491 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
492 destImage, // VkImage image;
493 { // VkImageSubresourceRange subresourceRange;
494 imageAspectFlags, // VkImageAspectFlags aspectMask;
495 0u, // deUint32 baseMipLevel;
496 (deUint32)srcTexture.getNumLevels(), // deUint32 mipLevels;
497 0u, // deUint32 baseArraySlice;
498 (deUint32)srcTexture.getArraySize(), // deUint32 arraySize;
502 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
504 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
505 DE_NULL, // const void* pNext;
506 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
507 (const VkCommandBufferInheritanceInfo*)DE_NULL,
510 std::vector<VkBufferImageCopy> copyRegions = srcTexture.getBufferCopyRegions();
513 srcTexture.write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()));
515 if (srcStencilTexture != DE_NULL)
517 DE_ASSERT(stencilOffset != 0u);
519 srcStencilTexture->write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()) + stencilOffset);
521 std::vector<VkBufferImageCopy> stencilCopyRegions = srcStencilTexture->getBufferCopyRegions();
522 for (size_t regionIdx = 0; regionIdx < stencilCopyRegions.size(); regionIdx++)
524 VkBufferImageCopy region = stencilCopyRegions[regionIdx];
525 region.bufferOffset += stencilOffset;
527 copyRegions.push_back(region);
531 flushMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
533 // Copy buffer to image
534 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
535 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
536 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
537 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
539 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
541 const VkSubmitInfo submitInfo =
543 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
544 DE_NULL, // const void* pNext;
545 0u, // deUint32 waitSemaphoreCount;
546 DE_NULL, // const VkSemaphore* pWaitSemaphores;
548 1u, // deUint32 commandBufferCount;
549 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
550 0u, // deUint32 signalSemaphoreCount;
551 DE_NULL // const VkSemaphore* pSignalSemaphores;
554 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
555 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), true, ~(0ull) /* infinity */));
558 bool checkSparseImageFormatSupport (const VkPhysicalDevice physicalDevice,
559 const InstanceInterface& instance,
560 const VkImageCreateInfo& imageCreateInfo)
562 const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec =
563 getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType, imageCreateInfo.samples, imageCreateInfo.usage, imageCreateInfo.tiling);
565 return (sparseImageFormatPropVec.size() != 0);
568 void allocateAndBindSparseImage (const DeviceInterface& vk,
570 const VkPhysicalDevice physicalDevice,
571 const InstanceInterface& instance,
572 const VkImageCreateInfo& imageCreateInfo,
573 const VkSemaphore& signalSemaphore,
575 Allocator& allocator,
576 std::vector<de::SharedPtr<Allocation> >& allocations,
577 tcu::TextureFormat format,
580 const VkImageAspectFlags imageAspectFlags = getImageAspectFlags(format);
581 const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
582 const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice);
583 deUint32 sparseMemoryReqCount = 0;
585 // Check if the image format supports sparse operations
586 if (!checkSparseImageFormatSupport(physicalDevice, instance, imageCreateInfo))
587 TCU_THROW(NotSupportedError, "The image format does not support sparse operations.");
589 vk.getImageSparseMemoryRequirements(device, destImage, &sparseMemoryReqCount, DE_NULL);
591 DE_ASSERT(sparseMemoryReqCount != 0);
593 std::vector<VkSparseImageMemoryRequirements> sparseImageMemoryRequirements;
594 sparseImageMemoryRequirements.resize(sparseMemoryReqCount);
596 vk.getImageSparseMemoryRequirements(device, destImage, &sparseMemoryReqCount, &sparseImageMemoryRequirements[0]);
598 const deUint32 noMatchFound = ~((deUint32)0);
600 deUint32 aspectIndex = noMatchFound;
601 for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx)
603 if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask == imageAspectFlags)
605 aspectIndex = memoryReqNdx;
610 deUint32 metadataAspectIndex = noMatchFound;
611 for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx)
613 if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT)
615 metadataAspectIndex = memoryReqNdx;
620 if (aspectIndex == noMatchFound)
621 TCU_THROW(NotSupportedError, "Required image aspect not supported.");
623 const VkMemoryRequirements memoryRequirements = getImageMemoryRequirements(vk, device, destImage);
625 deUint32 memoryType = noMatchFound;
626 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemoryProperties.memoryTypeCount; ++memoryTypeNdx)
628 if ((memoryRequirements.memoryTypeBits & (1u << memoryTypeNdx)) != 0 &&
629 MemoryRequirement::Any.matchesHeap(deviceMemoryProperties.memoryTypes[memoryTypeNdx].propertyFlags))
631 memoryType = memoryTypeNdx;
636 if (memoryType == noMatchFound)
637 TCU_THROW(NotSupportedError, "No matching memory type found.");
639 if (memoryRequirements.size > deviceProperties.limits.sparseAddressSpaceSize)
640 TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits.");
642 const VkSparseImageMemoryRequirements aspectRequirements = sparseImageMemoryRequirements[aspectIndex];
643 const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
645 std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
646 std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
648 for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx)
650 for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
652 const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx);
653 const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
654 const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
655 mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
656 mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth );
658 for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
659 for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
660 for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
662 const VkMemoryRequirements allocRequirements =
664 // 28.7.5 alignment shows the block size in bytes
665 memoryRequirements.alignment, // VkDeviceSize size;
666 memoryRequirements.alignment, // VkDeviceSize alignment;
667 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
670 de::SharedPtr<Allocation> allocation(allocator.allocate(allocRequirements, MemoryRequirement::Any).release());
671 allocations.push_back(allocation);
674 offset.x = x*imageGranularity.width;
675 offset.y = y*imageGranularity.height;
676 offset.z = z*imageGranularity.depth;
679 extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
680 extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
681 extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
683 const VkSparseImageMemoryBind imageMemoryBind =
686 imageAspectFlags, // VkImageAspectFlags aspectMask;
687 mipLevelNdx, // uint32_t mipLevel;
688 layerNdx, // uint32_t arrayLayer;
689 }, // VkImageSubresource subresource;
690 offset, // VkOffset3D offset;
691 extent, // VkExtent3D extent;
692 allocation->getMemory(), // VkDeviceMemory memory;
693 allocation->getOffset(), // VkDeviceSize memoryOffset;
694 0u, // VkSparseMemoryBindFlags flags;
697 imageResidencyMemoryBinds.push_back(imageMemoryBind);
701 // Handle MIP tail. There are two cases to consider here:
703 // 1) VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is requested by the driver: each layer needs a separate tail.
704 // 2) otherwise: only one tail is needed.
705 if (aspectRequirements.imageMipTailSize > 0)
707 if (layerNdx == 0 || (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0)
709 const VkMemoryRequirements allocRequirements =
711 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
712 memoryRequirements.alignment, // VkDeviceSize alignment;
713 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
716 const de::SharedPtr<Allocation> allocation(allocator.allocate(allocRequirements, MemoryRequirement::Any).release());
718 const VkSparseMemoryBind imageMipTailMemoryBind =
720 aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride, // VkDeviceSize resourceOffset;
721 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
722 allocation->getMemory(), // VkDeviceMemory memory;
723 allocation->getOffset(), // VkDeviceSize memoryOffset;
724 0u, // VkSparseMemoryBindFlags flags;
727 allocations.push_back(allocation);
729 imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
732 // Handle Metadata. Similarly to MIP tail in aspectRequirements, there are two cases to consider here:
734 // 1) VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is requested by the driver: each layer needs a separate tail.
736 if (metadataAspectIndex != noMatchFound)
738 const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseImageMemoryRequirements[metadataAspectIndex];
740 if (layerNdx == 0 || (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0)
742 const VkMemoryRequirements metadataAllocRequirements =
744 metadataAspectRequirements.imageMipTailSize, // VkDeviceSize size;
745 memoryRequirements.alignment, // VkDeviceSize alignment;
746 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
748 const de::SharedPtr<Allocation> metadataAllocation(allocator.allocate(metadataAllocRequirements, MemoryRequirement::Any).release());
750 const VkSparseMemoryBind metadataMipTailMemoryBind =
752 metadataAspectRequirements.imageMipTailOffset +
753 layerNdx * metadataAspectRequirements.imageMipTailStride, // VkDeviceSize resourceOffset;
754 metadataAspectRequirements.imageMipTailSize, // VkDeviceSize size;
755 metadataAllocation->getMemory(), // VkDeviceMemory memory;
756 metadataAllocation->getOffset(), // VkDeviceSize memoryOffset;
757 VK_SPARSE_MEMORY_BIND_METADATA_BIT // VkSparseMemoryBindFlags flags;
760 allocations.push_back(metadataAllocation);
762 imageMipTailMemoryBinds.push_back(metadataMipTailMemoryBind);
767 VkBindSparseInfo bindSparseInfo =
769 VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
770 DE_NULL, //const void* pNext;
771 0u, //deUint32 waitSemaphoreCount;
772 DE_NULL, //const VkSemaphore* pWaitSemaphores;
773 0u, //deUint32 bufferBindCount;
774 DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
775 0u, //deUint32 imageOpaqueBindCount;
776 DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
777 0u, //deUint32 imageBindCount;
778 DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
779 1u, //deUint32 signalSemaphoreCount;
780 &signalSemaphore //const VkSemaphore* pSignalSemaphores;
783 VkSparseImageMemoryBindInfo imageResidencyBindInfo;
784 VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
786 if (imageResidencyMemoryBinds.size() > 0)
788 imageResidencyBindInfo.image = destImage;
789 imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
790 imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
792 bindSparseInfo.imageBindCount = 1u;
793 bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
796 if (imageMipTailMemoryBinds.size() > 0)
798 imageMipTailBindInfo.image = destImage;
799 imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
800 imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
802 bindSparseInfo.imageOpaqueBindCount = 1u;
803 bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
806 VK_CHECK(vk.queueBindSparse(queue, 1u, &bindSparseInfo, DE_NULL));
809 void uploadTestTextureInternalSparse (const DeviceInterface& vk,
811 const VkPhysicalDevice physicalDevice,
812 const InstanceInterface& instance,
813 const VkImageCreateInfo& imageCreateInfo,
814 VkQueue universalQueue,
815 deUint32 universalQueueFamilyIndex,
817 Allocator& allocator,
818 std::vector<de::SharedPtr<Allocation> >& allocations,
819 const TestTexture& srcTexture,
820 const TestTexture* srcStencilTexture,
821 tcu::TextureFormat format,
824 deUint32 bufferSize = (srcTexture.isCompressed()) ? srcTexture.getCompressedSize(): srcTexture.getSize();
825 const VkImageAspectFlags imageAspectFlags = getImageAspectFlags(format);
826 deUint32 stencilOffset = 0u;
827 const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(vk, device));
829 // Stencil-only texture should be provided if (and only if) the image has a combined DS format
830 DE_ASSERT((tcu::hasDepthComponent(format.order) && tcu::hasStencilComponent(format.order)) == (srcStencilTexture != DE_NULL));
832 if (srcStencilTexture != DE_NULL)
834 stencilOffset = static_cast<deUint32>(deAlign32(static_cast<deInt32>(bufferSize), 4));
835 bufferSize = stencilOffset + srcStencilTexture->getSize();
838 allocateAndBindSparseImage (vk, device, physicalDevice, instance, imageCreateInfo, imageMemoryBindSemaphore.get(), sparseQueue, allocator, allocations, format, destImage);
841 // Create source buffer
842 const VkBufferCreateInfo bufferParams =
844 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
845 DE_NULL, // const void* pNext;
846 0u, // VkBufferCreateFlags flags;
847 bufferSize, // VkDeviceSize size;
848 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
849 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
850 0u, // deUint32 queueFamilyIndexCount;
851 DE_NULL, // const deUint32* pQueueFamilyIndices;
854 Move<VkBuffer> buffer = createBuffer(vk, device, &bufferParams);
855 de::MovePtr<Allocation> bufferAlloc = allocator.allocate(getBufferMemoryRequirements(vk, device, *buffer), MemoryRequirement::HostVisible);
856 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, universalQueueFamilyIndex);
857 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
858 Move<VkFence> fence = createFence(vk, device);
860 VK_CHECK(vk.bindBufferMemory(device, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
862 // Barriers for copying buffer to image
863 const VkBufferMemoryBarrier preBufferBarrier =
865 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
866 DE_NULL, // const void* pNext;
867 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
868 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
869 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
870 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
871 *buffer, // VkBuffer buffer;
872 0u, // VkDeviceSize offset;
873 bufferSize // VkDeviceSize size;
876 const VkImageMemoryBarrier preImageBarrier =
878 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
879 DE_NULL, // const void* pNext;
880 0u, // VkAccessFlags srcAccessMask;
881 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
882 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
883 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
884 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
885 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
886 destImage, // VkImage image;
887 { // VkImageSubresourceRange subresourceRange;
888 imageAspectFlags, // VkImageAspect aspect;
889 0u, // deUint32 baseMipLevel;
890 imageCreateInfo.mipLevels, // deUint32 mipLevels;
891 0u, // deUint32 baseArraySlice;
892 imageCreateInfo.arrayLayers // deUint32 arraySize;
896 const VkImageMemoryBarrier postImageBarrier =
898 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
899 DE_NULL, // const void* pNext;
900 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
901 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
902 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
903 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
904 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
905 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
906 destImage, // VkImage image;
907 { // VkImageSubresourceRange subresourceRange;
908 imageAspectFlags, // VkImageAspect aspect;
909 0u, // deUint32 baseMipLevel;
910 imageCreateInfo.mipLevels, // deUint32 mipLevels;
911 0u, // deUint32 baseArraySlice;
912 imageCreateInfo.arrayLayers // deUint32 arraySize;
916 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
918 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
919 DE_NULL, // const void* pNext;
920 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
921 (const VkCommandBufferInheritanceInfo*)DE_NULL,
924 std::vector<VkBufferImageCopy> copyRegions = srcTexture.getBufferCopyRegions();
927 srcTexture.write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()));
929 if (srcStencilTexture != DE_NULL)
931 DE_ASSERT(stencilOffset != 0u);
933 srcStencilTexture->write(reinterpret_cast<deUint8*>(bufferAlloc->getHostPtr()) + stencilOffset);
935 std::vector<VkBufferImageCopy> stencilCopyRegions = srcStencilTexture->getBufferCopyRegions();
936 for (size_t regionIdx = 0; regionIdx < stencilCopyRegions.size(); regionIdx++)
938 VkBufferImageCopy region = stencilCopyRegions[regionIdx];
939 region.bufferOffset += stencilOffset;
941 copyRegions.push_back(region);
945 flushMappedMemoryRange(vk, device, bufferAlloc->getMemory(), bufferAlloc->getOffset(), VK_WHOLE_SIZE);
947 // Copy buffer to image
948 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
949 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
950 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
951 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
952 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
954 const VkPipelineStageFlags pipelineStageFlags = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
956 const VkSubmitInfo submitInfo =
958 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
959 DE_NULL, // const void* pNext;
960 1u, // deUint32 waitSemaphoreCount;
961 &imageMemoryBindSemaphore.get(), // const VkSemaphore* pWaitSemaphores;
962 &pipelineStageFlags, // const VkPipelineStageFlags* pWaitDstStageMask;
963 1u, // deUint32 commandBufferCount;
964 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
965 0u, // deUint32 signalSemaphoreCount;
966 DE_NULL // const VkSemaphore* pSignalSemaphores;
971 VK_CHECK(vk.queueSubmit(universalQueue, 1, &submitInfo, *fence));
972 VK_CHECK(vk.waitForFences(device, 1, &fence.get(), true, ~(0ull) /* infinity */));
976 VK_CHECK(vk.deviceWaitIdle(device));
/*! \brief Uploads srcTexture into a device image through a host-visible staging buffer.
 *
 * Combined depth/stencil source data cannot be copied in a single region, because
 * buffer-to-image copies address the depth and stencil aspects independently; the source
 * is therefore split into a depth-only and a stencil-only TestTexture before handing off
 * to uploadTestTextureInternal().
 *
 * NOTE(review): several parameter lines (the device/queue handles and the destination
 * image referenced in the calls below) are missing from this listing - confirm against
 * the original file before touching the signature.
 */
void uploadTestTexture (const DeviceInterface& vk,
	deUint32 queueFamilyIndex,
	Allocator& allocator,
	const TestTexture& srcTexture,
	if (tcu::isCombinedDepthStencilType(srcTexture.getTextureFormat().type))
		de::MovePtr<TestTexture> srcDepthTexture;
		de::MovePtr<TestTexture> srcStencilTexture;

		if (tcu::hasDepthComponent(srcTexture.getTextureFormat().order))
			// Select a depth-only format matching the depth bits of the combined format.
			tcu::TextureFormat format;
			switch (srcTexture.getTextureFormat().type)
				case tcu::TextureFormat::UNSIGNED_INT_16_8_8:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
				case tcu::TextureFormat::UNSIGNED_INT_24_8_REV:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNSIGNED_INT_24_8_REV);
				case tcu::TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::FLOAT);
					DE_FATAL("Unexpected source texture format.");
			srcDepthTexture = srcTexture.copy(format);

		if (tcu::hasStencilComponent(srcTexture.getTextureFormat().order))
			srcStencilTexture = srcTexture.copy(tcu::getEffectiveDepthStencilTextureFormat(srcTexture.getTextureFormat(), tcu::Sampler::MODE_STENCIL));

		// Depth aspect comes from srcDepthTexture, stencil from srcStencilTexture.
		uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, *srcDepthTexture, srcStencilTexture.get(), srcTexture.getTextureFormat(), destImage);
	// Single-aspect (color or compressed) data is uploaded directly without splitting.
	uploadTestTextureInternal(vk, device, queue, queueFamilyIndex, allocator, srcTexture, DE_NULL, srcTexture.getTextureFormat(), destImage);
/*! \brief Uploads srcTexture into a sparse-binding image.
 *
 * Mirrors uploadTestTexture(), but routes through uploadTestTextureInternalSparse(),
 * which binds device memory to the sparse image on 'sparseQueue' before copying on
 * 'universalQueue'. Combined depth/stencil data is split into per-aspect textures first.
 *
 * NOTE(review): some lines (the VkDevice/destination-image parameters and several
 * arguments of the internal calls below) are missing from this listing - confirm against
 * the original file.
 */
void uploadTestTextureSparse (const DeviceInterface& vk,
	const VkPhysicalDevice physicalDevice,
	const InstanceInterface& instance,
	const VkImageCreateInfo& imageCreateInfo,
	VkQueue universalQueue,
	deUint32 universalQueueFamilyIndex,
	VkQueue sparseQueue,
	Allocator& allocator,
	std::vector<de::SharedPtr<Allocation> >& allocations,
	const TestTexture& srcTexture,
	if (tcu::isCombinedDepthStencilType(srcTexture.getTextureFormat().type))
		de::MovePtr<TestTexture> srcDepthTexture;
		de::MovePtr<TestTexture> srcStencilTexture;

		if (tcu::hasDepthComponent(srcTexture.getTextureFormat().order))
			// Select a depth-only format matching the depth bits of the combined format.
			tcu::TextureFormat format;
			switch (srcTexture.getTextureFormat().type)
				case tcu::TextureFormat::UNSIGNED_INT_16_8_8:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
				case tcu::TextureFormat::UNSIGNED_INT_24_8_REV:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNSIGNED_INT_24_8_REV);
				case tcu::TextureFormat::FLOAT_UNSIGNED_INT_24_8_REV:
					format = tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::FLOAT);
					DE_FATAL("Unexpected source texture format.");
			srcDepthTexture = srcTexture.copy(format);

		if (tcu::hasStencilComponent(srcTexture.getTextureFormat().order))
			srcStencilTexture = srcTexture.copy(tcu::getEffectiveDepthStencilTextureFormat(srcTexture.getTextureFormat(), tcu::Sampler::MODE_STENCIL));

		// Depth aspect is uploaded from srcDepthTexture, stencil from srcStencilTexture.
		uploadTestTextureInternalSparse (vk,
			universalQueueFamilyIndex,
			srcStencilTexture.get(),
			srcTexture.getTextureFormat(),
	// Single-aspect data is uploaded directly without splitting.
	uploadTestTextureInternalSparse (vk,
		universalQueueFamilyIndex,
		srcTexture.getTextureFormat(),
1101 // Utilities for test textures
/*! Allocates storage for every mip level of the given tcu texture object. */
template<typename TcuTextureType>
void allocateLevels (TcuTextureType& texture)
{
	const int numLevels = texture.getNumLevels();

	for (int ndx = 0; ndx < numLevels; ++ndx)
		texture.allocLevel(ndx);
}
1110 template<typename TcuTextureType>
1111 std::vector<tcu::PixelBufferAccess> getLevelsVector (const TcuTextureType& texture)
1113 std::vector<tcu::PixelBufferAccess> levels(texture.getNumLevels());
1115 for (int levelNdx = 0; levelNdx < texture.getNumLevels(); levelNdx++)
1116 levels[levelNdx] = *reinterpret_cast<const tcu::PixelBufferAccess*>(&texture.getLevel(levelNdx));
/*! \brief Base-class constructor for uncompressed test textures.
 *  Only validates the requested dimensions; level storage is allocated by subclasses.
 *  NOTE(review): no statements after the asserts are visible in this listing - the
 *  'format' parameter appears unused here; confirm against the original file.
 */
TestTexture::TestTexture (const tcu::TextureFormat& format, int width, int height, int depth)
	// Every dimension must be at least one texel.
	DE_ASSERT(width >= 1);
	DE_ASSERT(height >= 1);
	DE_ASSERT(depth >= 1);
/*! \brief Base-class constructor for compressed test textures.
 *  Only validates the requested dimensions; compressed level data is generated later
 *  by populateCompressedLevels().
 *  NOTE(review): no statements after the asserts are visible in this listing - confirm
 *  against the original file.
 */
TestTexture::TestTexture (const tcu::CompressedTexFormat& format, int width, int height, int depth)
	// Every dimension must be at least one texel.
	DE_ASSERT(width >= 1);
	DE_ASSERT(height >= 1);
	DE_ASSERT(depth >= 1);
1147 TestTexture::~TestTexture (void)
1149 for (size_t levelNdx = 0; levelNdx < m_compressedLevels.size(); levelNdx++)
1150 delete m_compressedLevels[levelNdx];
/*! \brief Computes the alignment-padded byte size of all uncompressed levels/layers.
 *  Each (level, layer) slice starts at an offset that is a multiple of both 4 and the
 *  texel size (see getNextMultiple), matching the layout produced by write().
 */
deUint32 TestTexture::getSize (void) const
	std::vector<deUint32> offsetMultiples;
	deUint32 textureSize = 0;

	// Slice offsets must satisfy both 4-byte buffer-copy alignment and texel-size alignment.
	offsetMultiples.push_back(4);
	offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);
			textureSize = getNextMultiple(offsetMultiples, textureSize);
			textureSize += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
	// NOTE(review): the trailing 'return textureSize;' line is missing from this listing.
/*! \brief Computes the alignment-padded byte size of all compressed levels/layers.
 *  \throws tcu::InternalError if this texture is not compressed.
 */
deUint32 TestTexture::getCompressedSize (void) const
	if (!isCompressed())
		throw tcu::InternalError("Texture is not compressed");

	std::vector<deUint32> offsetMultiples;
	deUint32 textureSize = 0;

	// Slice offsets must be multiples of 4 and of the compressed block size.
	offsetMultiples.push_back(4);
	offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			textureSize = getNextMultiple(offsetMultiples, textureSize);
			textureSize += getCompressedLevel(levelNdx, layerNdx).getDataSize();
	// NOTE(review): the trailing 'return textureSize;' line is missing from this listing.
1197 tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer)
1199 DE_ASSERT(level >= 0 && level < getNumLevels());
1200 DE_ASSERT(layer >= 0 && layer < getArraySize());
1202 return *m_compressedLevels[level * getArraySize() + layer];
1205 const tcu::CompressedTexture& TestTexture::getCompressedLevel (int level, int layer) const
1207 DE_ASSERT(level >= 0 && level < getNumLevels());
1208 DE_ASSERT(layer >= 0 && layer < getArraySize());
1210 return *m_compressedLevels[level * getArraySize() + layer];
/*! \brief Builds VkBufferImageCopy regions matching the staging-buffer layout of write().
 *
 *  Buffer offsets follow the same alignment rules (multiple of 4 and of the texel/block
 *  size) as getSize()/getCompressedSize(), so the regions line up with the bytes that
 *  write() places into the staging buffer.
 *
 *  NOTE(review): the compressed/uncompressed branch lines (e.g. 'if (isCompressed())')
 *  are missing from this listing; the first loop below handles compressed levels, the
 *  second uncompressed ones - confirm the branch structure against the original file.
 */
std::vector<VkBufferImageCopy> TestTexture::getBufferCopyRegions (void) const
	std::vector<deUint32> offsetMultiples;
	std::vector<VkBufferImageCopy> regions;
	deUint32 layerDataOffset = 0;

	// 4-byte alignment is required for buffer-to-image copy offsets.
	offsetMultiples.push_back(4);
	offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			const tcu::CompressedTexture& level = getCompressedLevel(levelNdx, layerNdx);
			tcu::IVec3 blockPixelSize = getBlockPixelSize(level.getFormat());
			layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);

			const VkBufferImageCopy layerRegion =
				layerDataOffset, // VkDeviceSize bufferOffset;
				// Row length / image height rounded up to whole compressed blocks.
				(deUint32)getNextMultiple(blockPixelSize.x(), level.getWidth()), // deUint32 bufferRowLength;
				(deUint32)getNextMultiple(blockPixelSize.y(), level.getHeight()), // deUint32 bufferImageHeight;
				{ // VkImageSubresourceLayers imageSubresource;
					VK_IMAGE_ASPECT_COLOR_BIT,
				{ 0u, 0u, 0u }, // VkOffset3D imageOffset;
				{ // VkExtent3D imageExtent;
					(deUint32)level.getWidth(),
					(deUint32)level.getHeight(),
					(deUint32)level.getDepth()

			regions.push_back(layerRegion);
			layerDataOffset += level.getDataSize();

	// Uncompressed path: one region per image aspect (color, or depth and/or stencil).
	std::vector<VkImageAspectFlags> imageAspects;
	tcu::TextureFormat textureFormat = getTextureFormat();

	if (tcu::hasDepthComponent(textureFormat.order))
		imageAspects.push_back(VK_IMAGE_ASPECT_DEPTH_BIT);

	if (tcu::hasStencilComponent(textureFormat.order))
		imageAspects.push_back(VK_IMAGE_ASPECT_STENCIL_BIT);

	if (imageAspects.empty())
		imageAspects.push_back(VK_IMAGE_ASPECT_COLOR_BIT);

	offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			const tcu::ConstPixelBufferAccess level = getLevel(levelNdx, layerNdx);

			layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);

			// All aspects of a combined depth/stencil slice share one buffer offset here.
			for (size_t aspectIndex = 0; aspectIndex < imageAspects.size(); ++aspectIndex)
				const VkBufferImageCopy layerRegion =
					layerDataOffset, // VkDeviceSize bufferOffset;
					(deUint32)level.getWidth(), // deUint32 bufferRowLength;
					(deUint32)level.getHeight(), // deUint32 bufferImageHeight;
					{ // VkImageSubresourceLayers imageSubresource;
						imageAspects[aspectIndex],
					{ 0u, 0u, 0u }, // VkOffset3D imageOffset;
					{ // VkExtent3D imageExtent;
						(deUint32)level.getWidth(),
						(deUint32)level.getHeight(),
						(deUint32)level.getDepth()

				regions.push_back(layerRegion);

			layerDataOffset += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
	// NOTE(review): the trailing 'return regions;' line is missing from this listing.
/*! \brief Serializes all levels/layers into the staging buffer starting at destPtr.
 *  Slice offsets follow the same alignment rules as getSize()/getBufferCopyRegions(),
 *  so the generated copy regions address the right bytes.
 *  NOTE(review): the compressed/uncompressed branch lines are missing from this listing;
 *  the first loop handles compressed levels, the second uncompressed ones.
 */
void TestTexture::write (deUint8* destPtr) const
	std::vector<deUint32> offsetMultiples;
	deUint32 levelOffset = 0;

	// Slice offsets are multiples of 4 and of the block/texel size.
	offsetMultiples.push_back(4);
	offsetMultiples.push_back(tcu::getBlockSize(getCompressedLevel(0, 0).getFormat()));

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			levelOffset = getNextMultiple(offsetMultiples, levelOffset);

			const tcu::CompressedTexture& compressedTex = getCompressedLevel(levelNdx, layerNdx);
			// Compressed data is copied verbatim.
			deMemcpy(destPtr + levelOffset, compressedTex.getData(), compressedTex.getDataSize());
			levelOffset += compressedTex.getDataSize();

	offsetMultiples.push_back(getLevel(0, 0).getFormat().getPixelSize());

	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
			levelOffset = getNextMultiple(offsetMultiples, levelOffset);

			const tcu::ConstPixelBufferAccess srcAccess = getLevel(levelNdx, layerNdx);
			// Wrap the destination bytes in an access with the source's layout, then blit.
			const tcu::PixelBufferAccess destAccess (srcAccess.getFormat(), srcAccess.getSize(), srcAccess.getPitch(), destPtr + levelOffset);

			tcu::copy(destAccess, srcAccess);
			levelOffset += srcAccess.getWidth() * srcAccess.getHeight() * srcAccess.getDepth() * srcAccess.getFormat().getPixelSize();
1356 void TestTexture::copyToTexture (TestTexture& destTexture) const
1358 for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
1359 for (int layerNdx = 0; layerNdx < getArraySize(); layerNdx++)
1360 tcu::copy(destTexture.getLevel(levelNdx, layerNdx), getLevel(levelNdx, layerNdx));
1363 void TestTexture::populateLevels (const std::vector<tcu::PixelBufferAccess>& levels)
1365 for (size_t levelNdx = 0; levelNdx < levels.size(); levelNdx++)
1366 TestTexture::fillWithGradient(levels[levelNdx]);
/*! \brief Generates pseudo-random compressed level data plus its decompressed reference.
 *  A fixed RNG seed keeps the generated content deterministic between runs.
 */
void TestTexture::populateCompressedLevels (tcu::CompressedTexFormat format, const std::vector<tcu::PixelBufferAccess>& decompressedLevels)
	// Generate random compressed data and update decompressed data

	de::Random random(123); // fixed seed -> reproducible texture content

	for (size_t levelNdx = 0; levelNdx < decompressedLevels.size(); levelNdx++)
		const tcu::PixelBufferAccess level = decompressedLevels[levelNdx];
		// Raw pointer ownership transfers to m_compressedLevels (freed in ~TestTexture).
		tcu::CompressedTexture* compressedLevel = new tcu::CompressedTexture(format, level.getWidth(), level.getHeight(), level.getDepth());
		deUint8* const compressedData = (deUint8*)compressedLevel->getData();

		if (tcu::isAstcFormat(format))
			// \todo [2016-01-20 pyry] Comparison doesn't currently handle invalid blocks correctly so we use only valid blocks
			tcu::astc::generateRandomValidBlocks(compressedData, compressedLevel->getDataSize()/tcu::astc::BLOCK_SIZE_BYTES,
				format, tcu::TexDecompressionParams::ASTCMODE_LDR, random.getUint32());

		// Generate random compressed data
		// Random initial values cause assertion during the decompression in case of COMPRESSEDTEXFORMAT_ETC1_RGB8 format
		if (format != tcu::COMPRESSEDTEXFORMAT_ETC1_RGB8)
			for (int byteNdx = 0; byteNdx < compressedLevel->getDataSize(); byteNdx++)
				compressedData[byteNdx] = 0xFF & random.getUint32();

		m_compressedLevels.push_back(compressedLevel);

		// Store decompressed data
		compressedLevel->decompress(level, tcu::TexDecompressionParams(tcu::TexDecompressionParams::ASTCMODE_LDR));
1403 void TestTexture::fillWithGradient (const tcu::PixelBufferAccess& levelAccess)
1405 const tcu::TextureFormatInfo formatInfo = tcu::getTextureFormatInfo(levelAccess.getFormat());
1406 tcu::fillWithComponentGradients(levelAccess, formatInfo.valueMin, formatInfo.valueMax);
// TestTexture1D

/*! Uncompressed 1D test texture: allocates all mip levels and fills them with gradients. */
TestTexture1D::TestTexture1D (const tcu::TextureFormat& format, int width)
	: TestTexture	(format, width, 1, 1)
	, m_texture		(format, width)
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));

/*! Compressed 1D test texture: level storage uses the decompressed format; the content
 *  is generated as random compressed blocks by populateCompressedLevels(). */
TestTexture1D::TestTexture1D (const tcu::CompressedTexFormat& format, int width)
	: TestTexture	(format, width, 1, 1)
	, m_texture		(tcu::getUncompressedFormat(format), width)
	allocateLevels(m_texture);
	TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));

TestTexture1D::~TestTexture1D (void)

int TestTexture1D::getNumLevels (void) const
	return m_texture.getNumLevels();

// 1D textures have a single layer, so 'layer' must always be 0.
tcu::PixelBufferAccess TestTexture1D::getLevel (int level, int layer)
	DE_ASSERT(layer == 0);
	return m_texture.getLevel(level);

const tcu::ConstPixelBufferAccess TestTexture1D::getLevel (int level, int layer) const
	DE_ASSERT(layer == 0);
	return m_texture.getLevel(level);

// NOTE(review): the bodies of both getTexture() accessors are missing from this listing
// (presumably 'return m_texture;') - confirm against the original file.
const tcu::Texture1D& TestTexture1D::getTexture (void) const

tcu::Texture1D& TestTexture1D::getTexture (void)

/*! Deep-copies this texture into a new uncompressed TestTexture1D of the given format. */
de::MovePtr<TestTexture> TestTexture1D::copy(const tcu::TextureFormat format) const
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture> texture (new TestTexture1D(format, m_texture.getWidth()));
	copyToTexture(*texture);
// TestTexture1DArray

/*! Uncompressed 1D array texture: gradient-filled levels across all layers. */
TestTexture1DArray::TestTexture1DArray (const tcu::TextureFormat& format, int width, int arraySize)
	: TestTexture	(format, width, 1, arraySize)
	, m_texture		(format, width, arraySize)
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));

/*! Compressed 1D array texture: random compressed blocks generated per (level, layer). */
TestTexture1DArray::TestTexture1DArray (const tcu::CompressedTexFormat& format, int width, int arraySize)
	: TestTexture	(format, width, 1, arraySize)
	, m_texture		(tcu::getUncompressedFormat(format), width, arraySize)
	allocateLevels(m_texture);

	// Flatten (level, layer) slices level-major to match getCompressedLevel() indexing.
	std::vector<tcu::PixelBufferAccess> layers;
	for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++)
			layers.push_back(getLevel(levelNdx, layerNdx));

	TestTexture::populateCompressedLevels(format, layers);

TestTexture1DArray::~TestTexture1DArray (void)

int TestTexture1DArray::getNumLevels (void) const
	return m_texture.getNumLevels();

// Layers of one level are stored contiguously; carve out the requested layer by byte offset.
tcu::PixelBufferAccess TestTexture1DArray::getLevel (int level, int layer)
	const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
	const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
	const deUint32 layerOffset = layerSize * layer;

	return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);

const tcu::ConstPixelBufferAccess TestTexture1DArray::getLevel (int level, int layer) const
	const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
	const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
	const deUint32 layerOffset = layerSize * layer;

	return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);

// NOTE(review): the bodies of both getTexture() accessors are missing from this listing.
const tcu::Texture1DArray& TestTexture1DArray::getTexture (void) const

tcu::Texture1DArray& TestTexture1DArray::getTexture (void)

int TestTexture1DArray::getArraySize (void) const
	return m_texture.getNumLayers();

/*! Deep-copies this texture into a new uncompressed TestTexture1DArray of the given format. */
de::MovePtr<TestTexture> TestTexture1DArray::copy(const tcu::TextureFormat format) const
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture> texture (new TestTexture1DArray(format, m_texture.getWidth(), getArraySize()));
	copyToTexture(*texture);
// TestTexture2D

/*! Uncompressed 2D test texture: allocates all mip levels and fills them with gradients. */
TestTexture2D::TestTexture2D (const tcu::TextureFormat& format, int width, int height)
	: TestTexture	(format, width, height, 1)
	, m_texture		(format, width, height)
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));

/*! Compressed 2D test texture: random compressed blocks over decompressed-format storage. */
TestTexture2D::TestTexture2D (const tcu::CompressedTexFormat& format, int width, int height)
	: TestTexture	(format, width, height, 1)
	, m_texture		(tcu::getUncompressedFormat(format), width, height)
	allocateLevels(m_texture);
	TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));

TestTexture2D::~TestTexture2D (void)

int TestTexture2D::getNumLevels (void) const
	return m_texture.getNumLevels();

// 2D textures have a single layer, so 'layer' must always be 0.
tcu::PixelBufferAccess TestTexture2D::getLevel (int level, int layer)
	DE_ASSERT(layer == 0);
	return m_texture.getLevel(level);

const tcu::ConstPixelBufferAccess TestTexture2D::getLevel (int level, int layer) const
	DE_ASSERT(layer == 0);
	return m_texture.getLevel(level);

// NOTE(review): the bodies of both getTexture() accessors are missing from this listing.
const tcu::Texture2D& TestTexture2D::getTexture (void) const

tcu::Texture2D& TestTexture2D::getTexture (void)

/*! Deep-copies this texture into a new uncompressed TestTexture2D of the given format. */
de::MovePtr<TestTexture> TestTexture2D::copy(const tcu::TextureFormat format) const
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture> texture (new TestTexture2D(format, m_texture.getWidth(), m_texture.getHeight()));
	copyToTexture(*texture);
// TestTexture2DArray

/*! Uncompressed 2D array texture: gradient-filled levels across all layers. */
TestTexture2DArray::TestTexture2DArray (const tcu::TextureFormat& format, int width, int height, int arraySize)
	: TestTexture	(format, width, height, arraySize)
	, m_texture		(format, width, height, arraySize)
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));

/*! Compressed 2D array texture: random compressed blocks generated per (level, layer). */
TestTexture2DArray::TestTexture2DArray (const tcu::CompressedTexFormat& format, int width, int height, int arraySize)
	: TestTexture	(format, width, height, arraySize)
	, m_texture		(tcu::getUncompressedFormat(format), width, height, arraySize)
	allocateLevels(m_texture);

	// Flatten (level, layer) slices level-major to match getCompressedLevel() indexing.
	std::vector<tcu::PixelBufferAccess> layers;
	for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < m_texture.getNumLayers(); layerNdx++)
			layers.push_back(getLevel(levelNdx, layerNdx));

	TestTexture::populateCompressedLevels(format, layers);

TestTexture2DArray::~TestTexture2DArray (void)

int TestTexture2DArray::getNumLevels (void) const
	return m_texture.getNumLevels();

// Layers of one level are stored contiguously; carve out the requested layer by byte offset.
tcu::PixelBufferAccess TestTexture2DArray::getLevel (int level, int layer)
	const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
	const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
	const deUint32 layerOffset = layerSize * layer;

	return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);

const tcu::ConstPixelBufferAccess TestTexture2DArray::getLevel (int level, int layer) const
	const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
	const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
	const deUint32 layerOffset = layerSize * layer;

	return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);

// NOTE(review): the bodies of both getTexture() accessors are missing from this listing.
const tcu::Texture2DArray& TestTexture2DArray::getTexture (void) const

tcu::Texture2DArray& TestTexture2DArray::getTexture (void)

int TestTexture2DArray::getArraySize (void) const
	return m_texture.getNumLayers();

/*! Deep-copies this texture into a new uncompressed TestTexture2DArray of the given format. */
de::MovePtr<TestTexture> TestTexture2DArray::copy(const tcu::TextureFormat format) const
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture> texture (new TestTexture2DArray(format, m_texture.getWidth(), m_texture.getHeight(), getArraySize()));
	copyToTexture(*texture);
// TestTexture3D

/*! Uncompressed 3D test texture: allocates all mip levels and fills them with gradients. */
TestTexture3D::TestTexture3D (const tcu::TextureFormat& format, int width, int height, int depth)
	: TestTexture	(format, width, height, depth)
	, m_texture		(format, width, height, depth)
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));

/*! Compressed 3D test texture: random compressed blocks over decompressed-format storage. */
TestTexture3D::TestTexture3D (const tcu::CompressedTexFormat& format, int width, int height, int depth)
	: TestTexture	(format, width, height, depth)
	, m_texture		(tcu::getUncompressedFormat(format), width, height, depth)
	allocateLevels(m_texture);
	TestTexture::populateCompressedLevels(format, getLevelsVector(m_texture));

TestTexture3D::~TestTexture3D (void)

int TestTexture3D::getNumLevels (void) const
	return m_texture.getNumLevels();

// 3D textures have no array layers, so 'layer' must always be 0.
tcu::PixelBufferAccess TestTexture3D::getLevel (int level, int layer)
	DE_ASSERT(layer == 0);
	return m_texture.getLevel(level);

const tcu::ConstPixelBufferAccess TestTexture3D::getLevel (int level, int layer) const
	DE_ASSERT(layer == 0);
	return m_texture.getLevel(level);

// NOTE(review): the bodies of both getTexture() accessors are missing from this listing.
const tcu::Texture3D& TestTexture3D::getTexture (void) const

tcu::Texture3D& TestTexture3D::getTexture (void)

/*! Deep-copies this texture into a new uncompressed TestTexture3D of the given format. */
de::MovePtr<TestTexture> TestTexture3D::copy(const tcu::TextureFormat format) const
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture> texture (new TestTexture3D(format, m_texture.getWidth(), m_texture.getHeight(), m_texture.getDepth()));
	copyToTexture(*texture);
// Maps an array-layer index to the corresponding tcu cube face; the listed order is
// +X, -X, +Y, -Y, +Z, -Z (i.e. layer N accesses face tcuFaceMapping[N]).
const static tcu::CubeFace tcuFaceMapping[tcu::CUBEFACE_LAST] =
	tcu::CUBEFACE_POSITIVE_X,
	tcu::CUBEFACE_NEGATIVE_X,
	tcu::CUBEFACE_POSITIVE_Y,
	tcu::CUBEFACE_NEGATIVE_Y,
	tcu::CUBEFACE_POSITIVE_Z,
	tcu::CUBEFACE_NEGATIVE_Z
// TestTextureCube

/*! Uncompressed cube texture: allocates and gradient-fills every face of every level. */
TestTextureCube::TestTextureCube (const tcu::TextureFormat& format, int size)
	: TestTexture	(format, size, size, 1)
	, m_texture		(format, size)
	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; faceNdx++)
			m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx);
			TestTexture::fillWithGradient(m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx]));

/*! Compressed cube texture: generates random compressed blocks for each face of each level. */
TestTextureCube::TestTextureCube (const tcu::CompressedTexFormat& format, int size)
	: TestTexture	(format, size, size, 1)
	, m_texture		(tcu::getUncompressedFormat(format), size)
	std::vector<tcu::PixelBufferAccess> levels(m_texture.getNumLevels() * tcu::CUBEFACE_LAST);

	// Faces are flattened level-major so the order matches getLevel(level, layer).
	for (int levelNdx = 0; levelNdx < getNumLevels(); levelNdx++)
		for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; faceNdx++)
			m_texture.allocLevel(tcuFaceMapping[faceNdx], levelNdx);
			levels[levelNdx * tcu::CUBEFACE_LAST + faceNdx] = m_texture.getLevelFace(levelNdx, tcuFaceMapping[faceNdx]);

	TestTexture::populateCompressedLevels(format, levels);

TestTextureCube::~TestTextureCube (void)

int TestTextureCube::getNumLevels (void) const
	return m_texture.getNumLevels();

// 'layer' selects the cube face via tcuFaceMapping.
tcu::PixelBufferAccess TestTextureCube::getLevel (int level, int layer)
	return m_texture.getLevelFace(level, tcuFaceMapping[layer]);

const tcu::ConstPixelBufferAccess TestTextureCube::getLevel (int level, int layer) const
	return m_texture.getLevelFace(level, tcuFaceMapping[layer]);

// A cube texture always exposes its six faces as layers.
int TestTextureCube::getArraySize (void) const
	return (int)tcu::CUBEFACE_LAST;

// NOTE(review): the bodies of both getTexture() accessors are missing from this listing.
const tcu::TextureCube& TestTextureCube::getTexture (void) const

tcu::TextureCube& TestTextureCube::getTexture (void)

/*! Deep-copies this texture into a new uncompressed TestTextureCube of the given format. */
de::MovePtr<TestTexture> TestTextureCube::copy(const tcu::TextureFormat format) const
	DE_ASSERT(!isCompressed());

	de::MovePtr<TestTexture> texture (new TestTextureCube(format, m_texture.getSize()));
	copyToTexture(*texture);
// TestTextureCubeArray

/*! Uncompressed cube array texture: gradient-filled levels across all cube layers. */
TestTextureCubeArray::TestTextureCubeArray (const tcu::TextureFormat& format, int size, int arraySize)
	: TestTexture	(format, size, size, arraySize)
	, m_texture		(format, size, arraySize)
	allocateLevels(m_texture);
	TestTexture::populateLevels(getLevelsVector(m_texture));

/*! Compressed cube array texture: random compressed blocks generated per (level, layer). */
TestTextureCubeArray::TestTextureCubeArray (const tcu::CompressedTexFormat& format, int size, int arraySize)
	: TestTexture	(format, size, size, arraySize)
	, m_texture		(tcu::getUncompressedFormat(format), size, arraySize)
	// Cube arrays require a whole number of cubes (6 faces per cube).
	DE_ASSERT(arraySize % 6 == 0);

	allocateLevels(m_texture);

	// Flatten (level, layer) slices level-major to match getCompressedLevel() indexing.
	std::vector<tcu::PixelBufferAccess> layers;
	for (int levelNdx = 0; levelNdx < m_texture.getNumLevels(); levelNdx++)
		for (int layerNdx = 0; layerNdx < m_texture.getDepth(); layerNdx++)
			layers.push_back(getLevel(levelNdx, layerNdx));

	TestTexture::populateCompressedLevels(format, layers);

TestTextureCubeArray::~TestTextureCubeArray (void)

int TestTextureCubeArray::getNumLevels (void) const
	return m_texture.getNumLevels();

// Layers of one level are stored contiguously; carve out the requested layer by byte offset.
tcu::PixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer)
	const tcu::PixelBufferAccess levelLayers = m_texture.getLevel(level);
	const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
	const deUint32 layerOffset = layerSize * layer;

	return tcu::PixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);

const tcu::ConstPixelBufferAccess TestTextureCubeArray::getLevel (int level, int layer) const
	const tcu::ConstPixelBufferAccess levelLayers = m_texture.getLevel(level);
	const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
	const deUint32 layerOffset = layerSize * layer;

	return tcu::ConstPixelBufferAccess(levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);

int TestTextureCubeArray::getArraySize (void) const
	return m_texture.getDepth();

// NOTE(review): the bodies of both getTexture() accessors are missing from this listing.
const tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void) const

tcu::TextureCubeArray& TestTextureCubeArray::getTexture (void)
1906 de::MovePtr<TestTexture> TestTextureCubeArray::copy(const tcu::TextureFormat format) const
1908 DE_ASSERT(!isCompressed());
1910 de::MovePtr<TestTexture> texture (new TestTextureCubeArray(format, m_texture.getSize(), getArraySize()));
1912 copyToTexture(*texture);